aboutsummaryrefslogtreecommitdiff
path: root/gcc/gimple-loop-versioning.cc
blob: 002b2a68b96a2b9aa6fbcb54d682c1385a92d5fb (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661
1662
1663
1664
1665
1666
1667
1668
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737
1738
1739
1740
1741
1742
1743
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753
1754
1755
1756
1757
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820
1821
1822
1823
1824
/* Loop versioning pass.
   Copyright (C) 2018-2020 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "tree-pass.h"
#include "gimplify-me.h"
#include "cfgloop.h"
#include "tree-ssa-loop.h"
#include "ssa.h"
#include "tree-scalar-evolution.h"
#include "tree-chrec.h"
#include "tree-ssa-loop-ivopts.h"
#include "fold-const.h"
#include "tree-ssa-propagate.h"
#include "tree-inline.h"
#include "domwalk.h"
#include "alloc-pool.h"
#include "vr-values.h"
#include "gimple-ssa-evrp-analyze.h"
#include "tree-vectorizer.h"
#include "omp-general.h"
#include "predict.h"
#include "tree-into-ssa.h"

namespace {

/* This pass looks for loops that could be simplified if certain loop
   invariant conditions were true.  It is effectively a form of loop
   splitting in which the pass produces the split conditions itself,
   instead of using ones that are already present in the IL.

   Versioning for when strides are 1
   ---------------------------------

   At the moment the only thing the pass looks for are memory references
   like:

     for (auto i : ...)
       ...x[i * stride]...

   It considers changing such loops to:

     if (stride == 1)
       for (auto i : ...)    [A]
	 ...x[i]...
     else
       for (auto i : ...)    [B]
	 ...x[i * stride]...

   This can have several benefits:

   (1) [A] is often easier or cheaper to vectorize than [B].

   (2) The scalar code in [A] is simpler than the scalar code in [B]
       (if the loops cannot be vectorized or need an epilogue loop).

   (3) We might recognize [A] as a pattern, such as a memcpy or memset.

   (4) [A] has simpler address evolutions, which can help other passes
       like loop interchange.

   The optimization is particularly useful for assumed-shape arrays in
   Fortran, where the stride of the innermost dimension depends on the
   array descriptor but is often equal to 1 in practice.  For example:

     subroutine f1(x)
       real :: x(:)
       x(:) = 100
     end subroutine f1

   generates the equivalent of:

     raw_stride = *x.dim[0].stride;
     stride = raw_stride != 0 ? raw_stride : 1;
     x_base = *x.data;
     ...
     tmp1 = stride * S;
     tmp2 = tmp1 - stride;
     *x_base[tmp2] = 1.0e+2;

   but in the common case that stride == 1, the last three statements
   simplify to:

     tmp3 = S + -1;
     *x_base[tmp3] = 1.0e+2;

   The optimization is in principle very simple.  The difficult parts are:

   (a) deciding which parts of a general address calculation correspond
       to the inner dimension of an array, since this usually isn't explicit
       in the IL, and for C often isn't even explicit in the source code

   (b) estimating when the transformation is worthwhile

   Structure
   ---------

   The pass has four phases:

   (1) Walk through the statements looking for and recording potential
       versioning opportunities.  Stop if there are none.

   (2) Use context-sensitive range information to see whether any versioning
       conditions are impossible in practice.  Remove them if so, and stop
       if no opportunities remain.

       (We do this only after (1) to keep compile time down when no
       versioning opportunities exist.)

   (3) Apply the cost model.  Decide which versioning opportunities are
       worthwhile and at which nesting level they should be applied.

   (4) Attempt to version all the loops selected by (3), so that:

	 for (...)
	   ...

       becomes:

	 if (!cond)
	   for (...) // Original loop
	     ...
	 else
	   for (...) // New loop
	     ...

       Use the version condition COND to simplify the new loop.  */

/* Enumerates the likelihood that a particular value indexes the inner
   dimension of an array.  */
enum inner_likelihood {
  INNER_UNLIKELY,
  INNER_DONT_KNOW,
  INNER_LIKELY
};

/* Information about one term of an address_info.  */
struct address_term_info
{
  /* The value of the term is EXPR * MULTIPLIER.  */
  tree expr;
  unsigned HOST_WIDE_INT multiplier;

  /* The stride applied by EXPR in each iteration of some unrecorded loop,
     or null if no stride has been identified.  */
  tree stride;

  /* Enumerates the likelihood that EXPR indexes the inner dimension
     of an array.  */
  enum inner_likelihood inner_likelihood;

  /* True if STRIDE == 1 is a versioning opportunity when considered
     in isolation.  */
  bool versioning_opportunity_p;
};

/* Information about an address calculation, and the range of constant
   offsets applied to it.  */
class address_info
{
public:
  static const unsigned int MAX_TERMS = 8;

  /* One statement that calculates the address.  If multiple statements
     share the same address, we only record the first.  */
  gimple *stmt;

  /* The loop containing STMT (cached for convenience).  If multiple
     statements share the same address, they all belong to this loop.  */
  class loop *loop;

  /* A decomposition of the calculation into a sum of terms plus an
     optional base.  When BASE is provided, it is never an SSA name.
     Once initialization is complete, all members of TERMs are SSA names.  */
  tree base;
  auto_vec<address_term_info, MAX_TERMS> terms;

  /* All bytes accessed from the address fall in the offset range
     [MIN_OFFSET, MAX_OFFSET).  */
  HOST_WIDE_INT min_offset, max_offset;
};

/* Stores addresses based on their base and terms (ignoring the offsets).  */
struct address_info_hasher : nofree_ptr_hash <address_info>
{
  static hashval_t hash (const address_info *);
  static bool equal (const address_info *, const address_info *);
};

/* Information about the versioning we'd like to apply to a loop.  */
class loop_info
{
public:
  bool worth_versioning_p () const;

  /* True if we've decided not to version this loop.  The remaining
     fields are meaningless if so.  */
  bool rejected_p;

  /* True if at least one subloop of this loop benefits from versioning.  */
  bool subloops_benefit_p;

  /* An estimate of the total number of instructions in the loop,
     excluding those in subloops that benefit from versioning.  */
  unsigned int num_insns;

  /* The outermost loop that can handle all the version checks
     described below.  */
  class loop *outermost;

  /* The first entry in the list of blocks that belong to this loop
     (and not to subloops).  m_next_block_in_loop provides the chain
     pointers for the list.  */
  basic_block block_list;

  /* We'd like to version the loop for the case in which these SSA names
     (keyed off their SSA_NAME_VERSION) are all equal to 1 at runtime.  */
  bitmap_head unity_names;

  /* If versioning succeeds, this points the version of the loop that
     assumes the version conditions holds.  */
  class loop *optimized_loop;
};

/* The main pass structure.  */
class loop_versioning
{
public:
  loop_versioning (function *);
  ~loop_versioning ();
  unsigned int run ();

private:
  /* Used to walk the dominator tree to find loop versioning conditions
     that are always false.  */
  class lv_dom_walker : public dom_walker
  {
  public:
    lv_dom_walker (loop_versioning &);

    edge before_dom_children (basic_block) FINAL OVERRIDE;
    void after_dom_children (basic_block) FINAL OVERRIDE;

  private:
    /* The parent pass.  */
    loop_versioning &m_lv;

    /* Used to build context-dependent range information.  */
    evrp_range_analyzer m_range_analyzer;
  };

  /* Used to simplify statements based on conditions that are established
     by the version checks.  */
  class name_prop : public substitute_and_fold_engine
  {
  public:
    name_prop (loop_info &li) : m_li (li) {}
    tree get_value (tree, gimple *) FINAL OVERRIDE;

  private:
    /* Information about the versioning we've performed on the loop.  */
    loop_info &m_li;
  };

  loop_info &get_loop_info (class loop *loop) { return m_loops[loop->num]; }

  unsigned int max_insns_for_loop (class loop *);
  bool expensive_stmt_p (gimple *);

  void version_for_unity (gimple *, tree);
  bool acceptable_multiplier_p (tree, unsigned HOST_WIDE_INT,
				unsigned HOST_WIDE_INT * = 0);
  bool acceptable_type_p (tree, unsigned HOST_WIDE_INT *);
  bool multiply_term_by (address_term_info &, tree);
  inner_likelihood get_inner_likelihood (tree, unsigned HOST_WIDE_INT);
  void dump_inner_likelihood (address_info &, address_term_info &);
  void analyze_stride (address_info &, address_term_info &,
		       tree, class loop *);
  bool find_per_loop_multiplication (address_info &, address_term_info &);
  bool analyze_term_using_scevs (address_info &, address_term_info &);
  void analyze_arbitrary_term (address_info &, address_term_info &);
  void analyze_address_fragment (address_info &);
  void record_address_fragment (gimple *, unsigned HOST_WIDE_INT,
				tree, unsigned HOST_WIDE_INT, HOST_WIDE_INT);
  void analyze_expr (gimple *, tree);
  bool analyze_block (basic_block);
  bool analyze_blocks ();

  void prune_loop_conditions (class loop *, vr_values *);
  bool prune_conditions ();

  void merge_loop_info (class loop *, class loop *);
  void add_loop_to_queue (class loop *);
  bool decide_whether_loop_is_versionable (class loop *);
  bool make_versioning_decisions ();

  bool version_loop (class loop *);
  void implement_versioning_decisions ();

  /* The function we're optimizing.  */
  function *m_fn;

  /* The obstack to use for all pass-specific bitmaps.  */
  bitmap_obstack m_bitmap_obstack;

  /* An obstack to use for general allocation.  */
  obstack m_obstack;

  /* The number of loops in the function.  */
  unsigned int m_nloops;

  /* The total number of loop version conditions we've found.  */
  unsigned int m_num_conditions;

  /* Assume that an address fragment of the form i * stride * scale
     (for variable stride and constant scale) will not benefit from
     versioning for stride == 1 when scale is greater than this value.  */
  unsigned HOST_WIDE_INT m_maximum_scale;

  /* Information about each loop.  */
  auto_vec<loop_info> m_loops;

  /* Used to form a linked list of blocks that belong to a loop,
     started by loop_info::block_list.  */
  auto_vec<basic_block> m_next_block_in_loop;

  /* The list of loops that we've decided to version.  */
  auto_vec<class loop *> m_loops_to_version;

  /* A table of addresses in the current loop, keyed off their values
     but not their offsets.  */
  hash_table <address_info_hasher> m_address_table;

  /* A list of all addresses in M_ADDRESS_TABLE, in a predictable order.  */
  auto_vec <address_info *, 32> m_address_list;
};

/* If EXPR is an SSA name and not a default definition, return the
   defining statement, otherwise return null.  */

static gimple *
maybe_get_stmt (tree expr)
{
  if (TREE_CODE (expr) == SSA_NAME && !SSA_NAME_IS_DEFAULT_DEF (expr))
    return SSA_NAME_DEF_STMT (expr);
  return NULL;
}

/* Like maybe_get_stmt, but also return null if the defining
   statement isn't an assignment.  */

static gassign *
maybe_get_assign (tree expr)
{
  return safe_dyn_cast <gassign *> (maybe_get_stmt (expr));
}

/* Return true if this pass should look through a cast of expression FROM
   to type TYPE when analyzing pieces of an address.  */

static bool
look_through_cast_p (tree type, tree from)
{
  return (INTEGRAL_TYPE_P (TREE_TYPE (from)) == INTEGRAL_TYPE_P (type)
	  && POINTER_TYPE_P (TREE_TYPE (from)) == POINTER_TYPE_P (type));
}

/* Strip all conversions of integers or pointers from EXPR, regardless
   of whether the conversions are nops.  This is useful in the context
   of this pass because we're not trying to fold or simulate the
   expression; we just want to see how it's structured.  */

static tree
strip_casts (tree expr)
{
  const unsigned int MAX_NITERS = 4;

  tree type = TREE_TYPE (expr);
  while (CONVERT_EXPR_P (expr)
	 && look_through_cast_p (type, TREE_OPERAND (expr, 0)))
    expr = TREE_OPERAND (expr, 0);

  for (unsigned int niters = 0; niters < MAX_NITERS; ++niters)
    {
      gassign *assign = maybe_get_assign (expr);
      if (assign
	  && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (assign))
	  && look_through_cast_p (type, gimple_assign_rhs1 (assign)))
	expr = gimple_assign_rhs1 (assign);
      else
	break;
    }
  return expr;
}

/* Compare two address_term_infos in the same address_info.  */

static int
compare_address_terms (const void *a_uncast, const void *b_uncast)
{
  const address_term_info *a = (const address_term_info *) a_uncast;
  const address_term_info *b = (const address_term_info *) b_uncast;

  if (a->expr != b->expr)
    return SSA_NAME_VERSION (a->expr) < SSA_NAME_VERSION (b->expr) ? -1 : 1;

  if (a->multiplier != b->multiplier)
    return a->multiplier < b->multiplier ? -1 : 1;

  return 0;
}

/* Dump ADDRESS using flags FLAGS.  */

static void
dump_address_info (dump_flags_t flags, address_info &address)
{
  if (address.base)
    dump_printf (flags, "%T + ", address.base);
  for (unsigned int i = 0; i < address.terms.length (); ++i)
    {
      if (i != 0)
	dump_printf (flags, " + ");
      dump_printf (flags, "%T", address.terms[i].expr);
      if (address.terms[i].multiplier != 1)
	dump_printf (flags, " * %wd", address.terms[i].multiplier);
    }
  dump_printf (flags, " + [%wd, %wd]",
	       address.min_offset, address.max_offset - 1);
}

/* Hash an address_info based on its base and terms.  */

hashval_t
address_info_hasher::hash (const address_info *info)
{
  inchash::hash hash;
  hash.add_int (info->base ? TREE_CODE (info->base) : 0);
  hash.add_int (info->terms.length ());
  for (unsigned int i = 0; i < info->terms.length (); ++i)
    {
      hash.add_int (SSA_NAME_VERSION (info->terms[i].expr));
      hash.add_hwi (info->terms[i].multiplier);
    }
  return hash.end ();
}

/* Return true if two address_infos have equal bases and terms.  Other
   properties might be different (such as the statement or constant
   offset range).  */

bool
address_info_hasher::equal (const address_info *a, const address_info *b)
{
  if (a->base != b->base
      && (!a->base || !b->base || !operand_equal_p (a->base, b->base, 0)))
    return false;

  if (a->terms.length () != b->terms.length ())
    return false;

  for (unsigned int i = 0; i < a->terms.length (); ++i)
    if (a->terms[i].expr != b->terms[i].expr
	|| a->terms[i].multiplier != b->terms[i].multiplier)
      return false;

  return true;
}

/* Return true if we want to version the loop, i.e. if we have a
   specific reason for doing so and no specific reason not to.  */

bool
loop_info::worth_versioning_p () const
{
  return (!rejected_p
	  && (!bitmap_empty_p (&unity_names) || subloops_benefit_p));
}

loop_versioning::lv_dom_walker::lv_dom_walker (loop_versioning &lv)
  : dom_walker (CDI_DOMINATORS), m_lv (lv), m_range_analyzer (false)
{
}

/* Process BB before processing the blocks it dominates.  */

edge
loop_versioning::lv_dom_walker::before_dom_children (basic_block bb)
{
  m_range_analyzer.enter (bb);

  if (bb == bb->loop_father->header)
    m_lv.prune_loop_conditions (bb->loop_father,
				m_range_analyzer.get_vr_values ());

  for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
       gsi_next (&si))
    m_range_analyzer.record_ranges_from_stmt (gsi_stmt (si), false);

  return NULL;
}

/* Process BB after processing the blocks it dominates.  */

void
loop_versioning::lv_dom_walker::after_dom_children (basic_block bb)
{
  m_range_analyzer.leave (bb);
}

/* Decide whether to replace VAL with a new value in a versioned loop.
   Return the new value if so, otherwise return null.  */

tree
loop_versioning::name_prop::get_value (tree val,
				       gimple *stmt ATTRIBUTE_UNUSED)
{
  if (TREE_CODE (val) == SSA_NAME
      && bitmap_bit_p (&m_li.unity_names, SSA_NAME_VERSION (val)))
    return build_one_cst (TREE_TYPE (val));
  return NULL_TREE;
}

/* Initialize the structure to optimize FN.  */

loop_versioning::loop_versioning (function *fn)
  : m_fn (fn),
    m_nloops (number_of_loops (fn)),
    m_num_conditions (0),
    m_address_table (31)
{
  bitmap_obstack_initialize (&m_bitmap_obstack);
  gcc_obstack_init (&m_obstack);

  /* Initialize the loop information.  */
  m_loops.safe_grow_cleared (m_nloops);
  for (unsigned int i = 0; i < m_nloops; ++i)
    {
      m_loops[i].outermost = get_loop (m_fn, 0);
      bitmap_initialize (&m_loops[i].unity_names, &m_bitmap_obstack);
    }

  /* Initialize the list of blocks that belong to each loop.  */
  unsigned int nbbs = last_basic_block_for_fn (fn);
  m_next_block_in_loop.safe_grow (nbbs);
  basic_block bb;
  FOR_EACH_BB_FN (bb, fn)
    {
      loop_info &li = get_loop_info (bb->loop_father);
      m_next_block_in_loop[bb->index] = li.block_list;
      li.block_list = bb;
    }

  /* MAX_FIXED_MODE_SIZE should be a reasonable maximum scale for
     unvectorizable code, since it is the largest size that can be
     handled efficiently by scalar code.  omp_max_vf calculates the
     maximum number of bytes in a vector, when such a value is relevant
     to loop optimization.  */
  m_maximum_scale = estimated_poly_value (omp_max_vf ());
  m_maximum_scale = MAX (m_maximum_scale, MAX_FIXED_MODE_SIZE);
}

loop_versioning::~loop_versioning ()
{
  bitmap_obstack_release (&m_bitmap_obstack);
  obstack_free (&m_obstack, NULL);
}

/* Return the maximum number of instructions allowed in LOOP before
   it becomes too big for versioning.

   There are separate limits for inner and outer loops.  The limit for
   inner loops applies only to loops that benefit directly from versioning.
   The limit for outer loops applies to all code in the outer loop and
   its subloops that *doesn't* benefit directly from versioning; such code
   would be "taken along for the ride".  The idea is that if the cost of
   the latter is small, it is better to version outer loops rather than
   inner loops, both to reduce the number of repeated checks and to enable
   more of the loop nest to be optimized as a natural nest (e.g. by loop
   interchange or outer-loop vectorization).  */

unsigned int
loop_versioning::max_insns_for_loop (class loop *loop)
{
  return (loop->inner
	  ? param_loop_versioning_max_outer_insns
	  : param_loop_versioning_max_inner_insns);
}

/* Return true if for cost reasons we should avoid versioning any loop
   that contains STMT.

   Note that we don't need to check whether versioning is invalid for
   correctness reasons, since the versioning process does that for us.
   The conditions involved are too rare to be worth duplicating here.  */

bool
loop_versioning::expensive_stmt_p (gimple *stmt)
{
  if (gcall *call = dyn_cast <gcall *> (stmt))
    /* Assume for now that the time spent in an "expensive" call would
       overwhelm any saving from versioning.  */
    return !gimple_inexpensive_call_p (call);
  return false;
}

/* Record that we want to version the loop that contains STMT for the
   case in which SSA name NAME is equal to 1.  We already know that NAME
   is invariant in the loop.  */

void
loop_versioning::version_for_unity (gimple *stmt, tree name)
{
  class loop *loop = loop_containing_stmt (stmt);
  loop_info &li = get_loop_info (loop);

  if (bitmap_set_bit (&li.unity_names, SSA_NAME_VERSION (name)))
    {
      /* This is the first time we've wanted to version LOOP for NAME.
	 Keep track of the outermost loop that can handle all versioning
	 checks in LI.  */
      class loop *outermost
	= outermost_invariant_loop_for_expr (loop, name);
      if (loop_depth (li.outermost) < loop_depth (outermost))
	li.outermost = outermost;

      if (dump_enabled_p ())
	{
	  dump_printf_loc (MSG_NOTE, stmt, "want to version containing loop"
			   " for when %T == 1", name);
	  if (outermost == loop)
	    dump_printf (MSG_NOTE, "; cannot hoist check further");
	  else
	    {
	      dump_printf (MSG_NOTE, "; could implement the check at loop"
			   " depth %d", loop_depth (outermost));
	      if (loop_depth (li.outermost) > loop_depth (outermost))
		dump_printf (MSG_NOTE, ", but other checks only allow"
			     " a depth of %d", loop_depth (li.outermost));
	    }
	  dump_printf (MSG_NOTE, "\n");
	}

      m_num_conditions += 1;
    }
  else
    {
      /* This is a duplicate request.  */
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, stmt, "already asked to version containing"
			 " loop for when %T == 1\n", name);
    }
}

/* Return true if OP1_TREE is constant and if in principle it is worth
   versioning an address fragment of the form:

     i * OP1_TREE * OP2 * stride

   for the case in which stride == 1.  This in practice means testing
   whether:

     OP1_TREE * OP2 <= M_MAXIMUM_SCALE.

   If RESULT is nonnull, store OP1_TREE * OP2 there when returning true.  */

bool
loop_versioning::acceptable_multiplier_p (tree op1_tree,
					  unsigned HOST_WIDE_INT op2,
					  unsigned HOST_WIDE_INT *result)
{
  if (tree_fits_uhwi_p (op1_tree))
    {
      unsigned HOST_WIDE_INT op1 = tree_to_uhwi (op1_tree);
      /* The first part checks for overflow.  */
      if (op1 * op2 >= op2 && op1 * op2 <= m_maximum_scale)
	{
	  if (result)
	    *result = op1 * op2;
	  return true;
	}
    }
  return false;
}

/* Return true if it is worth using loop versioning on a memory access
   of type TYPE.  Store the size of the access in *SIZE if so.  */

bool
loop_versioning::acceptable_type_p (tree type, unsigned HOST_WIDE_INT *size)
{
  return (TYPE_SIZE_UNIT (type)
	  && acceptable_multiplier_p (TYPE_SIZE_UNIT (type), 1, size));
}

/* See whether OP is constant and whether we can multiply TERM by that
   constant without exceeding M_MAXIMUM_SCALE.  Return true and update
   TERM if so.  */

bool
loop_versioning::multiply_term_by (address_term_info &term, tree op)
{
  return acceptable_multiplier_p (op, term.multiplier, &term.multiplier);
}

/* Decide whether an address fragment of the form STRIDE * MULTIPLIER
   is likely to be indexing an innermost dimension, returning the result
   as an INNER_* probability.  */

inner_likelihood
loop_versioning::get_inner_likelihood (tree stride,
				       unsigned HOST_WIDE_INT multiplier)
{
  const unsigned int MAX_NITERS = 8;

  /* Iterate over possible values of STRIDE.  Return INNER_LIKELY if at
     least one of those values is likely to be for the innermost dimension.
     Record in UNLIKELY_P if at least one of those values is unlikely to be
     for the innermost dimension.

     E.g. for:

       stride = cond ? a * b : 1

     we should treat STRIDE as being a likely inner dimension, since
     we know that it is 1 under at least some circumstances.  (See the
     Fortran example below.)  However:

       stride = a * b

     on its own is unlikely to be for the innermost dimension, since
     that would require both a and b to be 1 at runtime.  */
  bool unlikely_p = false;
  tree worklist[MAX_NITERS];
  unsigned int length = 0;
  worklist[length++] = stride;
  for (unsigned int i = 0; i < length; ++i)
    {
      tree expr = worklist[i];

      if (CONSTANT_CLASS_P (expr))
	{
	  /* See if EXPR * MULTIPLIER would be consistent with an individual
	     access or a small grouped access.  */
	  if (acceptable_multiplier_p (expr, multiplier))
	    return INNER_LIKELY;
	  else
	    unlikely_p = true;
	}
      else if (gimple *stmt = maybe_get_stmt (expr))
	{
	  /* If EXPR is set by a PHI node, queue its arguments in case
	     we find one that is consistent with an inner dimension.

	     An important instance of this is the Fortran handling of array
	     descriptors, which calculates the stride of the inner dimension
	     using a PHI equivalent of:

		raw_stride = a.dim[0].stride;
		stride = raw_stride != 0 ? raw_stride : 1;

	     (Strides for outer dimensions do not treat 0 specially.)  */
	  if (gphi *phi = dyn_cast <gphi *> (stmt))
	    {
	      unsigned int nargs = gimple_phi_num_args (phi);
	      for (unsigned int j = 0; j < nargs && length < MAX_NITERS; ++j)
		worklist[length++] = strip_casts (gimple_phi_arg_def (phi, j));
	    }
	  /* If the value is set by an assignment, expect it to be read
	     from memory (such as an array descriptor) rather than be
	     calculated.  */
	  else if (gassign *assign = dyn_cast <gassign *> (stmt))
	    {
	      if (!gimple_assign_load_p (assign))
		unlikely_p = true;
	    }
	  /* Things like calls don't really tell us anything.  */
	}
    }

  /* We didn't find any possible values of STRIDE that were likely to be
     for the innermost dimension.  If we found one that was actively
     unlikely to be for the innermost dimension, assume that that applies
     to STRIDE too.  */
  return unlikely_p ? INNER_UNLIKELY : INNER_DONT_KNOW;
}

/* Dump the likelihood that TERM's stride is for the innermost dimension.
   ADDRESS is the address that contains TERM.  */

void
loop_versioning::dump_inner_likelihood (address_info &address,
					address_term_info &term)
{
  if (term.inner_likelihood == INNER_LIKELY)
    dump_printf_loc (MSG_NOTE, address.stmt, "%T is likely to be the"
		     " innermost dimension\n", term.stride);
  else if (term.inner_likelihood == INNER_UNLIKELY)
    dump_printf_loc (MSG_NOTE, address.stmt, "%T is probably not the"
		     " innermost dimension\n", term.stride);
  else
    dump_printf_loc (MSG_NOTE, address.stmt, "cannot tell whether %T"
		     " is the innermost dimension\n", term.stride);
}

/* The caller has identified that STRIDE is the stride of interest
   in TERM, and that the stride is applied in OP_LOOP.  Record this
   information in TERM, deciding whether STRIDE is likely to be for
   the innermost dimension of an array and whether it represents a
   versioning opportunity.  ADDRESS is the address that contains TERM.  */

void
loop_versioning::analyze_stride (address_info &address,
				 address_term_info &term,
				 tree stride, class loop *op_loop)
{
  term.stride = stride;

  term.inner_likelihood = get_inner_likelihood (stride, term.multiplier);
  if (dump_enabled_p ())
    dump_inner_likelihood (address, term);

  /* To be a versioning opportunity we require:

     - The multiplier applied by TERM is equal to the access size,
       so that when STRIDE is 1, the accesses in successive loop
       iterations are consecutive.

       This is deliberately conservative.  We could relax it to handle
       other cases (such as those with gaps between iterations) if we
       find any real testcases for which it's useful.

     - the stride is applied in the same loop as STMT rather than
       in an outer loop.  Although versioning for strides applied in
       outer loops could help in some cases -- such as enabling
       more loop interchange -- the savings are much lower than for
       inner loops.

     - the stride is an SSA name that is invariant in STMT's loop,
       since otherwise versioning isn't possible.  */
  unsigned HOST_WIDE_INT access_size = address.max_offset - address.min_offset;
  if (term.multiplier == access_size
      && address.loop == op_loop
      && TREE_CODE (stride) == SSA_NAME
      && expr_invariant_in_loop_p (address.loop, stride))
    {
      term.versioning_opportunity_p = true;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, address.stmt, "%T == 1 is a versioning"
			 " opportunity\n", stride);
    }
}

/* See whether address term TERM (which belongs to ADDRESS) is the result
   of multiplying a varying SSA name by a loop-invariant SSA name.
   Return true and update TERM if so.

   This handles both cases that SCEV might handle, such as:

     for (int i = 0; i < n; ++i)
       res += a[i * stride];

   and ones in which the term varies arbitrarily between iterations, such as:

     for (int i = 0; i < n; ++i)
       res += a[index[i] * stride];  */

bool
loop_versioning::find_per_loop_multiplication (address_info &address,
					       address_term_info &term)
{
  gassign *mult = maybe_get_assign (term.expr);
  if (!mult || gimple_assign_rhs_code (mult) != MULT_EXPR)
    return false;

  class loop *mult_loop = loop_containing_stmt (mult);
  if (!loop_outer (mult_loop))
    return false;

  tree op1 = strip_casts (gimple_assign_rhs1 (mult));
  tree op2 = strip_casts (gimple_assign_rhs2 (mult));
  if (TREE_CODE (op1) != SSA_NAME || TREE_CODE (op2) != SSA_NAME)
    return false;

  bool invariant1_p = expr_invariant_in_loop_p (mult_loop, op1);
  bool invariant2_p = expr_invariant_in_loop_p (mult_loop, op2);
  if (invariant1_p == invariant2_p)
    return false;

  /* Make sure that the loop invariant is OP2 rather than OP1.  */
  if (invariant1_p)
    std::swap (op1, op2);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, address.stmt, "address term %T = varying %T"
		     " * loop-invariant %T\n", term.expr, op1, op2);
  analyze_stride (address, term, op2, mult_loop);
  return true;
}

/* Try to use scalar evolutions to find an address stride for TERM,
   which belongs to ADDRESS.  Return true and update TERM if so.

   Here we are interested in any evolution information we can find,
   not just evolutions wrt ADDRESS->LOOP.  For example, if we find that
   an outer loop obviously iterates over the inner dimension of an array,
   that information can help us eliminate worthless versioning opportunities
   in inner loops.  */

bool
loop_versioning::analyze_term_using_scevs (address_info &address,
					   address_term_info &term)
{
  gimple *setter = maybe_get_stmt (term.expr);
  if (!setter)
    return false;

  class loop *wrt_loop = loop_containing_stmt (setter);
  if (!loop_outer (wrt_loop))
    return false;

  tree chrec = strip_casts (analyze_scalar_evolution (wrt_loop, term.expr));
  if (TREE_CODE (chrec) == POLYNOMIAL_CHREC)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, address.stmt,
			 "address term %T = %T\n", term.expr, chrec);

      /* Peel casts and accumulate constant multiplications, up to the
	 limit allowed by M_MAXIMUM_SCALE.  */
      tree stride = strip_casts (CHREC_RIGHT (chrec));
      while (TREE_CODE (stride) == MULT_EXPR
	     && multiply_term_by (term, TREE_OPERAND (stride, 1)))
	stride = strip_casts (TREE_OPERAND (stride, 0));

      gassign *assign;
      while ((assign = maybe_get_assign (stride))
	     && gimple_assign_rhs_code (assign) == MULT_EXPR
	     && multiply_term_by (term, gimple_assign_rhs2 (assign)))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, address.stmt,
			     "looking through %G", assign);
	  stride = strip_casts (gimple_assign_rhs1 (assign));
	}

      analyze_stride (address, term, stride, get_chrec_loop (chrec));
      return true;
    }

  return false;
}

/* Address term TERM is an arbitrary term that provides no versioning
   opportunities.  Analyze it to see whether it contains any likely
   inner strides, so that we don't mistakenly version for other
   (less likely) candidates.

   This copes with invariant innermost indices such as:

     x(i, :) = 100

   where the "i" component of the address is invariant in the loop
   but provides the real inner stride.

   ADDRESS is the address that contains TERM.  */

void
loop_versioning::analyze_arbitrary_term (address_info &address,
					 address_term_info &term)

{
  /* A multiplication offers two potential strides.  Pick the one that
     is most likely to be an innermost stride.  */
  tree expr = term.expr, alt = NULL_TREE;
  gassign *mult = maybe_get_assign (expr);
  if (mult && gimple_assign_rhs_code (mult) == MULT_EXPR)
    {
      expr = strip_casts (gimple_assign_rhs1 (mult));
      alt = strip_casts (gimple_assign_rhs2 (mult));
    }
  term.stride = expr;
  term.inner_likelihood = get_inner_likelihood (expr, term.multiplier);
  if (alt)
    {
      inner_likelihood alt_l = get_inner_likelihood (alt, term.multiplier);
      if (alt_l > term.inner_likelihood)
	{
	  term.stride = alt;
	  term.inner_likelihood = alt_l;
	}
    }
  if (dump_enabled_p ())
    dump_inner_likelihood (address, term);
}

/* Try to identify loop strides in ADDRESS and try to choose realistic
   versioning opportunities based on these strides.

   The main difficulty here isn't finding strides that could be used
   in a version check (that's pretty easy).  The problem instead is to
   avoid versioning for some stride S that is unlikely ever to be 1 at
   runtime.  Versioning for S == 1 on its own would lead to unnecessary
   code bloat, while adding S == 1 to more realistic version conditions
   would lose the optimisation opportunity offered by those other conditions.

   For example, versioning for a stride of 1 in the Fortran code:

     integer :: a(:,:)
     a(1,:) = 1

   is not usually a good idea, since the assignment is iterating over
   an outer dimension and is relatively unlikely to have a stride of 1.
   (It isn't impossible, since the inner dimension might be 1, or the
   array might be transposed.)  Similarly, in:

     integer :: a(:,:), b(:,:)
     b(:,1) = a(1,:)

   b(:,1) is relatively likely to have a stride of 1 while a(1,:) isn't.
   Versioning for when both strides are 1 would lose most of the benefit
   of versioning for b's access.

   The approach we take is as follows:

   - Analyze each term to see whether it has an identifiable stride,
     regardless of which loop applies the stride.

   - Evaluate the likelihood that each such stride is for the innermost
     dimension of an array, on the scale "likely", "don't know" or "unlikely".

   - If there is a single "likely" innermost stride, and that stride is
     applied in the loop that contains STMT, version the loop for when the
     stride is 1.  This deals with the cases in which we're fairly
     confident of doing the right thing, such as the b(:,1) reference above.

   - If there are no "likely" innermost strides, and the loop that contains
     STMT uses a stride that we rated as "don't know", version for when
     that stride is 1.  This is principally used for C code such as:

       for (int i = 0; i < n; ++i)
	 a[i * x] = ...;

     and:

       for (int j = 0; j < n; ++j)
	 for (int i = 0; i < n; ++i)
	   a[i * x + j * y] = ...;

     where nothing in the way "x" and "y" are set gives a hint as to
     whether "i" iterates over the innermost dimension of the array.
     In these situations it seems reasonable to assume the
     programmer has nested the loops appropriately (although of course
     there are examples like GEMM in which this assumption doesn't hold
     for all accesses in the loop).

     This case is also useful for the Fortran equivalent of the
     above C code.  */

void
loop_versioning::analyze_address_fragment (address_info &address)
{
  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, address.stmt, "analyzing address fragment ");
      dump_address_info (MSG_NOTE, address);
      dump_printf (MSG_NOTE, "\n");
    }

  /* Analyze each component of the sum to see whether it involves an
     apparent stride.

     There is an overlap between the addresses that
     find_per_loop_multiplication and analyze_term_using_scevs can handle,
     but the former is much cheaper than SCEV analysis, so try it first.  */
  for (unsigned int i = 0; i < address.terms.length (); ++i)
    if (!find_per_loop_multiplication (address, address.terms[i])
	&& !analyze_term_using_scevs (address, address.terms[i])
	&& !POINTER_TYPE_P (TREE_TYPE (address.terms[i].expr)))
      analyze_arbitrary_term (address, address.terms[i]);

  /* Check for strides that are likely to be for the innermost dimension.

     1. If there is a single likely inner stride, if it is an SSA name,
	and if it is worth versioning the loop for when the SSA name
	equals 1, record that we want to do so.

     2. Otherwise, if there any likely inner strides, bail out.  This means
	one of:

	(a) There are multiple likely inner strides.  This suggests we're
	    confused and be can't be confident of doing the right thing.

	(b) There is a single likely inner stride and it is a constant
	    rather than an SSA name.  This can mean either that the access
	    is a natural one without any variable strides, such as:

	      for (int i = 0; i < n; ++i)
		a[i] += 1;

	    or that a variable stride is applied to an outer dimension,
	    such as:

	      for (int i = 0; i < n; ++i)
		for (int j = 0; j < n; ++j)
		  a[j * stride][i] += 1;

	(c) There is a single likely inner stride, and it is an SSA name,
	    but it isn't a worthwhile versioning opportunity.  This usually
	    means that the variable stride is applied by an outer loop,
	    such as:

	      for (int i = 0; i < n; ++i)
		for (int j = 0; j < n; ++j)
		  a[j][i * stride] += 1;

	    or (using an example with a more natural loop nesting):

	      for (int i = 0; i < n; ++i)
		for (int j = 0; j < n; ++j)
		  a[i][j] += b[i * stride];

	    in cases where b[i * stride] cannot (yet) be hoisted for
	    aliasing reasons.

     3. If there are no likely inner strides, fall through to the next
	set of checks.

     Pointer equality is enough to check for uniqueness in (1), since we
     only care about SSA names.  */
  tree chosen_stride = NULL_TREE;
  tree version_stride = NULL_TREE;
  for (unsigned int i = 0; i < address.terms.length (); ++i)
    if (chosen_stride != address.terms[i].stride
	&& address.terms[i].inner_likelihood == INNER_LIKELY)
      {
	if (chosen_stride)
	  return;
	chosen_stride = address.terms[i].stride;
	if (address.terms[i].versioning_opportunity_p)
	  version_stride = chosen_stride;
      }

  /* If there are no likely inner strides, see if there is a single
     versioning opportunity for a stride that was rated as INNER_DONT_KNOW.
     See the comment above the function for the cases that this code
     handles.  */
  if (!chosen_stride)
    for (unsigned int i = 0; i < address.terms.length (); ++i)
      if (version_stride != address.terms[i].stride
	  && address.terms[i].inner_likelihood == INNER_DONT_KNOW
	  && address.terms[i].versioning_opportunity_p)
	{
	  if (version_stride)
	    return;
	  version_stride = address.terms[i].stride;
	}

  if (version_stride)
    version_for_unity (address.stmt, version_stride);
}

/* Treat EXPR * MULTIPLIER + OFFSET as a fragment of an address that addresses
   TYPE_SIZE bytes and record this address fragment for later processing.
   STMT is the statement that contains the address.  */

void
loop_versioning::record_address_fragment (gimple *stmt,
					  unsigned HOST_WIDE_INT type_size,
					  tree expr,
					  unsigned HOST_WIDE_INT multiplier,
					  HOST_WIDE_INT offset)
{
  /* We're only interested in computed values.  */
  if (TREE_CODE (expr) != SSA_NAME)
    return;

  /* Quick exit if no part of the address is calculated in STMT's loop,
     since such addresses have no versioning opportunities.  */
  class loop *loop = loop_containing_stmt (stmt);
  if (expr_invariant_in_loop_p (loop, expr))
    return;

  /* Set up an address_info for EXPR * MULTIPLIER.  */
  address_info *address = XOBNEW (&m_obstack, address_info);
  new (address) address_info;
  address->stmt = stmt;
  address->loop = loop;
  address->base = NULL_TREE;
  address->terms.quick_grow (1);
  address->terms[0].expr = expr;
  address->terms[0].multiplier = multiplier;
  address->terms[0].stride = NULL_TREE;
  address->terms[0].inner_likelihood = INNER_UNLIKELY;
  address->terms[0].versioning_opportunity_p = false;
  address->min_offset = offset;

  /* Peel apart the expression into a sum of address_terms, where each
     term is multiplied by a constant.  Treat a + b and a - b the same,
     since it doesn't matter for our purposes whether an address is
     increasing or decreasing.  Distribute (a + b) * constant into
     a * constant + b * constant.

     We don't care which loop each term belongs to, since we want to
     examine as many candidate strides as possible when determining
     which is likely to be for the innermost dimension.  We therefore
     don't limit the search to statements in STMT's loop.  */
  for (unsigned int i = 0; i < address->terms.length (); )
    {
      if (gassign *assign = maybe_get_assign (address->terms[i].expr))
	{
	  tree_code code = gimple_assign_rhs_code (assign);
	  if (code == PLUS_EXPR
	      || code == POINTER_PLUS_EXPR
	      || code == MINUS_EXPR)
	    {
	      tree op1 = gimple_assign_rhs1 (assign);
	      tree op2 = gimple_assign_rhs2 (assign);
	      if (TREE_CODE (op2) == INTEGER_CST)
		{
		  address->terms[i].expr = strip_casts (op1);
		  /* This is heuristic only, so don't worry about truncation
		     or overflow.  */
		  address->min_offset += (TREE_INT_CST_LOW (op2)
					  * address->terms[i].multiplier);
		  continue;
		}
	      else if (address->terms.length () < address_info::MAX_TERMS)
		{
		  unsigned int j = address->terms.length ();
		  address->terms.quick_push (address->terms[i]);
		  address->terms[i].expr = strip_casts (op1);
		  address->terms[j].expr = strip_casts (op2);
		  continue;
		}
	    }
	  if (code == MULT_EXPR)
	    {
	      tree op1 = gimple_assign_rhs1 (assign);
	      tree op2 = gimple_assign_rhs2 (assign);
	      if (multiply_term_by (address->terms[i], op2))
		{
		  address->terms[i].expr = strip_casts (op1);
		  continue;
		}
	    }
	  if (CONVERT_EXPR_CODE_P (code))
	    {
	      tree op1 = gimple_assign_rhs1 (assign);
	      address->terms[i].expr = strip_casts (op1);
	      continue;
	    }
	}
      i += 1;
    }

  /* Peel off any symbolic pointer.  */
  if (TREE_CODE (address->terms[0].expr) != SSA_NAME
      && address->terms[0].multiplier == 1)
    {
      if (address->terms.length () == 1)
	{
	  obstack_free (&m_obstack, address);
	  return;
	}
      address->base = address->terms[0].expr;
      address->terms.ordered_remove (0);
    }

  /* Require all remaining terms to be SSA names.  (This could be false
     for unfolded statements, but they aren't worth dealing with.)  */
  for (unsigned int i = 0; i < address->terms.length (); ++i)
    if (TREE_CODE (address->terms[i].expr) != SSA_NAME)
      {
	obstack_free (&m_obstack, address);
	return;
      }

  /* The loop above set MIN_OFFSET based on the first byte of the
     referenced data.  Calculate the end + 1.  */
  address->max_offset = address->min_offset + type_size;

  /* Put the terms into a canonical order for the hash table lookup below.  */
  address->terms.qsort (compare_address_terms);

  if (dump_enabled_p ())
    {
      dump_printf_loc (MSG_NOTE, stmt, "recording address fragment %T", expr);
      if (multiplier != 1)
	dump_printf (MSG_NOTE, " * %wd", multiplier);
      dump_printf (MSG_NOTE, " = ");
      dump_address_info (MSG_NOTE, *address);
      dump_printf (MSG_NOTE, "\n");
    }

  /* Pool address information with the same terms (but potentially
     different offsets).  */
  address_info **slot = m_address_table.find_slot (address, INSERT);
  if (address_info *old_address = *slot)
    {
      /* We've already seen an address with the same terms.  Extend the
	 offset range to account for this access.  Doing this can paper
	 over gaps, such as in:

	   a[i * stride * 4] + a[i * stride * 4 + 3];

	 where nothing references "+ 1" or "+ 2".  However, the vectorizer
	 handles such gapped accesses without problems, so it's not worth
	 trying to exclude them.  */
      if (old_address->min_offset > address->min_offset)
	old_address->min_offset = address->min_offset;
      if (old_address->max_offset < address->max_offset)
	old_address->max_offset = address->max_offset;
      obstack_free (&m_obstack, address);
    }
  else
    {
      /* This is the first time we've seen an address with these terms.  */
      *slot = address;
      m_address_list.safe_push (address);
    }
}

/* Analyze expression EXPR, which occurs in STMT.  */

void
loop_versioning::analyze_expr (gimple *stmt, tree expr)
{
  unsigned HOST_WIDE_INT type_size;

  while (handled_component_p (expr))
    {
      /* See whether we can use versioning to avoid a multiplication
	 in an array index.  */
      if (TREE_CODE (expr) == ARRAY_REF
	  && acceptable_type_p (TREE_TYPE (expr), &type_size))
	record_address_fragment (stmt, type_size,
				 TREE_OPERAND (expr, 1), type_size, 0);
      expr = TREE_OPERAND (expr, 0);
    }

  /* See whether we can use versioning to avoid a multiplication
     in the pointer calculation of a MEM_REF.  */
  if (TREE_CODE (expr) == MEM_REF
      && acceptable_type_p (TREE_TYPE (expr), &type_size))
    record_address_fragment (stmt, type_size, TREE_OPERAND (expr, 0), 1,
			     /* This is heuristic only, so don't worry
				about truncation or overflow.  */
			     TREE_INT_CST_LOW (TREE_OPERAND (expr, 1)));

  /* These would be easy to handle if they existed at this stage.  */
  gcc_checking_assert (TREE_CODE (expr) != TARGET_MEM_REF);
}

/* Analyze all the statements in BB looking for useful version checks.
   Return true on success, false if something prevents the block from
   being versioned.  */

bool
loop_versioning::analyze_block (basic_block bb)
{
  class loop *loop = bb->loop_father;
  loop_info &li = get_loop_info (loop);
  for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
       gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      if (is_gimple_debug (stmt))
	continue;

      if (expensive_stmt_p (stmt))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, stmt, "expensive statement"
			     " prevents versioning: %G", stmt);
	  return false;
	}

      /* Only look for direct versioning opportunities in inner loops
	 since the benefit tends to be much smaller for outer loops.  */
      if (!loop->inner)
	{
	  unsigned int nops = gimple_num_ops (stmt);
	  for (unsigned int i = 0; i < nops; ++i)
	    if (tree op = gimple_op (stmt, i))
	      analyze_expr (stmt, op);
	}

      /* The point of the instruction limit is to prevent excessive
	 code growth, so this is a size-based estimate even though
	 the optimization is aimed at speed.  */
      li.num_insns += estimate_num_insns (stmt, &eni_size_weights);
    }

  return true;
}

/* Analyze all the blocks in the function, looking for useful version checks.
   Return true if we found one.  */

bool
loop_versioning::analyze_blocks ()
{
  AUTO_DUMP_SCOPE ("analyze_blocks",
		   dump_user_location_t::from_function_decl (m_fn->decl));

  /* For now we don't try to version the whole function, although
     versioning at that level could be useful in some cases.  */
  get_loop_info (get_loop (m_fn, 0)).rejected_p = true;

  class loop *loop;
  FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
    {
      loop_info &linfo = get_loop_info (loop);

      /* Ignore cold loops.  */
      if (!optimize_loop_for_speed_p (loop))
	linfo.rejected_p = true;

      /* See whether an inner loop prevents versioning of this loop.  */
      if (!linfo.rejected_p)
	for (class loop *inner = loop->inner; inner; inner = inner->next)
	  if (get_loop_info (inner).rejected_p)
	    {
	      linfo.rejected_p = true;
	      break;
	    }

      /* If versioning the loop is still a possibility, examine the
	 statements in the loop to look for versioning opportunities.  */
      if (!linfo.rejected_p)
	{
	  void *start_point = obstack_alloc (&m_obstack, 0);

	  for (basic_block bb = linfo.block_list; bb;
	       bb = m_next_block_in_loop[bb->index])
	    if (!analyze_block (bb))
	      {
		linfo.rejected_p = true;
		break;
	    }

	  if (!linfo.rejected_p)
	    {
	      /* Process any queued address fragments, now that we have
		 complete grouping information.  */
	      address_info *address;
	      unsigned int i;
	      FOR_EACH_VEC_ELT (m_address_list, i, address)
		analyze_address_fragment (*address);
	    }

	  m_address_table.empty ();
	  m_address_list.truncate (0);
	  obstack_free (&m_obstack, start_point);
	}
    }

  return m_num_conditions != 0;
}

/* Use the ranges in VRS to remove impossible versioning conditions from
   LOOP.  */

void
loop_versioning::prune_loop_conditions (class loop *loop, vr_values *vrs)
{
  loop_info &li = get_loop_info (loop);

  int to_remove = -1;
  bitmap_iterator bi;
  unsigned int i;
  EXECUTE_IF_SET_IN_BITMAP (&li.unity_names, 0, i, bi)
    {
      tree name = ssa_name (i);
      const value_range_equiv *vr = vrs->get_value_range (name);
      if (vr && !vr->may_contain_p (build_one_cst (TREE_TYPE (name))))
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, find_loop_location (loop),
			     "%T can never be 1 in this loop\n", name);

	  if (to_remove >= 0)
	    bitmap_clear_bit (&li.unity_names, to_remove);
	  to_remove = i;
	  m_num_conditions -= 1;
	}
    }
  if (to_remove >= 0)
    bitmap_clear_bit (&li.unity_names, to_remove);
}

/* Remove any scheduled loop version conditions that will never be true.
   Return true if any remain.  */

bool
loop_versioning::prune_conditions ()
{
  AUTO_DUMP_SCOPE ("prune_loop_conditions",
		   dump_user_location_t::from_function_decl (m_fn->decl));

  calculate_dominance_info (CDI_DOMINATORS);
  lv_dom_walker dom_walker (*this);
  dom_walker.walk (ENTRY_BLOCK_PTR_FOR_FN (m_fn));
  return m_num_conditions != 0;
}

/* Merge the version checks for INNER into immediately-enclosing loop
   OUTER.  */

void
loop_versioning::merge_loop_info (class loop *outer, class loop *inner)
{
  loop_info &inner_li = get_loop_info (inner);
  loop_info &outer_li = get_loop_info (outer);

  if (dump_enabled_p ())
    {
      bitmap_iterator bi;
      unsigned int i;
      EXECUTE_IF_SET_IN_BITMAP (&inner_li.unity_names, 0, i, bi)
	if (!bitmap_bit_p (&outer_li.unity_names, i))
	  dump_printf_loc (MSG_NOTE, find_loop_location (inner),
			   "hoisting check that %T == 1 to outer loop\n",
			   ssa_name (i));
    }

  bitmap_ior_into (&outer_li.unity_names, &inner_li.unity_names);
  if (loop_depth (outer_li.outermost) < loop_depth (inner_li.outermost))
    outer_li.outermost = inner_li.outermost;
}

/* Add LOOP to the queue of loops to version.  */

void
loop_versioning::add_loop_to_queue (class loop *loop)
{
  loop_info &li = get_loop_info (loop);

  if (dump_enabled_p ())
    dump_printf_loc (MSG_NOTE, find_loop_location (loop),
		     "queuing this loop for versioning\n");
  m_loops_to_version.safe_push (loop);

  /* Don't try to version superloops.  */
  li.rejected_p = true;
}

/* Decide whether the cost model would allow us to version LOOP,
   either directly or as part of a parent loop, and return true if so.
   This does not imply that the loop is actually worth versioning in its
   own right, just that it would be valid to version it if something
   benefited.

   We have already made this decision for all inner loops of LOOP.  */

bool
loop_versioning::decide_whether_loop_is_versionable (class loop *loop)
{
  loop_info &li = get_loop_info (loop);

  if (li.rejected_p)
    return false;

  /* Examine the decisions made for inner loops.  */
  for (class loop *inner = loop->inner; inner; inner = inner->next)
    {
      loop_info &inner_li = get_loop_info (inner);
      if (inner_li.rejected_p)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, find_loop_location (loop),
			     "not versioning this loop because one of its"
			     " inner loops should not be versioned\n");
	  return false;
	}

      if (inner_li.worth_versioning_p ())
	li.subloops_benefit_p = true;

      /* Accumulate the number of instructions from subloops that are not
	 the innermost, or that don't benefit from versioning.  Only the
	 instructions from innermost loops that benefit from versioning
	 should be weighed against loop-versioning-max-inner-insns;
	 everything else should be weighed against
	 loop-versioning-max-outer-insns.  */
      if (!inner_li.worth_versioning_p () || inner->inner)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_NOTE, find_loop_location (loop),
			     "counting %d instructions from this loop"
			     " against its parent loop\n", inner_li.num_insns);
	  li.num_insns += inner_li.num_insns;
	}
    }

  /* Enforce the size limits.  */
  if (li.worth_versioning_p ())
    {
      unsigned int max_num_insns = max_insns_for_loop (loop);
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, find_loop_location (loop),
			 "this loop has %d instructions, against"
			 " a versioning limit of %d\n",
			 li.num_insns, max_num_insns);
      if (li.num_insns > max_num_insns)
	{
	  if (dump_enabled_p ())
	    dump_printf_loc (MSG_MISSED_OPTIMIZATION
			     | MSG_PRIORITY_USER_FACING,
			     find_loop_location (loop),
			     "this loop is too big to version");
	  return false;
	}
    }

  /* Hoist all version checks from subloops to this loop.  */
  for (class loop *subloop = loop->inner; subloop; subloop = subloop->next)
    merge_loop_info (loop, subloop);

  return true;
}

/* Decide which loops to version and add them to the versioning queue.
   Return true if there are any loops to version.  */

bool
loop_versioning::make_versioning_decisions ()
{
  AUTO_DUMP_SCOPE ("make_versioning_decisions",
		   dump_user_location_t::from_function_decl (m_fn->decl));

  class loop *loop;
  FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
    {
      loop_info &linfo = get_loop_info (loop);
      if (decide_whether_loop_is_versionable (loop))
	{
	  /* Commit to versioning LOOP directly if we can't hoist the
	     version checks any further.  */
	  if (linfo.worth_versioning_p ()
	      && (loop_depth (loop) == 1 || linfo.outermost == loop))
	    add_loop_to_queue (loop);
	}
      else
	{
	  /* We can't version this loop, so individually version any
	     subloops that would benefit and haven't been versioned yet.  */
	  linfo.rejected_p = true;
	  for (class loop *subloop = loop->inner; subloop;
	       subloop = subloop->next)
	    if (get_loop_info (subloop).worth_versioning_p ())
	      add_loop_to_queue (subloop);
	}
    }

  return !m_loops_to_version.is_empty ();
}

/* Attempt to implement loop versioning for LOOP, using the information
   cached in the associated loop_info.  Return true on success.  */

bool
loop_versioning::version_loop (class loop *loop)
{
  loop_info &li = get_loop_info (loop);

  /* Build up a condition that selects the original loop instead of
     the simplified loop.  */
  tree cond = boolean_false_node;
  bitmap_iterator bi;
  unsigned int i;
  EXECUTE_IF_SET_IN_BITMAP (&li.unity_names, 0, i, bi)
    {
      tree name = ssa_name (i);
      tree ne_one = fold_build2 (NE_EXPR, boolean_type_node, name,
				 build_one_cst (TREE_TYPE (name)));
      cond = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, cond, ne_one);
    }

  /* Convert the condition into a suitable gcond.  */
  gimple_seq stmts = NULL;
  cond = force_gimple_operand_1 (cond, &stmts, is_gimple_condexpr, NULL_TREE);

  /* Version the loop.  */
  initialize_original_copy_tables ();
  basic_block cond_bb;
  li.optimized_loop = loop_version (loop, cond, &cond_bb,
				    profile_probability::unlikely (),
				    profile_probability::likely (),
				    profile_probability::unlikely (),
				    profile_probability::likely (), true);
  free_original_copy_tables ();
  if (!li.optimized_loop)
    {
      if (dump_enabled_p ())
	dump_printf_loc (MSG_MISSED_OPTIMIZATION, find_loop_location (loop),
			 "tried but failed to version this loop for when"
			 " certain strides are 1\n");
      return false;
    }

  if (dump_enabled_p ())
    dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, find_loop_location (loop),
		     "versioned this loop for when certain strides are 1\n");

  /* Insert the statements that feed COND.  */
  if (stmts)
    {
      gimple_stmt_iterator gsi = gsi_last_bb (cond_bb);
      gsi_insert_seq_before (&gsi, stmts, GSI_SAME_STMT);
    }

  return true;
}

/* Attempt to version all loops in the versioning queue.  */

void
loop_versioning::implement_versioning_decisions ()
{
  /* No AUTO_DUMP_SCOPE here since all messages are top-level and
     user-facing at this point.  */

  bool any_succeeded_p = false;
  class loop *loop;
  unsigned int i;
  FOR_EACH_VEC_ELT (m_loops_to_version, i, loop)
    if (version_loop (loop))
      any_succeeded_p = true;
  if (!any_succeeded_p)
    return;

  update_ssa (TODO_update_ssa);

  /* Simplify the new loop, which is used when COND is false.  */
  FOR_EACH_VEC_ELT (m_loops_to_version, i, loop)
    {
      loop_info &linfo = get_loop_info (loop);
      if (linfo.optimized_loop)
	name_prop (linfo).substitute_and_fold (linfo.optimized_loop->header);
    }
}

/* Run the pass and return a set of TODO_* flags.  */

unsigned int
loop_versioning::run ()
{
  gcc_assert (scev_initialized_p ());

  if (analyze_blocks ()
      && prune_conditions ()
      && make_versioning_decisions ())
    implement_versioning_decisions ();

  return 0;
}

/* Loop versioning pass.  */

const pass_data pass_data_loop_versioning =
{
  GIMPLE_PASS, /* type */
  "lversion", /* name */
  OPTGROUP_LOOP, /* optinfo_flags */
  TV_LOOP_VERSIONING, /* tv_id */
  PROP_cfg, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_loop_versioning : public gimple_opt_pass
{
public:
  pass_loop_versioning (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_loop_versioning, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return flag_version_loops_for_strides; }
  virtual unsigned int execute (function *);
};

unsigned int
pass_loop_versioning::execute (function *fn)
{
  if (number_of_loops (fn) <= 1)
    return 0;

  return loop_versioning (fn).run ();
}

} // anon namespace

gimple_opt_pass *
make_pass_loop_versioning (gcc::context *ctxt)
{
  return new pass_loop_versioning (ctxt);
}
12'>6712 6713 6714 6715 6716 6717 6718 6719 6720 6721 6722 6723 6724 6725 6726 6727 6728 6729 6730 6731 6732 6733 6734 6735 6736 6737 6738 6739 6740 6741 6742 6743 6744 6745 6746 6747 6748 6749 6750 6751 6752 6753 6754 6755 6756 6757 6758 6759 6760 6761 6762 6763 6764 6765 6766 6767 6768 6769 6770 6771 6772 6773 6774 6775 6776 6777 6778 6779 6780 6781 6782 6783 6784 6785 6786 6787 6788 6789 6790 6791 6792 6793 6794 6795 6796 6797 6798 6799 6800 6801 6802 6803 6804 6805 6806 6807 6808 6809 6810 6811 6812 6813 6814 6815 6816 6817 6818 6819 6820 6821 6822 6823 6824 6825 6826 6827 6828 6829 6830 6831 6832 6833 6834 6835 6836 6837 6838 6839 6840 6841 6842 6843 6844 6845 6846 6847 6848 6849 6850 6851 6852 6853 6854 6855 6856 6857 6858 6859 6860 6861 6862 6863 6864 6865 6866 6867 6868 6869 6870 6871 6872 6873 6874 6875 6876 6877 6878 6879 6880 6881 6882 6883 6884 6885 6886 6887 6888 6889 6890 6891 6892 6893 6894 6895 6896 6897 6898 6899 6900 6901 6902 6903 6904 6905 6906 6907 6908 6909 6910 6911 6912 6913 6914 6915 6916 6917 6918 6919 6920 6921 6922 6923 6924 6925 6926 6927 6928 6929 6930 6931 6932 6933 6934 6935 6936 6937 6938 6939 6940 6941 6942 6943 6944 6945 6946 6947 6948 6949 6950 6951 6952 6953 6954 6955 6956 6957 6958 6959 6960 6961 6962 6963 6964 6965 6966 6967 6968 6969 6970 6971 6972 6973 6974 6975 6976 6977 6978 6979 6980 6981 6982 6983 6984 6985 6986 6987 6988 6989 6990 6991 6992 6993 6994 6995 6996 6997 6998 6999 7000 7001 7002 7003 7004 7005 7006 7007 7008 7009 7010 7011 7012 7013 7014 7015 7016 7017 7018 7019 7020 7021 7022 7023 7024 7025 7026 7027 7028 7029 7030 7031 7032 7033 7034 7035 7036 7037 7038 7039 7040 7041 7042 7043 7044 7045 7046 7047 7048 7049 7050 7051 7052 7053 7054 7055 7056 7057 7058 7059 7060 7061 7062 7063 7064 7065 7066 7067 7068 7069 7070 7071 7072 7073 7074 7075 7076 7077 7078 7079 7080 7081 7082 7083 7084 7085 7086 7087 7088 7089 7090 7091 7092 7093 7094 7095 7096 7097 7098 7099 7100 7101 7102 7103 7104 7105 7106 7107 7108 7109 7110 7111 7112 7113 7114 7115 7116 7117 7118 7119 7120 7121 7122 7123 7124 7125 7126 7127 7128 7129 7130 7131 7132 7133 7134 7135 7136 7137 7138 7139 7140 7141 7142 7143 7144 7145 7146 7147 7148 7149 7150 7151 7152 7153 7154 7155 7156 7157 7158 7159 7160 7161 7162 7163 7164 7165 7166 7167 7168 7169 7170 7171 7172 7173 7174 7175 7176 7177 7178 7179 7180 7181 7182 7183 7184 7185 7186 7187 7188 7189 7190 7191 7192 7193 7194 7195 7196 7197 7198 7199 7200 7201 7202 7203 7204 7205 7206 7207 7208 7209 7210 7211 7212 7213 7214 7215 7216 7217 7218 7219 7220 7221 7222 7223 7224 7225 7226 7227 7228 7229 7230 7231 7232 7233 7234 7235 7236 7237 7238 7239 7240 7241 7242 7243 7244 7245 7246 7247 7248 7249 7250 7251 7252 7253 7254 7255 7256 7257 7258 7259 7260 7261 7262 7263 7264 7265 7266 7267 7268 7269 7270 7271 7272 7273 7274 7275 7276 7277 7278 7279 7280 7281 7282 7283 7284 7285 7286 7287 7288 7289 7290 7291 7292 7293 7294 7295 7296 7297 7298 7299 7300 7301 7302 7303 7304 7305 7306 7307 7308 7309 7310 7311 7312 7313 7314 7315 7316 7317 7318 7319 7320 7321 7322 7323 7324 7325 7326 7327 7328 7329 7330 7331 7332 7333 7334 7335 7336 7337 7338 7339 7340 7341 7342 7343 7344 7345 7346 7347 7348 7349 7350 7351 7352 7353 7354 7355 7356 7357 7358 7359 7360 7361 7362 7363 7364 7365 7366 7367 7368 7369 7370 7371 7372 7373 7374 7375 7376 7377 7378 7379 7380 7381 7382 7383 7384 7385 7386 7387 7388 7389 7390 7391 7392 7393 7394 7395 7396 7397 7398 7399 7400 7401 7402 7403 7404 7405 7406 7407 7408 7409 7410 7411 7412 7413 7414 7415 7416 7417 7418 7419 7420 7421 7422 7423 7424 7425 7426 7427 7428 7429 7430 7431 7432 7433 7434 7435 7436 7437 7438 7439 7440 7441 7442 7443 7444 7445 7446 7447 7448 7449 7450 7451 7452 7453 7454 7455 7456 7457 7458 7459 7460 7461 7462 7463 7464 7465 7466 7467 7468 7469 7470 7471 7472 7473 7474 7475 7476 7477 7478 7479 7480 7481 7482 7483 7484 7485 7486 7487 7488 7489 7490 7491 7492 7493 7494 7495 7496 7497 7498 7499 7500 7501 7502 7503 7504 7505 7506 7507 7508 7509 7510 7511 7512 7513 7514 7515 7516 7517 7518 7519 7520 7521 7522 7523 7524 7525 7526 7527 7528 7529 7530 7531 7532 7533 7534 7535 7536 7537 7538 7539 7540 7541 7542 7543 7544 7545 7546 7547 7548 7549 7550 7551 7552 7553 7554 7555 7556 7557 7558 7559 7560 7561 7562 7563 7564 7565 7566 7567 7568 7569 7570 7571 7572 7573 7574 7575 7576 7577 7578 7579 7580 7581 7582 7583 7584 7585 7586 7587 7588 7589 7590 7591 7592 7593 7594 7595 7596 7597 7598 7599 7600 7601 7602 7603 7604 7605 7606 7607 7608 7609 7610 7611 7612 7613 7614 7615 7616 7617 7618 7619 7620 7621 7622 7623 7624 7625 7626 7627 7628 7629 7630 7631 7632 7633 7634 7635 7636 7637 7638 7639 7640 7641 7642 7643 7644 7645 7646 7647 7648 7649 7650 7651 7652 7653 7654 7655 7656 7657 7658 7659 7660 7661 7662 7663 7664 7665 7666 7667 7668 7669 7670 7671 7672 7673 7674 7675 7676 7677 7678 7679 7680 7681 7682 7683 7684 7685 7686 7687 7688 7689 7690 7691 7692 7693 7694 7695 7696 7697 7698 7699 7700 7701 7702 7703 7704 7705 7706 7707 7708 7709 7710 7711 7712 7713 7714 7715 7716 7717 7718 7719 7720 7721 7722 7723 7724 7725 7726 7727 7728 7729 7730 7731 7732 7733 7734 7735 7736 7737 7738 7739 7740 7741 7742 7743 7744 7745 7746 7747 7748 7749 7750 7751 7752 7753 7754 7755 7756 7757 7758 7759 7760 7761 7762 7763 7764 7765 7766 7767 7768 7769 7770 7771 7772 7773 7774 7775 7776 7777 7778 7779 7780 7781 7782 7783 7784 7785 7786 7787 7788 7789 7790 7791 7792 7793 7794 7795 7796 7797 7798 7799 7800 7801 7802 7803 7804 7805 7806 7807 7808 7809 7810 7811 7812 7813 7814 7815 7816 7817 7818 7819 7820 7821 7822 7823 7824 7825 7826 7827 7828 7829 7830 7831 7832 7833 7834 7835 7836 7837 7838 7839 7840 7841 7842 7843 7844 7845 7846 7847 7848 7849 7850 7851 7852 7853 7854 7855 7856 7857 7858 7859 7860 7861 7862 7863 7864 7865 7866 7867 7868 7869 7870 7871 7872 7873 7874 7875 7876 7877 7878 7879 7880 7881 7882 7883 7884 7885 7886 7887 7888 7889 7890 7891 7892 7893 7894 7895 7896 7897 7898 7899 7900 7901 7902 7903 7904 7905 7906 7907 7908 7909 7910 7911 7912 7913 7914 7915 7916 7917 7918 7919 7920 7921 7922 7923 7924 7925 7926 7927 7928 7929 7930 7931 7932 7933 7934 7935 7936 7937 7938 7939 7940 7941 7942 7943 7944 7945 7946 7947 7948 7949 7950 7951 7952 7953 7954 7955 7956 7957 7958 7959 7960 7961 7962 7963 7964 7965 7966 7967 7968 7969 7970 7971 7972 7973 7974 7975 7976 7977 7978 7979 7980 7981 7982 7983 7984 7985 7986 7987 7988 7989 7990 7991 7992 7993 7994 7995 7996 7997 7998 7999 8000 8001 8002 8003 8004 8005 8006 8007 8008 8009 8010 8011 8012 8013 8014 8015 8016 8017 8018 8019 8020 8021 8022 8023 8024 8025 8026 8027 8028 8029 8030 8031 8032 8033 8034 8035 8036 8037 8038 8039 8040 8041 8042 8043 8044 8045 8046 8047 8048 8049 8050 8051 8052 8053 8054 8055 8056 8057 8058 8059 8060 8061 8062 8063 8064 8065 8066 8067 8068 8069 8070 8071 8072 8073 8074 8075 8076 8077 8078 8079 8080 8081 8082 8083 8084 8085 8086 8087 8088 8089 8090 8091 8092 8093 8094 8095 8096 8097 8098 8099 8100 8101 8102 8103 8104 8105 8106 8107 8108 8109 8110 8111 8112 8113 8114 8115 8116 8117 8118 8119 8120 8121 8122 8123 8124 8125 8126 8127 8128 8129 8130 8131 8132 8133 8134 8135 8136 8137 8138 8139 8140 8141 8142 8143 8144 8145 8146 8147 8148 8149 8150 8151 8152 8153 8154 8155 8156 8157 8158 8159 8160 8161 8162 8163 8164 8165 8166 8167 8168 8169 8170 8171 8172 8173 8174 8175 8176 8177 8178 8179 8180 8181 8182 8183 8184 8185 8186 8187 8188 8189 8190 8191 8192 8193 8194 8195 8196 8197 8198 8199 8200 8201 8202 8203 8204 8205 8206 8207 8208 8209 8210 8211 8212 8213 8214 8215 8216 8217 8218 8219 8220 8221 8222 8223 8224 8225 8226 8227 8228 8229 8230 8231 8232 8233 8234 8235 8236 8237 8238 8239 8240 8241 8242 8243 8244 8245 8246 8247 8248 8249 8250 8251 8252 8253 8254 8255 8256 8257 8258 8259 8260 8261 8262 8263 8264 8265 8266 8267 8268 8269 8270 8271 8272 8273 8274 8275 8276 8277 8278 8279 8280 8281 8282 8283 8284 8285 8286 8287 8288 8289 8290 8291 8292 8293 8294 8295 8296 8297 8298 8299 8300 8301 8302 8303 8304 8305 8306 8307 8308 8309 8310 8311 8312 8313 8314 8315 8316 8317 8318 8319 8320 8321 8322 8323 8324 8325 8326 8327 8328 8329 8330 8331 8332 8333 8334 8335 8336 8337 8338 8339 8340 8341 8342 8343 8344 8345 8346 8347 8348 8349 8350 8351 8352 8353 8354 8355 8356 8357 8358 8359 8360 8361 8362 8363 8364 8365 8366 8367 8368 8369 8370 8371 8372 8373 8374 8375 8376 8377 8378 8379 8380 8381 8382 8383 8384 8385 8386 8387 8388 8389 8390 8391 8392 8393 8394 8395 8396 8397 8398 8399 8400 8401 8402 8403 8404 8405 8406 8407 8408 8409 8410 8411 8412 8413 8414 8415 8416 8417 8418 8419 8420 8421 8422 8423 8424 8425 8426 8427 8428 8429 8430 8431 8432 8433 8434 8435 8436 8437 8438 8439 8440 8441 8442 8443 8444 8445 8446 8447 8448 8449 8450 8451 8452 8453 8454 8455 8456 8457 8458 8459 8460 8461 8462 8463 8464 8465 8466 8467 8468 8469 8470 8471 8472 8473 8474 8475 8476 8477 8478 8479 8480 8481 8482 8483 8484 8485 8486 8487 8488 8489 8490 8491 8492 8493 8494 8495 8496 8497 8498 8499 8500 8501 8502 8503 8504 8505 8506 8507 8508 8509 8510 8511 8512 8513 8514 8515 8516 8517 8518 8519 8520 8521 8522 8523 8524 8525 8526 8527 8528 8529 8530 8531 8532 8533 8534 8535 8536 8537 8538 8539 8540 8541 8542 8543 8544 8545 8546 8547 8548 8549 8550 8551 8552 8553 8554 8555 8556 8557 8558 8559 8560 8561 8562 8563 8564 8565 8566 8567 8568 8569 8570 8571 8572 8573 8574 8575 8576 8577 8578 8579 8580 8581 8582 8583 8584 8585 8586 8587 8588 8589 8590 8591 8592 8593 8594 8595 8596 8597 8598 8599 8600 8601 8602 8603 8604 8605 8606 8607 8608 8609 8610 8611 8612 8613 8614 8615 8616 8617 8618 8619 8620 8621 8622 8623 8624 8625 8626 8627 8628 8629 8630 8631 8632 8633 8634 8635 8636 8637 8638 8639 8640 8641 8642 8643 8644 8645 8646 8647 8648 8649 8650 8651 8652 8653 8654 8655 8656 8657 8658 8659 8660 8661 8662 8663 8664 8665 8666 8667 8668 8669 8670 8671 8672 8673 8674 8675 8676 8677 8678 8679 8680 8681 8682 8683 8684 8685 8686 8687 8688 8689 8690 8691 8692 8693 8694 8695 8696 8697 8698 8699 8700 8701 8702 8703 8704 8705 8706 8707 8708 8709 8710 8711 8712 8713 8714 8715 8716 8717 8718 8719 8720 8721 8722 8723 8724 8725 8726 8727 8728 8729 8730 8731 8732 8733 8734 8735 8736 8737 8738 8739 8740 8741 8742 8743 8744 8745 8746 8747 8748 8749 8750 8751 8752 8753 8754 8755 8756 8757 8758 8759 8760 8761 8762 8763 8764 8765 8766 8767 8768 8769 8770 8771 8772 8773 8774 8775 8776 8777 8778 8779 8780 8781 8782 8783 8784 8785 8786 8787 8788 8789 8790 8791 8792 8793 8794 8795 8796 8797 8798 8799 8800 8801 8802 8803 8804 8805 8806 8807 8808 8809 8810 8811 8812 8813 8814 8815 8816 8817 8818 8819 8820 8821 8822 8823 8824 8825 8826 8827 8828 8829 8830 8831 8832 8833 8834 8835 8836 8837 8838 8839 8840 8841 8842 8843 8844 8845 8846 8847 8848 8849 8850 8851 8852 8853 8854 8855 8856 8857 8858 8859 8860 8861 8862 8863 8864 8865 8866 8867 8868 8869 8870 8871 8872 8873 8874 8875 8876 8877 8878 8879 8880 8881 8882 8883 8884 8885 8886 8887 8888 8889 8890 8891 8892 8893 8894 8895 8896 8897 8898 8899 8900 8901 8902 8903 8904 8905 8906 8907 8908 8909 8910 8911 8912 8913 8914 8915 8916 8917 8918 8919 8920 8921 8922 8923 8924 8925 8926 8927 8928 8929 8930 8931 8932 8933 8934 8935 8936 8937 8938 8939 8940 8941 8942 8943 8944 8945 8946 8947 8948 8949 8950 8951 8952 8953 8954 8955 8956 8957 8958 8959 8960 8961 8962 8963 8964 8965 8966 8967 8968 8969 8970 8971 8972 8973 8974 8975 8976 8977 8978 8979 8980 8981 8982 8983 8984 8985 8986 8987 8988 8989 8990 8991 8992 8993 8994 8995 8996 8997 8998 8999 9000 9001 9002 9003 9004 9005 9006 9007 9008 9009 9010 9011 9012 9013 9014 9015 9016 9017 9018 9019 9020 9021 9022 9023 9024 9025 9026 9027 9028 9029 9030 9031 9032 9033 9034 9035 9036 9037 9038 9039 9040 9041 9042 9043 9044 9045 9046 9047 9048 9049 9050 9051 9052 9053 9054 9055 9056 9057 9058 9059 9060 9061 9062 9063 9064 9065 9066 9067 9068 9069 9070 9071 9072 9073 9074 9075 9076 9077 9078 9079 9080 9081 9082 9083 9084 9085 9086 9087 9088 9089 9090 9091 9092 9093 9094 9095 9096 9097 9098 9099 9100 9101 9102 9103 9104 9105 9106 9107 9108 9109 9110 9111 9112 9113 9114 9115 9116 9117 9118 9119 9120 9121 9122 9123 9124 9125 9126 9127 9128 9129 9130 9131 9132 9133 9134 9135 9136 9137 9138 9139 9140 9141 9142 9143 9144 9145 9146 9147 9148 9149 9150 9151 9152 9153 9154 9155 9156 9157 9158 9159 9160 9161 9162 9163 9164 9165 9166 9167 9168 9169 9170 9171 9172 9173 9174 9175 9176 9177 9178 9179 9180 9181 9182 9183 9184 9185 9186 9187 9188 9189 9190 9191 9192 9193 9194 9195 9196 9197 9198 9199 9200 9201 9202 9203 9204 9205 9206 9207 9208 9209 9210 9211 9212 9213 9214 9215 9216 9217 9218 9219 9220 9221 9222 9223 9224 9225 9226 9227 9228 9229 9230 9231 9232 9233 9234 9235 9236 9237 9238 9239 9240 9241 9242 9243 9244 9245 9246 9247 9248 9249 9250 9251 9252 9253 9254 9255 9256 9257 9258 9259 9260 9261 9262 9263 9264 9265 9266 9267 9268 9269 9270 9271 9272 9273 9274 9275 9276 9277 9278 9279 9280 9281 9282 9283 9284 9285 9286 9287 9288 9289 9290 9291 9292 9293 9294 9295 9296 9297 9298 9299 9300 9301 9302 9303 9304 9305 9306 9307 9308 9309 9310 9311 9312 9313 9314 9315 9316 9317 9318 9319 9320 9321 9322 9323 9324 9325 9326 9327 9328 9329 9330 9331 9332 9333 9334 9335 9336 9337 9338 9339 9340 9341 9342 9343 9344 9345 9346 9347 9348 9349 9350 9351 9352 9353 9354 9355 9356 9357 9358 9359 9360 9361 9362 9363 9364 9365 9366 9367 9368 9369 9370 9371 9372 9373 9374 9375 9376 9377 9378 9379 9380 9381 9382 9383 9384 9385 9386 9387 9388 9389 9390 9391 9392 9393 9394 9395 9396 9397 9398 9399 9400 9401 9402 9403 9404 9405 9406 9407 9408 9409 9410 9411 9412 9413 9414 9415 9416 9417 9418 9419 9420 9421 9422 9423 9424 9425 9426 9427 9428 9429 9430 9431 9432 9433 9434 9435 9436 9437 9438 9439 9440 9441 9442 9443 9444 9445 9446 9447 9448 9449 9450 9451 9452 9453 9454 9455 9456 9457 9458 9459 9460 9461 9462 9463 9464 9465 9466 9467 9468 9469 9470 9471 9472 9473 9474 9475 9476 9477 9478 9479 9480 9481 9482 9483 9484 9485 9486 9487 9488 9489 9490 9491 9492 9493 9494 9495 9496 9497 9498 9499 9500 9501 9502 9503 9504 9505 9506 9507 9508 9509 9510 9511 9512 9513 9514 9515 9516 9517 9518 9519 9520 9521 9522 9523 9524 9525 9526 9527 9528 9529 9530 9531 9532 9533 9534 9535 9536 9537 9538 9539 9540 9541 9542 9543 9544 9545 9546 9547 9548 9549 9550 9551 9552 9553 9554 9555 9556 9557 9558 9559 9560 9561 9562 9563 9564 9565 9566 9567 9568 9569 9570 9571 9572 9573 9574 9575 9576 9577 9578 9579 9580 9581 9582 9583 9584 9585 9586 9587 9588 9589 9590 9591 9592 9593 9594 9595 9596 9597 9598 9599 9600 9601 9602 9603 9604 9605 9606 9607 9608 9609 9610 9611 9612 9613 9614 9615 9616 9617 9618 9619 9620 9621 9622 9623 9624 9625 9626 9627 9628 9629 9630 9631 9632 9633 9634 9635 9636 9637 9638 9639 9640 9641 9642 9643 9644 9645 9646 9647 9648 9649 9650 9651 9652 9653 9654 9655 9656 9657 9658 9659 9660 9661 9662 9663 9664 9665 9666 9667 9668 9669 9670 9671 9672 9673 9674 9675 9676 9677 9678 9679 9680 9681 9682 9683 9684 9685 9686 9687 9688 9689 9690 9691 9692 9693 9694 9695 9696 9697 9698 9699 9700 9701 9702 9703 9704 9705 9706 9707 9708 9709 9710 9711 9712 9713 9714 9715 9716 9717 9718 9719 9720 9721 9722 9723 9724 9725 9726 9727 9728 9729 9730 9731 9732 9733 9734 9735 9736 9737 9738 9739 9740 9741 9742 9743 9744 9745 9746 9747 9748 9749 9750 9751 9752 9753 9754 9755 9756 9757 9758 9759 9760 9761 9762 9763 9764 9765 9766 9767 9768 9769 9770 9771 9772 9773 9774 9775 9776 9777 9778 9779 9780 9781 9782 9783 9784 9785 9786 9787 9788 9789 9790 9791 9792 9793 9794 9795 9796 9797 9798 9799 9800 9801 9802 9803 9804 9805 9806 9807 9808 9809 9810 9811 9812 9813 9814 9815 9816 9817 9818 9819 9820 9821 9822 9823 9824 9825 9826 9827 9828 9829 9830 9831 9832 9833 9834 9835 9836 9837 9838 9839 9840 9841 9842 9843 9844 9845 9846 9847 9848 9849 9850 9851 9852 9853 9854 9855 9856 9857 9858 9859 9860 9861 9862 9863 9864 9865 9866 9867 9868 9869 9870 9871 9872 9873 9874 9875 9876 9877 9878 9879 9880 9881 9882 9883 9884 9885 9886 9887 9888 9889 9890 9891 9892 9893 9894 9895 9896 9897 9898 9899 9900 9901 9902 9903 9904 9905 9906 9907 9908 9909 9910 9911 9912 9913 9914 9915 9916 9917 9918 9919 9920 9921 9922 9923 9924 9925 9926 9927 9928 9929 9930 9931 9932 9933 9934 9935 9936 9937 9938 9939 9940 9941 9942 9943 9944 9945 9946 9947 9948 9949 9950 9951 9952 9953 9954 9955 9956 9957 9958 9959 9960 9961 9962 9963 9964 9965 9966 9967 9968 9969 9970 9971 9972 9973 9974 9975 9976 9977 9978 9979 9980 9981 9982 9983 9984 9985 9986 9987 9988 9989 9990 9991 9992 9993 9994 9995 9996 9997 9998 9999 10000 10001 10002 10003 10004 10005 10006 10007 10008 10009 10010 10011 10012 10013 10014 10015 10016 10017 10018 10019 10020 10021 10022 10023 10024 10025 10026 10027 10028 10029 10030 10031 10032 10033 10034 10035 10036 10037 10038 10039 10040 10041 10042 10043 10044 10045 10046 10047 10048 10049 10050 10051 10052 10053 10054 10055 10056 10057 10058 10059 10060 10061 10062 10063 10064 10065 10066 10067 10068 10069 10070 10071 10072 10073 10074 10075 10076 10077 10078 10079 10080 10081 10082 10083 10084 10085 10086 10087 10088 10089 10090 10091 10092 10093 10094 10095 10096 10097 10098 10099 10100 10101 10102 10103 10104 10105 10106 10107 10108 10109 10110 10111 10112 10113 10114 10115 10116 10117 10118 10119 10120 10121 10122 10123 10124 10125 10126 10127 10128 10129 10130 10131 10132 10133 10134 10135 10136 10137 10138 10139 10140 10141 10142 10143 10144 10145 10146 10147 10148 10149 10150 10151 10152 10153 10154 10155 10156 10157 10158 10159 10160 10161 10162 10163 10164 10165 10166 10167 10168 10169 10170 10171 10172 10173 10174 10175 10176 10177 10178 10179 10180 10181 10182 10183 10184 10185 10186 10187 10188 10189 10190 10191 10192 10193 10194 10195 10196 10197 10198 10199 10200 10201 10202 10203 10204 10205 10206 10207 10208 10209 10210 10211 10212 10213 10214 10215 10216 10217 10218 10219 10220 10221 10222 10223 10224 10225 10226 10227 10228 10229 10230 10231 10232 10233 10234 10235 10236 10237 10238 10239 10240 10241 10242 10243 10244 10245 10246 10247 10248 10249 10250 10251 10252 10253 10254 10255 10256 10257 10258 10259 10260 10261 10262 10263 10264 10265 10266 10267 10268 10269 10270 10271 10272 10273 10274 10275 10276 10277 10278 10279 10280 10281 10282 10283 10284 10285 10286 10287 10288 10289 10290 10291 10292 10293 10294 10295 10296 10297 10298 10299 10300 10301 10302 10303 10304 10305 10306 10307 10308 10309 10310 10311 10312 10313 10314 10315 10316 10317 10318 10319 10320 10321 10322 10323 10324 10325 10326 10327 10328 10329 10330 10331 10332 10333 10334 10335 10336 10337 10338 10339 10340 10341 10342 10343 10344 10345 10346 10347 10348 10349 10350 10351 10352 10353 10354 10355 10356 10357 10358 10359 10360 10361 10362 10363 10364 10365 10366 10367 10368 10369 10370 10371 10372 10373 10374 10375 10376 10377 10378 10379 10380 10381 10382 10383 10384 10385 10386 10387 10388 10389 10390 10391 10392 10393 10394 10395 10396 10397 10398 10399 10400 10401 10402 10403 10404 10405 10406 10407 10408 10409 10410 10411 10412 10413 10414 10415 10416 10417 10418 10419 10420 10421 10422 10423 10424 10425 10426 10427 10428 10429 10430 10431 10432 10433 10434 10435 10436 10437 10438 10439 10440 10441 10442 10443 10444 10445 10446 10447 10448 10449 10450 10451 10452 10453 10454 10455 10456 10457 10458 10459 10460 10461 10462 10463 10464 10465 10466 10467 10468 10469 10470 10471 10472 10473 10474 10475 10476 10477 10478 10479 10480 10481 10482 10483 10484 10485 10486 10487 10488 10489 10490 10491 10492 10493 10494 10495 10496 10497 10498 10499 10500 10501 10502 10503 10504 10505 10506 10507 10508 10509 10510 10511 10512 10513 10514 10515 10516 10517 10518 10519 10520 10521 10522 10523 10524 10525 10526 10527 10528 10529 10530 10531 10532 10533 10534 10535 10536 10537 10538 10539 10540 10541 10542 10543 10544 10545 10546 10547 10548 10549 10550 10551 10552 10553 10554 10555 10556 10557 10558 10559 10560 10561 10562 10563 10564 10565 10566 10567 10568 10569 10570 10571 10572 10573 10574 10575 10576 10577 10578 10579 10580 10581 10582 10583 10584 10585 10586 10587 10588 10589 10590 10591 10592 10593 10594 10595 10596 10597 10598 10599 10600 10601 10602 10603 10604 10605 10606 10607 10608 10609 10610 10611 10612 10613 10614 10615 10616 10617 10618 10619 10620 10621 10622 10623 10624 10625 10626 10627 10628 10629 10630 10631 10632 10633 10634 10635 10636 10637 10638 10639 10640 10641 10642 10643 10644 10645 10646 10647 10648 10649 10650 10651 10652 10653 10654 10655 10656 10657 10658 10659 10660 10661 10662 10663 10664 10665 10666 10667 10668 10669 10670 10671 10672 10673 10674 10675 10676 10677 10678 10679 10680 10681 10682 10683 10684 10685 10686 10687 10688 10689 10690 10691 10692 10693 10694 10695 10696 10697 10698 10699 10700 10701 10702 10703 10704 10705 10706 10707 10708 10709 10710 10711 10712 10713 10714 10715 10716 10717 10718 10719 10720 10721 10722 10723 10724 10725 10726 10727 10728 10729 10730 10731 10732 10733 10734 10735 10736 10737 10738 10739 10740 10741 10742 10743 10744 10745 10746 10747 10748 10749 10750 10751 10752 10753 10754 10755 10756 10757 10758 10759 10760 10761 10762 10763 10764 10765 10766 10767 10768 10769 10770 10771 10772 10773 10774 10775 10776 10777 10778 10779 10780 10781 10782 10783 10784 10785 10786 10787 10788 10789 10790 10791 10792 10793 10794 10795 10796 10797 10798 10799 10800 10801 10802 10803 10804 10805 10806 10807 10808 10809 10810 10811 10812 10813 10814 10815 10816 10817 10818 10819 10820 10821 10822 10823 10824 10825 10826 10827 10828 10829 10830 10831 10832 10833 10834 10835 10836 10837 10838 10839 10840 10841 10842 10843 10844 10845 10846 10847 10848 10849 10850 10851 10852 10853 10854 10855 10856 10857 10858 10859 10860 10861 10862 10863 10864 10865 10866 10867 10868 10869 10870 10871 10872 10873 10874 10875 10876 10877 10878 10879 10880 10881 10882 10883 10884 10885 10886 10887 10888 10889 10890 10891 10892 10893 10894 10895 10896 10897 10898 10899 10900 10901 10902 10903 10904 10905 10906 10907 10908 10909 10910 10911 10912 10913 10914 10915 10916 10917 10918 10919 10920 10921 10922 10923 10924 10925 10926 10927 10928 10929 10930 10931 10932 10933 10934 10935 10936 10937 10938 10939 10940 10941 10942 10943 10944 10945 10946 10947 10948 10949 10950 10951 10952 10953 10954 10955 10956 10957 10958 10959 10960 10961 10962 10963 10964 10965 10966 10967 10968 10969 10970 10971 10972 10973 10974 10975 10976 10977 10978 10979 10980 10981 10982 10983 10984 10985 10986 10987 10988 10989 10990 10991 10992 10993 10994 10995 10996 10997 10998 10999 11000 11001 11002 11003 11004 11005 11006 11007 11008 11009 11010 11011 11012 11013 11014 11015 11016 11017 11018 11019 11020 11021 11022 11023 11024 11025 11026 11027 11028 11029 11030 11031 11032 11033 11034 11035 11036 11037 11038 11039 11040 11041 11042 11043 11044 11045 11046 11047 11048 11049 11050 11051 11052 11053 11054 11055 11056 11057 11058 11059 11060 11061 11062 11063 11064 11065 11066 11067 11068 11069 11070 11071 11072 11073 11074 11075 11076 11077 11078 11079 11080 11081 11082 11083 11084 11085 11086 11087 11088 11089 11090 11091 11092 11093 11094 11095 11096 11097 11098 11099 11100 11101 11102 11103 11104 11105 11106 11107 11108 11109 11110 11111 11112 11113 11114 11115 11116 11117 11118 11119 11120 11121 11122 11123 11124 11125 11126 11127 11128 11129 11130 11131 11132 11133 11134 11135 11136 11137 11138 11139 11140 11141 11142 11143 11144 11145 11146 11147 11148 11149 11150 11151 11152 11153 11154 11155 11156 11157 11158 11159 11160 11161 11162 11163 11164 11165 11166 11167 11168 11169 11170 11171 11172 11173 11174 11175 11176 11177 11178 11179 11180 11181 11182 11183 11184 11185 11186 11187 11188 11189 11190 11191 11192 11193 11194 11195 11196 11197 11198 11199 11200 11201 11202 11203 11204 11205 11206 11207 11208 11209 11210 11211 11212 11213 11214 11215 11216 11217 11218 11219 11220 11221 11222 11223 11224 11225 11226 11227 11228 11229 11230 11231 11232 11233 11234 11235 11236 11237 11238 11239 11240 11241 11242 11243 11244 11245 11246 11247 11248 11249 11250 11251 11252 11253 11254 11255 11256 11257 11258 11259 11260 11261 11262 11263 11264 11265 11266 11267 11268 11269 11270 11271 11272 11273 11274 11275 11276 11277 11278 11279 11280 11281 11282 11283 11284 11285 11286 11287 11288 11289 11290 11291 11292 11293 11294 11295 11296 11297 11298 11299 11300 11301 11302 11303 11304 11305 11306 11307 11308 11309 11310 11311 11312 11313 11314 11315 11316 11317 11318 11319 11320 11321 11322 11323 11324 11325 11326 11327 11328 11329 11330 11331 11332 11333 11334 11335 11336 11337 11338 11339 11340 11341 11342 11343 11344 11345 11346 11347 11348 11349 11350 11351 11352 11353 11354 11355 11356 11357 11358 11359 11360 11361 11362 11363 11364 11365 11366 11367 11368 11369 11370 11371 11372 11373 11374 11375 11376 11377 11378 11379 11380 11381 11382 11383 11384 11385 11386 11387 11388 11389 11390 11391 11392 11393 11394 11395 11396 11397 11398 11399 11400 11401 11402 11403 11404 11405 11406 11407 11408 11409 11410 11411 11412 11413 11414 11415 11416 11417 11418 11419 11420 11421 11422 11423 11424 11425 11426 11427 11428 11429 11430 11431 11432 11433 11434 11435 11436 11437 11438 11439 11440 11441 11442 11443 11444 11445 11446 11447 11448 11449 11450 11451 11452 11453 11454 11455 11456 11457 11458 11459 11460 11461 11462 11463 11464 11465 11466 11467 11468 11469 11470 11471 11472 11473 11474 11475 11476 11477 11478 11479 11480 11481 11482 11483 11484 11485 11486 11487 11488 11489 11490 11491 11492 11493 11494 11495 11496 11497 11498 11499 11500 11501 11502 11503 11504 11505 11506 11507 11508 11509 11510 11511 11512 11513 11514 11515 11516 11517 11518 11519 11520 11521 11522 11523 11524 11525 11526 11527 11528 11529 11530 11531 11532 11533 11534 11535 11536 11537 11538 11539 11540 11541 11542 11543 11544 11545 11546 11547 11548 11549 11550 11551 11552 11553 11554 11555 11556 11557 11558 11559 11560 11561 11562 11563 11564 11565 11566 11567 11568 11569 11570 11571 11572 11573 11574 11575 11576 11577 11578 11579 11580 11581 11582 11583 11584 11585 11586 11587 11588 11589 11590 11591 11592 11593 11594 11595 11596 11597 11598 11599 11600 11601 11602 11603 11604 11605 11606 11607 11608 11609 11610 11611 11612 11613 11614 11615 11616 11617 11618 11619 11620 11621 11622 11623 11624 11625 11626 11627 11628 11629 11630 11631 11632 11633 11634 11635 11636 11637 11638 11639 11640 11641 11642 11643 11644 11645 11646 11647 11648 11649 11650 11651 11652 11653 11654 11655 11656 11657 11658 11659 11660 11661 11662 11663 11664 11665 11666 11667 11668 11669 11670 11671 11672 11673 11674 11675 11676 11677 11678 11679 11680 11681 11682 11683 11684 11685 11686 11687 11688 11689 11690 11691 11692 11693 11694 11695 11696 11697 11698 11699 11700 11701 11702 11703 11704 11705 11706 11707 11708 11709 11710 11711 11712 11713 11714 11715 11716 11717 11718 11719 11720 11721 11722 11723 11724 11725 11726 11727 11728 11729 11730 11731 11732 11733 11734 11735 11736 11737 11738 11739 11740 11741 11742 11743 11744 11745 11746 11747 11748 11749 11750 11751 11752 11753 11754 11755 11756 11757 11758 11759 11760 11761 11762 11763 11764 11765 11766 11767 11768 11769 11770 11771 11772 11773 11774 11775 11776 11777 11778 11779 11780 11781 11782 11783 11784 11785 11786 11787 11788 11789 11790 11791 11792 11793 11794 11795 11796 11797 11798 11799 11800 11801 11802 11803 11804 11805 11806 11807 11808 11809 11810 11811 11812 11813 11814 11815 11816 11817 11818 11819 11820 11821 11822 11823 11824 11825 11826 11827 11828 11829 11830 11831 11832 11833 11834 11835 11836 11837 11838 11839 11840 11841 11842 11843 11844 11845 11846 11847 11848 11849 11850 11851 11852 11853 11854 11855 11856 11857 11858 11859 11860 11861 11862 11863 11864 11865 11866 11867 11868 11869 11870 11871 11872 11873 11874 11875 11876 11877 11878 11879 11880 11881 11882 11883 11884 11885 11886 11887 11888 11889 11890 11891 11892 11893 11894 11895 11896 11897 11898 11899 11900 11901 11902 11903 11904 11905 11906 11907 11908 11909 11910 11911 11912 11913 11914 11915 11916 11917 11918 11919 11920 11921 11922 11923 11924 11925 11926 11927 11928 11929 11930 11931 11932 11933 11934 11935 11936 11937 11938 11939 11940 11941 11942 11943 11944 11945 11946 11947 11948 11949 11950 11951 11952 11953 11954 11955 11956 11957 11958 11959 11960 11961 11962 11963 11964 11965 11966 11967 11968 11969 11970 11971 11972 11973 11974 11975 11976 11977 11978 11979 11980 11981 11982 11983 11984 11985 11986 11987 11988 11989 11990 11991 11992 11993 11994 11995 11996 11997 11998 11999 12000 12001 12002 12003 12004 12005 12006 12007 12008 12009 12010 12011 12012 12013 12014 12015 12016 12017 12018 12019 12020 12021 12022 12023 12024 12025 12026 12027 12028 12029 12030 12031 12032 12033 12034 12035 12036 12037 12038 12039 12040 12041 12042 12043 12044 12045 12046 12047 12048 12049 12050 12051 12052 12053 12054 12055 12056 12057 12058 12059 12060 12061 12062 12063 12064 12065 12066 12067 12068 12069 12070 12071 12072 12073 12074 12075 12076 12077 12078 12079 12080 12081 12082 12083 12084 12085 12086 12087 12088 12089 12090 12091 12092 12093 12094 12095 12096 12097 12098 12099 12100 12101 12102 12103 12104 12105 12106 12107 12108 12109 12110 12111 12112 12113 12114 12115 12116 12117 12118 12119 12120 12121 12122 12123 12124 12125 12126 12127 12128 12129 12130 12131 12132 12133 12134 12135 12136 12137 12138 12139 12140 12141 12142 12143 12144 12145 12146 12147 12148 12149 12150 12151 12152 12153 12154 12155 12156 12157 12158 12159 12160 12161 12162 12163 12164 12165 12166 12167 12168 12169 12170 12171 12172 12173 12174 12175 12176 12177 12178 12179 12180 12181 12182 12183 12184 12185 12186 12187 12188 12189 12190 12191 12192 12193 12194 12195 12196 12197 12198 12199 12200 12201 12202 12203 12204 12205 12206 12207 12208 12209 12210 12211 12212 12213 12214 12215 12216 12217 12218 12219 12220 12221 12222 12223 12224 12225 12226 12227 12228 12229 12230 12231 12232 12233 12234 12235 12236 12237 12238 12239 12240 12241 12242 12243 12244 12245 12246 12247 12248 12249 12250 12251 12252 12253 12254 12255 12256 12257 12258 12259 12260 12261 12262 12263 12264 12265 12266 12267 12268 12269 12270 12271 12272 12273 12274 12275 12276 12277 12278 12279 12280 12281 12282 12283 12284 12285 12286 12287 12288 12289 12290 12291 12292 12293 12294 12295 12296 12297 12298 12299 12300 12301 12302 12303 12304 12305 12306 12307 12308 12309 12310 12311 12312 12313 12314 12315 12316 12317 12318 12319 12320 12321 12322 12323 12324 12325 12326 12327 12328 12329 12330 12331 12332 12333 12334 12335 12336 12337 12338 12339 12340 12341 12342 12343 12344 12345 12346 12347 12348 12349 12350 12351 12352 12353 12354 12355 12356 12357 12358 12359 12360 12361 12362 12363 12364 12365 12366 12367 12368 12369 12370 12371 12372 12373 12374 12375 12376 12377 12378 12379 12380 12381 12382 12383 12384 12385 12386 12387 12388 12389 12390 12391 12392 12393 12394 12395 12396 12397 12398 12399 12400 12401 12402 12403 12404 12405 12406 12407 12408 12409 12410 12411 12412 12413 12414 12415 12416 12417 12418 12419 12420 12421 12422 12423 12424 12425 12426 12427 12428 12429 12430 12431 12432 12433 12434 12435 12436 12437 12438 12439 12440 12441 12442 12443 12444 12445 12446 12447 12448 12449 12450 12451 12452 12453 12454 12455 12456 12457 12458 12459 12460 12461 12462 12463 12464 12465 12466 12467 12468 12469 12470 12471 12472 12473 12474 12475 12476 12477 12478 12479 12480 12481 12482 12483 12484 12485 12486 12487 12488 12489 12490 12491 12492 12493 12494 12495 12496 12497 12498 12499 12500 12501 12502 12503 12504 12505 12506 12507 12508 12509 12510 12511 12512 12513 12514 12515 12516 12517 12518 12519 12520 12521 12522 12523 12524 12525 12526 12527 12528 12529 12530 12531 12532 12533 12534 12535 12536 12537 12538 12539 12540 12541 12542 12543 12544 12545 12546 12547 12548 12549 12550 12551 12552 12553 12554 12555 12556 12557 12558 12559 12560 12561 12562 12563 12564 12565 12566 12567 12568 12569 12570 12571 12572 12573 12574 12575 12576 12577 12578 12579 12580 12581 12582 12583 12584 12585 12586 12587 12588 12589 12590 12591 12592 12593 12594 12595 12596 12597 12598 12599 12600 12601 12602 12603 12604 12605 12606 12607 12608 12609 12610 12611 12612 12613 12614 12615 12616 12617 12618 12619 12620 12621 12622 12623 12624 12625 12626 12627 12628 12629 12630 12631 12632 12633 12634 12635 12636 12637 12638 12639 12640 12641 12642 12643 12644 12645 12646 12647 12648 12649 12650 12651 12652 12653 12654 12655 12656 12657 12658 12659 12660 12661 12662 12663 12664 12665 12666 12667 12668 12669 12670 12671 12672 12673 12674 12675 12676 12677 12678 12679 12680 12681 12682 12683 12684 12685 12686 12687 12688 12689 12690 12691 12692 12693 12694 12695 12696 12697 12698 12699 12700 12701 12702 12703 12704 12705 12706 12707 12708 12709 12710 12711 12712 12713 12714 12715 12716 12717 12718 12719 12720 12721 12722 12723 12724 12725 12726 12727 12728 12729 12730 12731 12732 12733 12734 12735 12736 12737 12738 12739 12740 12741 12742 12743 12744 12745 12746 12747 12748 12749 12750 12751 12752 12753 12754 12755 12756 12757 12758 12759 12760 12761 12762 12763 12764 12765 12766 12767 12768 12769 12770 12771 12772 12773 12774 12775 12776 12777 12778 12779 12780 12781 12782 12783 12784 12785 12786 12787 12788 12789 12790 12791 12792 12793 12794 12795 12796 12797 12798 12799 12800 12801 12802 12803 12804 12805 12806 12807 12808 12809 12810 12811 12812 12813 12814 12815 12816 12817 12818 12819 12820 12821 12822 12823 12824 12825 12826 12827 12828 12829 12830 12831 12832 12833 12834 12835 12836 12837 12838 12839 12840 12841 12842 12843 12844 12845 12846 12847 12848 12849 12850 12851 12852 12853 12854 12855 12856 12857 12858 12859 12860 12861 12862 12863 12864 12865 12866 12867 12868 12869 12870 12871 12872 12873 12874 12875 12876 12877 12878 12879 12880 12881 12882 12883 12884 12885 12886 12887 12888 12889 12890 12891 12892 12893 12894 12895 12896 12897 12898 12899 12900 12901 12902 12903 12904 12905 12906 12907 12908 12909 12910 12911 12912 12913 12914 12915 12916 12917 12918 12919 12920 12921 12922 12923 12924 12925 12926 12927 12928 12929 12930 12931 12932 12933 12934 12935 12936 12937 12938 12939 12940 12941 12942 12943 12944 12945 12946 12947 12948 12949 12950 12951 12952 12953 12954 12955 12956 12957 12958 12959 12960 12961 12962 12963 12964 12965 12966 12967 12968 12969 12970 12971 12972 12973 12974 12975 12976 12977 12978 12979 12980 12981 12982 12983 12984 12985 12986 12987 12988 12989 12990 12991 12992 12993 12994 12995 12996 12997 12998 12999 13000 13001 13002 13003 13004 13005 13006 13007 13008 13009 13010 13011 13012 13013 13014 13015 13016 13017 13018 13019 13020 13021 13022 13023 13024 13025 13026 13027 13028 13029 13030 13031 13032 13033 13034 13035 13036 13037 13038 13039 13040 13041 13042 13043 13044 13045 13046 13047 13048 13049 13050 13051 13052 13053 13054 13055 13056 13057 13058 13059 13060 13061 13062 13063 13064 13065 13066 13067 13068 13069 13070 13071 13072 13073 13074 13075 13076 13077 13078
/* Optimize by combining instructions for GNU compiler.
   Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This module is essentially the "combiner" phase of the U. of Arizona
   Portable Optimizer, but redone to work on our list-structured
   representation for RTL instead of their string representation.

   The LOG_LINKS of each insn identify the most recent assignment
   to each REG used in the insn.  It is a list of previous insns,
   each of which contains a SET for a REG that is used in this insn
   and not used or set in between.  LOG_LINKs never cross basic blocks.
   They were set up by the preceding pass (lifetime analysis).

   We try to combine each pair of insns joined by a logical link.
   We also try to combine triples of insns A, B and C when
   C has a link back to B and B has a link back to A.

   LOG_LINKS does not have links for use of the CC0.  They don't
   need to, because the insn that sets the CC0 is always immediately
   before the insn that tests it.  So we always regard a branch
   insn as having a logical link to the preceding insn.  The same is true
   for an insn explicitly using CC0.

   We check (with use_crosses_set_p) to avoid combining in such a way
   as to move a computation to a place where its value would be different.

   Combination is done by mathematically substituting the previous
   insn(s) values for the regs they set into the expressions in
   the later insns that refer to these regs.  If the result is a valid insn
   for our target machine, according to the machine description,
   we install it, delete the earlier insns, and update the data flow
   information (LOG_LINKS and REG_NOTES) for what we did.

   There are a few exceptions where the dataflow information isn't
   completely updated (however this is only a local issue since it is
   regenerated before the next pass that uses it):

   - reg_live_length is not updated
   - reg_n_refs is not adjusted in the rare case when a register is
     no longer required in a computation
   - there are extremely rare cases (see distribute_notes) when a
     REG_DEAD note is lost
   - a LOG_LINKS entry that refers to an insn with multiple SETs may be
     removed because there is no way to know which register it was
     linking

   To simplify substitution, we combine only when the earlier insn(s)
   consist of only a single assignment.  To simplify updating afterward,
   we never combine when a subroutine call appears in the middle.

   Since we do not represent assignments to CC0 explicitly except when that
   is all an insn does, there is no LOG_LINKS entry in an insn that uses
   the condition code for the insn that set the condition code.
   Fortunately, these two insns must be consecutive.
   Therefore, every JUMP_INSN is taken to have an implicit logical link
   to the preceding insn.  This is not quite right, since non-jumps can
   also use the condition code; but in practice such insns would not
   combine anyway.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "flags.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "insn-config.h"
#include "function.h"
/* Include expr.h after insn-config.h so we get HAVE_conditional_move.  */
#include "expr.h"
#include "insn-attr.h"
#include "recog.h"
#include "real.h"
#include "toplev.h"
#include "target.h"
#include "optabs.h"
#include "insn-codes.h"
#include "rtlhooks-def.h"
/* Include output.h for dump_file.  */
#include "output.h"
#include "params.h"
#include "timevar.h"
#include "tree-pass.h"
#include "df.h"
#include "cgraph.h"

/* Number of attempts to combine instructions in this function.  */

static int combine_attempts;

/* Number of attempts that got as far as substitution in this function.  */

static int combine_merges;

/* Number of instructions combined with added SETs in this function.  */

static int combine_extras;

/* Number of instructions combined in this function.  */

static int combine_successes;

/* Totals over entire compilation.  */

static int total_attempts, total_merges, total_extras, total_successes;

/* combine_instructions may try to replace the right hand side of the
   second instruction with the value of an associated REG_EQUAL note
   before throwing it at try_combine.  That is problematic when there
   is a REG_DEAD note for a register used in the old right hand side
   and can cause distribute_notes to do wrong things.  This is the
   second instruction if it has been so modified, null otherwise.  */

static rtx i2mod;

/* When I2MOD is nonnull, this is a copy of the old right hand side.  */

static rtx i2mod_old_rhs;

/* When I2MOD is nonnull, this is a copy of the new right hand side.  */

static rtx i2mod_new_rhs;

typedef struct reg_stat_struct {
  /* Record last point of death of (hard or pseudo) register n.  */
  rtx				last_death;

  /* Record last point of modification of (hard or pseudo) register n.  */
  rtx				last_set;

  /* The next group of fields allows the recording of the last value assigned
     to (hard or pseudo) register n.  We use this information to see if an
     operation being processed is redundant given a prior operation performed
     on the register.  For example, an `and' with a constant is redundant if
     all the zero bits are already known to be turned off.

     We use an approach similar to that used by cse, but change it in the
     following ways:

     (1) We do not want to reinitialize at each label.
     (2) It is useful, but not critical, to know the actual value assigned
	 to a register.  Often just its form is helpful.

     Therefore, we maintain the following fields:

     last_set_value		the last value assigned
     last_set_label		records the value of label_tick when the
				register was assigned
     last_set_table_tick	records the value of label_tick when a
				value using the register is assigned
     last_set_invalid		set to nonzero when it is not valid
				to use the value of this register in some
				register's value

     To understand the usage of these tables, it is important to understand
     the distinction between the value in last_set_value being valid and
     the register being validly contained in some other expression in the
     table.

     (The next two parameters are out of date).

     reg_stat[i].last_set_value is valid if it is nonzero, and either
     reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick.

     Register I may validly appear in any expression returned for the value
     of another register if reg_n_sets[i] is 1.  It may also appear in the
     value for register J if reg_stat[j].last_set_invalid is zero, or
     reg_stat[i].last_set_label < reg_stat[j].last_set_label.

     If an expression is found in the table containing a register which may
     not validly appear in an expression, the register is replaced by
     something that won't match, (clobber (const_int 0)).  */

  /* Record last value assigned to (hard or pseudo) register n.  */

  rtx				last_set_value;

  /* Record the value of label_tick when an expression involving register n
     is placed in last_set_value.  */

  int				last_set_table_tick;

  /* Record the value of label_tick when the value for register n is placed in
     last_set_value.  */

  int				last_set_label;

  /* These fields are maintained in parallel with last_set_value and are
     used to store the mode in which the register was last set, the bits
     that were known to be zero when it was last set, and the number of
     sign bits copies it was known to have when it was last set.  */

  unsigned HOST_WIDE_INT	last_set_nonzero_bits;
  char				last_set_sign_bit_copies;
  ENUM_BITFIELD(machine_mode)	last_set_mode : 8;

  /* Set nonzero if references to register n in expressions should not be
     used.  last_set_invalid is set nonzero when this register is being
     assigned to and last_set_table_tick == label_tick.  */

  char				last_set_invalid;

  /* Some registers that are set more than once and used in more than one
     basic block are nevertheless always set in similar ways.  For example,
     a QImode register may be loaded from memory in two places on a machine
     where byte loads zero extend.

     We record in the following fields if a register has some leading bits
     that are always equal to the sign bit, and what we know about the
     nonzero bits of a register, specifically which bits are known to be
     zero.

     If an entry is zero, it means that we don't know anything special.  */

  unsigned char			sign_bit_copies;

  unsigned HOST_WIDE_INT	nonzero_bits;

  /* Record the value of the label_tick when the last truncation
     happened.  The field truncated_to_mode is only valid if
     truncation_label == label_tick.  */

  int				truncation_label;

  /* Record the last truncation seen for this register.  If truncation
     is not a nop to this mode we might be able to save an explicit
     truncation if we know that value already contains a truncated
     value.  */

  ENUM_BITFIELD(machine_mode)	truncated_to_mode : 8;
} reg_stat_type;

DEF_VEC_O(reg_stat_type);
DEF_VEC_ALLOC_O(reg_stat_type,heap);

static VEC(reg_stat_type,heap) *reg_stat;

/* Record the luid of the last insn that invalidated memory
   (anything that writes memory, and subroutine calls, but not pushes).  */

static int mem_last_set;

/* Record the luid of the last CALL_INSN
   so we can tell whether a potential combination crosses any calls.  */

static int last_call_luid;

/* When `subst' is called, this is the insn that is being modified
   (by combining in a previous insn).  The PATTERN of this insn
   is still the old pattern partially modified and it should not be
   looked at, but this may be used to examine the successors of the insn
   to judge whether a simplification is valid.  */

static rtx subst_insn;

/* This is the lowest LUID that `subst' is currently dealing with.
   get_last_value will not return a value if the register was set at or
   after this LUID.  If not for this mechanism, we could get confused if
   I2 or I1 in try_combine were an insn that used the old value of a register
   to obtain a new value.  In that case, we might erroneously get the
   new value of the register when we wanted the old one.  */

static int subst_low_luid;

/* This contains any hard registers that are used in newpat; reg_dead_at_p
   must consider all these registers to be always live.  */

static HARD_REG_SET newpat_used_regs;

/* This is an insn to which a LOG_LINKS entry has been added.  If this
   insn is the earlier than I2 or I3, combine should rescan starting at
   that location.  */

static rtx added_links_insn;

/* Basic block in which we are performing combines.  */
static basic_block this_basic_block;


/* Length of the currently allocated uid_insn_cost array.  */

static int max_uid_known;

/* The following array records the insn_rtx_cost for every insn
   in the instruction stream.  */

static int *uid_insn_cost;

/* The following array records the LOG_LINKS for every insn in the
   instruction stream as an INSN_LIST rtx.  */

static rtx *uid_log_links;

#define INSN_COST(INSN)		(uid_insn_cost[INSN_UID (INSN)])
#define LOG_LINKS(INSN)		(uid_log_links[INSN_UID (INSN)])

/* Incremented for each basic block.  */

static int label_tick;

/* Reset to label_tick for each label.  */

static int label_tick_ebb_start;

/* Mode used to compute significance in reg_stat[].nonzero_bits.  It is the
   largest integer mode that can fit in HOST_BITS_PER_WIDE_INT.  */

static enum machine_mode nonzero_bits_mode;

/* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can
   be safely used.  It is zero while computing them and after combine has
   completed.  This former test prevents propagating values based on
   previously set values, which can be incorrect if a variable is modified
   in a loop.  */

static int nonzero_sign_valid;


/* Record one modification to rtl structure
   to be undone by storing old_contents into *where.  */

struct undo
{
  struct undo *next;
  enum { UNDO_RTX, UNDO_INT, UNDO_MODE } kind;
  union { rtx r; int i; enum machine_mode m; } old_contents;
  union { rtx *r; int *i; } where;
};

/* Record a bunch of changes to be undone, up to MAX_UNDO of them.
   num_undo says how many are currently recorded.

   other_insn is nonzero if we have modified some other insn in the process
   of working on subst_insn.  It must be verified too.  */

struct undobuf
{
  struct undo *undos;
  struct undo *frees;
  rtx other_insn;
};

static struct undobuf undobuf;

/* Number of times the pseudo being substituted for
   was found and replaced.  */

static int n_occurrences;

static rtx reg_nonzero_bits_for_combine (const_rtx, enum machine_mode, const_rtx,
					 enum machine_mode,
					 unsigned HOST_WIDE_INT,
					 unsigned HOST_WIDE_INT *);
static rtx reg_num_sign_bit_copies_for_combine (const_rtx, enum machine_mode, const_rtx,
						enum machine_mode,
						unsigned int, unsigned int *);
static void do_SUBST (rtx *, rtx);
static void do_SUBST_INT (int *, int);
static void init_reg_last (void);
static void setup_incoming_promotions (rtx);
static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *);
static int cant_combine_insn_p (rtx);
static int can_combine_p (rtx, rtx, rtx, rtx, rtx *, rtx *);
static int combinable_i3pat (rtx, rtx *, rtx, rtx, int, rtx *);
static int contains_muldiv (rtx);
static rtx try_combine (rtx, rtx, rtx, int *);
static void undo_all (void);
static void undo_commit (void);
static rtx *find_split_point (rtx *, rtx);
static rtx subst (rtx, rtx, rtx, int, int);
static rtx combine_simplify_rtx (rtx, enum machine_mode, int);
static rtx simplify_if_then_else (rtx);
static rtx simplify_set (rtx);
static rtx simplify_logical (rtx);
static rtx expand_compound_operation (rtx);
static const_rtx expand_field_assignment (const_rtx);
static rtx make_extraction (enum machine_mode, rtx, HOST_WIDE_INT,
			    rtx, unsigned HOST_WIDE_INT, int, int, int);
static rtx extract_left_shift (rtx, int);
static rtx make_compound_operation (rtx, enum rtx_code);
static int get_pos_from_mask (unsigned HOST_WIDE_INT,
			      unsigned HOST_WIDE_INT *);
static rtx canon_reg_for_combine (rtx, rtx);
static rtx force_to_mode (rtx, enum machine_mode,
			  unsigned HOST_WIDE_INT, int);
static rtx if_then_else_cond (rtx, rtx *, rtx *);
static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
static int rtx_equal_for_field_assignment_p (rtx, rtx);
static rtx make_field_assignment (rtx);
static rtx apply_distributive_law (rtx);
static rtx distribute_and_simplify_rtx (rtx, int);
static rtx simplify_and_const_int_1 (enum machine_mode, rtx,
				     unsigned HOST_WIDE_INT);
static rtx simplify_and_const_int (rtx, enum machine_mode, rtx,
				   unsigned HOST_WIDE_INT);
static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
			    HOST_WIDE_INT, enum machine_mode, int *);
static rtx simplify_shift_const_1 (enum rtx_code, enum machine_mode, rtx, int);
static rtx simplify_shift_const (rtx, enum rtx_code, enum machine_mode, rtx,
				 int);
static int recog_for_combine (rtx *, rtx, rtx *);
static rtx gen_lowpart_for_combine (enum machine_mode, rtx);
static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
static void update_table_tick (rtx);
static void record_value_for_reg (rtx, rtx, rtx);
static void check_promoted_subreg (rtx, rtx);
static void record_dead_and_set_regs_1 (rtx, const_rtx, void *);
static void record_dead_and_set_regs (rtx);
static int get_last_value_validate (rtx *, rtx, int, int);
static rtx get_last_value (const_rtx);
static int use_crosses_set_p (const_rtx, int);
static void reg_dead_at_p_1 (rtx, const_rtx, void *);
static int reg_dead_at_p (rtx, rtx);
static void move_deaths (rtx, rtx, int, rtx, rtx *);
static int reg_bitfield_target_p (rtx, rtx);
static void distribute_notes (rtx, rtx, rtx, rtx, rtx, rtx);
static void distribute_links (rtx);
static void mark_used_regs_combine (rtx);
static void record_promoted_value (rtx, rtx);
static int unmentioned_reg_p_1 (rtx *, void *);
static bool unmentioned_reg_p (rtx, rtx);
static int record_truncated_value (rtx *, void *);
static void record_truncated_values (rtx *, void *);
static bool reg_truncated_to_mode (enum machine_mode, const_rtx);
static rtx gen_lowpart_or_truncate (enum machine_mode, rtx);


/* It is not safe to use ordinary gen_lowpart in combine.
   See comments in gen_lowpart_for_combine.  */
#undef RTL_HOOKS_GEN_LOWPART
#define RTL_HOOKS_GEN_LOWPART              gen_lowpart_for_combine

/* Our implementation of gen_lowpart never emits a new pseudo.  */
#undef RTL_HOOKS_GEN_LOWPART_NO_EMIT
#define RTL_HOOKS_GEN_LOWPART_NO_EMIT      gen_lowpart_for_combine

#undef RTL_HOOKS_REG_NONZERO_REG_BITS
#define RTL_HOOKS_REG_NONZERO_REG_BITS     reg_nonzero_bits_for_combine

#undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES
#define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES  reg_num_sign_bit_copies_for_combine

#undef RTL_HOOKS_REG_TRUNCATED_TO_MODE
#define RTL_HOOKS_REG_TRUNCATED_TO_MODE    reg_truncated_to_mode

static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER;


/* Try to split PATTERN found in INSN.  This returns NULL_RTX if
   PATTERN can not be split.  Otherwise, it returns an insn sequence.
   This is a wrapper around split_insns which ensures that the
   reg_stat vector is made larger if the splitter creates a new
   register.  */

static rtx
combine_split_insns (rtx pattern, rtx insn)
{
  rtx ret;
  unsigned int nregs;

  ret = split_insns (pattern, insn);
  nregs = max_reg_num ();
  if (nregs > VEC_length (reg_stat_type, reg_stat))
    VEC_safe_grow_cleared (reg_stat_type, heap, reg_stat, nregs);
  return ret;
}

/* This is used by find_single_use to locate an rtx in LOC that
   contains exactly one use of DEST, which is typically either a REG
   or CC0.  It returns a pointer to the innermost rtx expression
   containing DEST.  Appearances of DEST that are being used to
   totally replace it are not counted.  */

static rtx *
find_single_use_1 (rtx dest, rtx *loc)
{
  rtx x = *loc;
  enum rtx_code code = GET_CODE (x);
  rtx *result = NULL;
  rtx *this_result;
  int i;
  const char *fmt;

  switch (code)
    {
    case CONST_INT:
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST_DOUBLE:
    case CONST_VECTOR:
    case CLOBBER:
      return 0;

    case SET:
      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
	 of a REG that occupies all of the REG, the insn uses DEST if
	 it is mentioned in the destination or the source.  Otherwise, we
	 need just check the source.  */
      if (GET_CODE (SET_DEST (x)) != CC0
	  && GET_CODE (SET_DEST (x)) != PC
	  && !REG_P (SET_DEST (x))
	  && ! (GET_CODE (SET_DEST (x)) == SUBREG
		&& REG_P (SUBREG_REG (SET_DEST (x)))
		&& (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
		      + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
		    == ((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
			 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))))
	break;

      return find_single_use_1 (dest, &SET_SRC (x));

    case MEM:
    case SUBREG:
      return find_single_use_1 (dest, &XEXP (x, 0));

    default:
      break;
    }

  /* If it wasn't one of the common cases above, check each expression and
     vector of this code.  Look for a unique usage of DEST.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (dest == XEXP (x, i)
	      || (REG_P (dest) && REG_P (XEXP (x, i))
		  && REGNO (dest) == REGNO (XEXP (x, i))))
	    this_result = loc;
	  else
	    this_result = find_single_use_1 (dest, &XEXP (x, i));

	  if (result == NULL)
	    result = this_result;
	  else if (this_result)
	    /* Duplicate usage.  */
	    return NULL;
	}
      else if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    {
	      if (XVECEXP (x, i, j) == dest
		  || (REG_P (dest)
		      && REG_P (XVECEXP (x, i, j))
		      && REGNO (XVECEXP (x, i, j)) == REGNO (dest)))
		this_result = loc;
	      else
		this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));

	      if (result == NULL)
		result = this_result;
	      else if (this_result)
		return NULL;
	    }
	}
    }

  return result;
}


/* See if DEST, produced in INSN, is used only a single time in the
   sequel.  If so, return a pointer to the innermost rtx expression in which
   it is used.

   If PLOC is nonzero, *PLOC is set to the insn containing the single use.

   If DEST is cc0_rtx, we look only at the next insn.  In that case, we don't
   care about REG_DEAD notes or LOG_LINKS.

   Otherwise, we find the single use by finding an insn that has a
   LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST.  If DEST is
   only referenced once in that insn, we know that it must be the first
   and last insn referencing DEST.  */

static rtx *
find_single_use (rtx dest, rtx insn, rtx *ploc)
{
  rtx next;
  rtx *result;
  rtx link;

#ifdef HAVE_cc0
  if (dest == cc0_rtx)
    {
      next = NEXT_INSN (insn);
      if (next == 0
	  || (!NONJUMP_INSN_P (next) && !JUMP_P (next)))
	return 0;

      result = find_single_use_1 (dest, &PATTERN (next));
      if (result && ploc)
	*ploc = next;
      return result;
    }
#endif

  if (!REG_P (dest))
    return 0;

  for (next = next_nonnote_insn (insn);
       next != 0 && !LABEL_P (next);
       next = next_nonnote_insn (next))
    if (INSN_P (next) && dead_or_set_p (next, dest))
      {
	for (link = LOG_LINKS (next); link; link = XEXP (link, 1))
	  if (XEXP (link, 0) == insn)
	    break;

	if (link)
	  {
	    result = find_single_use_1 (dest, &PATTERN (next));
	    if (ploc)
	      *ploc = next;
	    return result;
	  }
      }

  return 0;
}

/* Substitute NEWVAL, an rtx expression, into INTO, a place in some
   insn.  The substitution can be undone by undo_all.  If INTO is already
   set to NEWVAL, do not record this change.  Because computing NEWVAL might
   also call SUBST, we have to compute it before we put anything into
   the undo table.  */

static void
do_SUBST (rtx *into, rtx newval)
{
  struct undo *buf;
  rtx oldval = *into;

  if (oldval == newval)
    return;

  /* We'd like to catch as many invalid transformations here as
     possible.  Unfortunately, there are way too many mode changes
     that are perfectly valid, so we'd waste too much effort for
     little gain doing the checks here.  Focus on catching invalid
     transformations involving integer constants.  */
  if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT
      && GET_CODE (newval) == CONST_INT)
    {
      /* Sanity check that we're replacing oldval with a CONST_INT
	 that is a valid sign-extension for the original mode.  */
      gcc_assert (INTVAL (newval)
		  == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));

      /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
	 CONST_INT is not valid, because after the replacement, the
	 original mode would be gone.  Unfortunately, we can't tell
	 when do_SUBST is called to replace the operand thereof, so we
	 perform this test on oldval instead, checking whether an
	 invalid replacement took place before we got here.  */
      gcc_assert (!(GET_CODE (oldval) == SUBREG
		    && GET_CODE (SUBREG_REG (oldval)) == CONST_INT));
      gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
		    && GET_CODE (XEXP (oldval, 0)) == CONST_INT));
    }

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_RTX;
  buf->where.r = into;
  buf->old_contents.r = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST(INTO, NEWVAL)	do_SUBST(&(INTO), (NEWVAL))

/* Similar to SUBST, but NEWVAL is an int expression.  Note that substitution
   for the value of a HOST_WIDE_INT value (including CONST_INT) is
   not safe.  */

static void
do_SUBST_INT (int *into, int newval)
{
  struct undo *buf;
  int oldval = *into;

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_INT;
  buf->where.i = into;
  buf->old_contents.i = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_INT(INTO, NEWVAL)  do_SUBST_INT(&(INTO), (NEWVAL))

/* Similar to SUBST, but just substitute the mode.  This is used when
   changing the mode of a pseudo-register, so that any other
   references to the entry in the regno_reg_rtx array will change as
   well.  */

static void
do_SUBST_MODE (rtx *into, enum machine_mode newval)
{
  struct undo *buf;
  enum machine_mode oldval = GET_MODE (*into);

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_MODE;
  buf->where.r = into;
  buf->old_contents.m = oldval;
  adjust_reg_mode (*into, newval);

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_MODE(INTO, NEWVAL)  do_SUBST_MODE(&(INTO), (NEWVAL))

/* Subroutine of try_combine.  Determine whether the combine replacement
   patterns NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to
   insn_rtx_cost that the original instruction sequence I1, I2, I3 and
   undobuf.other_insn.  Note that I1 and/or NEWI2PAT may be NULL_RTX. 
   NEWOTHERPAT and undobuf.other_insn may also both be NULL_RTX.  This
   function returns false, if the costs of all instructions can be
   estimated, and the replacements are more expensive than the original
   sequence.  */

static bool
combine_validate_cost (rtx i1, rtx i2, rtx i3, rtx newpat, rtx newi2pat,
		       rtx newotherpat)
{
  int i1_cost, i2_cost, i3_cost;
  int new_i2_cost, new_i3_cost;
  int old_cost, new_cost;

  /* Lookup the original insn_rtx_costs.  */
  i2_cost = INSN_COST (i2);
  i3_cost = INSN_COST (i3);

  if (i1)
    {
      i1_cost = INSN_COST (i1);
      old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0)
		 ? i1_cost + i2_cost + i3_cost : 0;
    }
  else
    {
      old_cost = (i2_cost > 0 && i3_cost > 0) ? i2_cost + i3_cost : 0;
      i1_cost = 0;
    }

  /* Calculate the replacement insn_rtx_costs.  */
  new_i3_cost = insn_rtx_cost (newpat);
  if (newi2pat)
    {
      new_i2_cost = insn_rtx_cost (newi2pat);
      new_cost = (new_i2_cost > 0 && new_i3_cost > 0)
		 ? new_i2_cost + new_i3_cost : 0;
    }
  else
    {
      new_cost = new_i3_cost;
      new_i2_cost = 0;
    }

  if (undobuf.other_insn)
    {
      int old_other_cost, new_other_cost;

      old_other_cost = INSN_COST (undobuf.other_insn);
      new_other_cost = insn_rtx_cost (newotherpat);
      if (old_other_cost > 0 && new_other_cost > 0)
	{
	  old_cost += old_other_cost;
	  new_cost += new_other_cost;
	}
      else
	old_cost = 0;
    }

  /* Disallow this recombination if both new_cost and old_cost are
     greater than zero, and new_cost is greater than old cost.  */
  if (old_cost > 0
      && new_cost > old_cost)
    {
      if (dump_file)
	{
	  if (i1)
	    {
	      fprintf (dump_file,
		       "rejecting combination of insns %d, %d and %d\n",
		       INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
	      fprintf (dump_file, "original costs %d + %d + %d = %d\n",
		       i1_cost, i2_cost, i3_cost, old_cost);
	    }
	  else
	    {
	      fprintf (dump_file,
		       "rejecting combination of insns %d and %d\n",
		       INSN_UID (i2), INSN_UID (i3));
	      fprintf (dump_file, "original costs %d + %d = %d\n",
		       i2_cost, i3_cost, old_cost);
	    }

	  if (newi2pat)
	    {
	      fprintf (dump_file, "replacement costs %d + %d = %d\n",
		       new_i2_cost, new_i3_cost, new_cost);
	    }
	  else
	    fprintf (dump_file, "replacement cost %d\n", new_cost);
	}

      return false;
    }

  /* Update the uid_insn_cost array with the replacement costs.  */
  INSN_COST (i2) = new_i2_cost;
  INSN_COST (i3) = new_i3_cost;
  if (i1)
    INSN_COST (i1) = 0;

  return true;
}


/* Delete any insns that copy a register to itself.  */

static void
delete_noop_moves (void)
{
  rtx insn, next;
  basic_block bb;

  FOR_EACH_BB (bb)
    {
      for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
	{
	  next = NEXT_INSN (insn);
	  if (INSN_P (insn) && noop_move_p (insn))
	    {
	      rtx note;

	      /* If we're about to remove the first insn of a libcall
		 then move the libcall note to the next real insn and
		 update the retval note.  */
	      if ((note = find_reg_note (insn, REG_LIBCALL, NULL_RTX))
		       && XEXP (note, 0) != insn)
		{
		  rtx new_libcall_insn = next_real_insn (insn);
		  rtx retval_note = find_reg_note (XEXP (note, 0),
						   REG_RETVAL, NULL_RTX);
		  REG_NOTES (new_libcall_insn)
		    = gen_rtx_INSN_LIST (REG_LIBCALL, XEXP (note, 0),
					 REG_NOTES (new_libcall_insn));
		  XEXP (retval_note, 0) = new_libcall_insn;
		}

	      if (dump_file)
		fprintf (dump_file, "deleting noop move %d\n", INSN_UID (insn));

	      delete_insn_and_edges (insn);
	    }
	}
    }
}


/* Fill in log links field for all insns.  */

static void
create_log_links (void)
{
  basic_block bb;
  rtx *next_use, insn;
  struct df_ref **def_vec, **use_vec;

  next_use = XCNEWVEC (rtx, max_reg_num ());

  /* Pass through each block from the end, recording the uses of each
     register and establishing log links when def is encountered.
     Note that we do not clear next_use array in order to save time,
     so we have to test whether the use is in the same basic block as def.
              
     There are a few cases below when we do not consider the definition or
     usage -- these are taken from original flow.c did. Don't ask me why it is
     done this way; I don't know and if it works, I don't want to know.  */

  FOR_EACH_BB (bb)
    {
      FOR_BB_INSNS_REVERSE (bb, insn)
        {
          if (!INSN_P (insn))
            continue;

	  /* Log links are created only once.  */
	  gcc_assert (!LOG_LINKS (insn));

          for (def_vec = DF_INSN_DEFS (insn); *def_vec; def_vec++)
            {
	      struct df_ref *def = *def_vec;
              int regno = DF_REF_REGNO (def);
              rtx use_insn;

              if (!next_use[regno])
                continue;

              /* Do not consider if it is pre/post modification in MEM.  */
              if (DF_REF_FLAGS (def) & DF_REF_PRE_POST_MODIFY)
                continue;

              /* Do not make the log link for frame pointer.  */
              if ((regno == FRAME_POINTER_REGNUM
                   && (! reload_completed || frame_pointer_needed))
#if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
                  || (regno == HARD_FRAME_POINTER_REGNUM
                      && (! reload_completed || frame_pointer_needed))
#endif
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
                  || (regno == ARG_POINTER_REGNUM && fixed_regs[regno])
#endif
                  )
                continue;

              use_insn = next_use[regno];
              if (BLOCK_FOR_INSN (use_insn) == bb)
                {
                  /* flow.c claimed:

                     We don't build a LOG_LINK for hard registers contained
                     in ASM_OPERANDs.  If these registers get replaced,
                     we might wind up changing the semantics of the insn,
                     even if reload can make what appear to be valid
                     assignments later.  */
                  if (regno >= FIRST_PSEUDO_REGISTER
                      || asm_noperands (PATTERN (use_insn)) < 0)
		    {
		      /* Don't add duplicate links between instructions.  */
		      rtx links;
		      for (links = LOG_LINKS (use_insn); links;
			   links = XEXP (links, 1))
		        if (insn == XEXP (links, 0))
			  break;

		      if (!links)
			LOG_LINKS (use_insn) =
			  alloc_INSN_LIST (insn, LOG_LINKS (use_insn));
		    }
                }
              next_use[regno] = NULL_RTX;
            }

          for (use_vec = DF_INSN_USES (insn); *use_vec; use_vec++)
            {
	      struct df_ref *use = *use_vec;
	      int regno = DF_REF_REGNO (use);

              /* Do not consider the usage of the stack pointer
		 by function call.  */
              if (DF_REF_FLAGS (use) & DF_REF_CALL_STACK_USAGE)
                continue;

              next_use[regno] = insn;
            }
        }
    }

  free (next_use);
}

/* Clear LOG_LINKS fields of insns.  */

static void
clear_log_links (void)
{
  rtx insn;

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      free_INSN_LIST_list (&LOG_LINKS (insn));
}




/* Main entry point for combiner.  F is the first insn of the function.
   NREGS is the first unused pseudo-reg number.

   Return nonzero if the combiner has turned an indirect jump
   instruction into a direct jump.  */
static int
combine_instructions (rtx f, unsigned int nregs)
{
  rtx insn, next;
#ifdef HAVE_cc0
  rtx prev;
#endif
  rtx links, nextlinks;
  rtx first;

  int new_direct_jump_p = 0;

  for (first = f; first && !INSN_P (first); )
    first = NEXT_INSN (first);
  if (!first)
    return 0;

  combine_attempts = 0;
  combine_merges = 0;
  combine_extras = 0;
  combine_successes = 0;

  rtl_hooks = combine_rtl_hooks;

  VEC_safe_grow_cleared (reg_stat_type, heap, reg_stat, nregs);

  init_recog_no_volatile ();

  /* Allocate array for insn info.  */
  max_uid_known = get_max_uid ();
  uid_log_links = XCNEWVEC (rtx, max_uid_known + 1);
  uid_insn_cost = XCNEWVEC (int, max_uid_known + 1);

  nonzero_bits_mode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);

  /* Don't use reg_stat[].nonzero_bits when computing it.  This can cause
     problems when, for example, we have j <<= 1 in a loop.  */

  nonzero_sign_valid = 0;

  /* Scan all SETs and see if we can deduce anything about what
     bits are known to be zero for some registers and how many copies
     of the sign bit are known to exist for those registers.

     Also set any known values so that we can use it while searching
     for what bits are known to be set.  */

  label_tick = label_tick_ebb_start = 1;

  setup_incoming_promotions (first);

  create_log_links ();
  FOR_EACH_BB (this_basic_block)
    {
      last_call_luid = 0;
      mem_last_set = -1;
      label_tick++;
      FOR_BB_INSNS (this_basic_block, insn)
        if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
	  {
            subst_low_luid = DF_INSN_LUID (insn);
            subst_insn = insn;

	    note_stores (PATTERN (insn), set_nonzero_bits_and_sign_copies,
		         insn);
	    record_dead_and_set_regs (insn);

#ifdef AUTO_INC_DEC
	    for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
	      if (REG_NOTE_KIND (links) == REG_INC)
	        set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX,
						  insn);
#endif

	    /* Record the current insn_rtx_cost of this instruction.  */
	    if (NONJUMP_INSN_P (insn))
	      INSN_COST (insn) = insn_rtx_cost (PATTERN (insn));
	    if (dump_file)
	      fprintf(dump_file, "insn_cost %d: %d\n",
		    INSN_UID (insn), INSN_COST (insn));
	  }
	else if (LABEL_P (insn))
	  label_tick_ebb_start = label_tick;
    }

  nonzero_sign_valid = 1;

  /* Now scan all the insns in forward order.  */

  label_tick = label_tick_ebb_start = 1;
  init_reg_last ();
  setup_incoming_promotions (first);

  FOR_EACH_BB (this_basic_block)
    {
      last_call_luid = 0;
      mem_last_set = -1;
      label_tick++;
      for (insn = BB_HEAD (this_basic_block);
	   insn != NEXT_INSN (BB_END (this_basic_block));
	   insn = next ? next : NEXT_INSN (insn))
	{
	  next = 0;
	  if (INSN_P (insn))
	    {
	      /* See if we know about function return values before this
		 insn based upon SUBREG flags.  */
	      check_promoted_subreg (insn, PATTERN (insn));

	      /* See if we can find hardregs and subreg of pseudos in
		 narrower modes.  This could help turning TRUNCATEs
		 into SUBREGs.  */
	      note_uses (&PATTERN (insn), record_truncated_values, NULL);

	      /* Try this insn with each insn it links back to.  */

	      for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
		if ((next = try_combine (insn, XEXP (links, 0),
					 NULL_RTX, &new_direct_jump_p)) != 0)
		  goto retry;

	      /* Try each sequence of three linked insns ending with this one.  */

	      for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
		{
		  rtx link = XEXP (links, 0);

		  /* If the linked insn has been replaced by a note, then there
		     is no point in pursuing this chain any further.  */
		  if (NOTE_P (link))
		    continue;

		  for (nextlinks = LOG_LINKS (link);
		       nextlinks;
		       nextlinks = XEXP (nextlinks, 1))
		    if ((next = try_combine (insn, link,
					     XEXP (nextlinks, 0),
					     &new_direct_jump_p)) != 0)
		      goto retry;
		}

#ifdef HAVE_cc0
	      /* Try to combine a jump insn that uses CC0
		 with a preceding insn that sets CC0, and maybe with its
		 logical predecessor as well.
		 This is how we make decrement-and-branch insns.
		 We need this special code because data flow connections
		 via CC0 do not get entered in LOG_LINKS.  */

	      if (JUMP_P (insn)
		  && (prev = prev_nonnote_insn (insn)) != 0
		  && NONJUMP_INSN_P (prev)
		  && sets_cc0_p (PATTERN (prev)))
		{
		  if ((next = try_combine (insn, prev,
					   NULL_RTX, &new_direct_jump_p)) != 0)
		    goto retry;

		  for (nextlinks = LOG_LINKS (prev); nextlinks;
		       nextlinks = XEXP (nextlinks, 1))
		    if ((next = try_combine (insn, prev,
					     XEXP (nextlinks, 0),
					     &new_direct_jump_p)) != 0)
		      goto retry;
		}

	      /* Do the same for an insn that explicitly references CC0.  */
	      if (NONJUMP_INSN_P (insn)
		  && (prev = prev_nonnote_insn (insn)) != 0
		  && NONJUMP_INSN_P (prev)
		  && sets_cc0_p (PATTERN (prev))
		  && GET_CODE (PATTERN (insn)) == SET
		  && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
		{
		  if ((next = try_combine (insn, prev,
					   NULL_RTX, &new_direct_jump_p)) != 0)
		    goto retry;

		  for (nextlinks = LOG_LINKS (prev); nextlinks;
		       nextlinks = XEXP (nextlinks, 1))
		    if ((next = try_combine (insn, prev,
					     XEXP (nextlinks, 0),
					     &new_direct_jump_p)) != 0)
		      goto retry;
		}

	      /* Finally, see if any of the insns that this insn links to
		 explicitly references CC0.  If so, try this insn, that insn,
		 and its predecessor if it sets CC0.  */
	      for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
		if (NONJUMP_INSN_P (XEXP (links, 0))
		    && GET_CODE (PATTERN (XEXP (links, 0))) == SET
		    && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (XEXP (links, 0))))
		    && (prev = prev_nonnote_insn (XEXP (links, 0))) != 0
		    && NONJUMP_INSN_P (prev)
		    && sets_cc0_p (PATTERN (prev))
		    && (next = try_combine (insn, XEXP (links, 0),
					    prev, &new_direct_jump_p)) != 0)
		  goto retry;
#endif

	      /* Try combining an insn with two different insns whose results it
		 uses.  */
	      for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
		for (nextlinks = XEXP (links, 1); nextlinks;
		     nextlinks = XEXP (nextlinks, 1))
		  if ((next = try_combine (insn, XEXP (links, 0),
					   XEXP (nextlinks, 0),
					   &new_direct_jump_p)) != 0)
		    goto retry;

	      /* Try this insn with each REG_EQUAL note it links back to.  */
	      for (links = LOG_LINKS (insn); links; links = XEXP (links, 1))
		{
		  rtx set, note;
		  rtx temp = XEXP (links, 0);
		  if ((set = single_set (temp)) != 0
		      && (note = find_reg_equal_equiv_note (temp)) != 0
		      && (note = XEXP (note, 0), GET_CODE (note)) != EXPR_LIST
		      /* Avoid using a register that may already been marked
			 dead by an earlier instruction.  */
		      && ! unmentioned_reg_p (note, SET_SRC (set))
		      && (GET_MODE (note) == VOIDmode
			  ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set)))
			  : GET_MODE (SET_DEST (set)) == GET_MODE (note)))
		    {
		      /* Temporarily replace the set's source with the
			 contents of the REG_EQUAL note.  The insn will
			 be deleted or recognized by try_combine.  */
		      rtx orig = SET_SRC (set);
		      SET_SRC (set) = note;
		      i2mod = temp;
		      i2mod_old_rhs = copy_rtx (orig);
		      i2mod_new_rhs = copy_rtx (note);
		      next = try_combine (insn, i2mod, NULL_RTX,
					  &new_direct_jump_p);
		      i2mod = NULL_RTX;
		      if (next)
			goto retry;
		      SET_SRC (set) = orig;
		    }
		}

	      if (!NOTE_P (insn))
		record_dead_and_set_regs (insn);

	    retry:
	      ;
	    }
	  else if (LABEL_P (insn))
	    label_tick_ebb_start = label_tick;
	}
    }

  clear_log_links ();
  clear_bb_flags ();
  new_direct_jump_p |= purge_all_dead_edges ();
  delete_noop_moves ();

  /* Clean up.  */
  free (uid_log_links);
  free (uid_insn_cost);
  VEC_free (reg_stat_type, heap, reg_stat);

  {
    struct undo *undo, *next;
    for (undo = undobuf.frees; undo; undo = next)
      {
	next = undo->next;
	free (undo);
      }
    undobuf.frees = 0;
  }

  total_attempts += combine_attempts;
  total_merges += combine_merges;
  total_extras += combine_extras;
  total_successes += combine_successes;

  nonzero_sign_valid = 0;
  rtl_hooks = general_rtl_hooks;

  /* Make recognizer allow volatile MEMs again.  */
  init_recog ();

  return new_direct_jump_p;
}

/* Wipe the last_xxx fields of reg_stat in preparation for another pass.  */

static void
init_reg_last (void)
{
  unsigned int i;
  reg_stat_type *p;

  for (i = 0; VEC_iterate (reg_stat_type, reg_stat, i, p); ++i)
    memset (p, 0, offsetof (reg_stat_type, sign_bit_copies));
}

/* Set up any promoted values for incoming argument registers.  */

static void
setup_incoming_promotions (rtx first)
{
  tree arg;
  bool strictly_local = false;

  if (!targetm.calls.promote_function_args (TREE_TYPE (cfun->decl)))
    return;

  for (arg = DECL_ARGUMENTS (current_function_decl); arg;
       arg = TREE_CHAIN (arg))
    {
      rtx reg = DECL_INCOMING_RTL (arg);
      int uns1, uns3;
      enum machine_mode mode1, mode2, mode3, mode4;

      /* Only continue if the incoming argument is in a register.  */
      if (!REG_P (reg))
	continue;

      /* Determine, if possible, whether all call sites of the current
         function lie within the current compilation unit.  (This does
	 take into account the exporting of a function via taking its
	 address, and so forth.)  */
      if (flag_unit_at_a_time)
	strictly_local = cgraph_local_info (current_function_decl)->local;

      /* The mode and signedness of the argument before any promotions happen
         (equal to the mode of the pseudo holding it at that stage).  */
      mode1 = TYPE_MODE (TREE_TYPE (arg));
      uns1 = TYPE_UNSIGNED (TREE_TYPE (arg));

      /* The mode and signedness of the argument after any source language and
         TARGET_PROMOTE_PROTOTYPES-driven promotions.  */
      mode2 = TYPE_MODE (DECL_ARG_TYPE (arg));
      uns3 = TYPE_UNSIGNED (DECL_ARG_TYPE (arg));

      /* The mode and signedness of the argument as it is actually passed, 
         after any TARGET_PROMOTE_FUNCTION_ARGS-driven ABI promotions.  */
      mode3 = promote_mode (DECL_ARG_TYPE (arg), mode2, &uns3, 1);

      /* The mode of the register in which the argument is being passed.  */
      mode4 = GET_MODE (reg);

      /* Eliminate sign extensions in the callee when possible.  Only
         do this when:
	 (a) a mode promotion has occurred;
	 (b) the mode of the register is the same as the mode of
	     the argument as it is passed; and
	 (c) the signedness does not change across any of the promotions; and
	 (d) when no language-level promotions (which we cannot guarantee
	     will have been done by an external caller) are necessary,
	     unless we know that this function is only ever called from
	     the current compilation unit -- all of whose call sites will
	     do the mode1 --> mode2 promotion.  */
      if (mode1 != mode3
          && mode3 == mode4
          && uns1 == uns3
	  && (mode1 == mode2 || strictly_local))
        {
	  /* Record that the value was promoted from mode1 to mode3,
	     so that any sign extension at the head of the current
	     function may be eliminated.  */
	  rtx x;
	  x = gen_rtx_CLOBBER (mode1, const0_rtx);
	  x = gen_rtx_fmt_e ((uns3 ? ZERO_EXTEND : SIGN_EXTEND), mode3, x);
	  record_value_for_reg (reg, first, x);
	}
    }
}

/* Called via note_stores.  If X is a pseudo that is narrower than
   HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero.

   If we are setting only a portion of X and we can't figure out what
   portion, assume all bits will be used since we don't know what will
   be happening.

   Similarly, set how many bits of X are known to be copies of the sign bit
   at all locations in the function.  This is the smallest number implied
   by any set of X.  */

static void
set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data)
{
  rtx insn = (rtx) data;
  unsigned int num;

  if (REG_P (x)
      && REGNO (x) >= FIRST_PSEUDO_REGISTER
      /* If this register is undefined at the start of the file, we can't
	 say what its contents were.  */
      && ! REGNO_REG_SET_P
           (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), REGNO (x))
      && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT)
    {
      reg_stat_type *rsp = VEC_index (reg_stat_type, reg_stat, REGNO (x));

      if (set == 0 || GET_CODE (set) == CLOBBER)
	{
	  rsp->nonzero_bits = GET_MODE_MASK (GET_MODE (x));
	  rsp->sign_bit_copies = 1;
	  return;
	}

      /* If this register is being initialized using itself, and the
	 register is uninitialized in this basic block, and there are
	 no LOG_LINKS which set the register, then part of the
	 register is uninitialized.  In that case we can't assume
	 anything about the number of nonzero bits.

	 ??? We could do better if we checked this in
	 reg_{nonzero_bits,num_sign_bit_copies}_for_combine.  Then we
	 could avoid making assumptions about the insn which initially
	 sets the register, while still using the information in other
	 insns.  We would have to be careful to check every insn
	 involved in the combination.  */

      if (insn
	  && reg_referenced_p (x, PATTERN (insn))
	  && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn)),
			       REGNO (x)))
	{
	  rtx link;

	  for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
	    {
	      if (dead_or_set_p (XEXP (link, 0), x))
		break;
	    }
	  if (!link)
	    {
	      rsp->nonzero_bits = GET_MODE_MASK (GET_MODE (x));
	      rsp->sign_bit_copies = 1;
	      return;
	    }
	}

      /* If this is a complex assignment, see if we can convert it into a
	 simple assignment.  */
      set = expand_field_assignment (set);

      /* If this is a simple assignment, or we have a paradoxical SUBREG,
	 set what we know about X.  */

      if (SET_DEST (set) == x
	  || (GET_CODE (SET_DEST (set)) == SUBREG
	      && (GET_MODE_SIZE (GET_MODE (SET_DEST (set)))
		  > GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (set)))))
	      && SUBREG_REG (SET_DEST (set)) == x))
	{
	  rtx src = SET_SRC (set);

#ifdef SHORT_IMMEDIATES_SIGN_EXTEND
	  /* If X is narrower than a word and SRC is a non-negative
	     constant that would appear negative in the mode of X,
	     sign-extend it for use in reg_stat[].nonzero_bits because some
	     machines (maybe most) will actually do the sign-extension
	     and this is the conservative approach.

	     ??? For 2.5, try to tighten up the MD files in this regard
	     instead of this kludge.  */

	  if (GET_MODE_BITSIZE (GET_MODE (x)) < BITS_PER_WORD
	      && GET_CODE (src) == CONST_INT
	      && INTVAL (src) > 0
	      && 0 != (INTVAL (src)
		       & ((HOST_WIDE_INT) 1
			  << (GET_MODE_BITSIZE (GET_MODE (x)) - 1))))
	    src = GEN_INT (INTVAL (src)
			   | ((HOST_WIDE_INT) (-1)
			      << GET_MODE_BITSIZE (GET_MODE (x))));
#endif

	  /* Don't call nonzero_bits if it cannot change anything.  */
	  if (rsp->nonzero_bits != ~(unsigned HOST_WIDE_INT) 0)
	    rsp->nonzero_bits |= nonzero_bits (src, nonzero_bits_mode);
	  num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x));
	  if (rsp->sign_bit_copies == 0
	      || rsp->sign_bit_copies > num)
	    rsp->sign_bit_copies = num;
	}
      else
	{
	  rsp->nonzero_bits = GET_MODE_MASK (GET_MODE (x));
	  rsp->sign_bit_copies = 1;
	}
    }
}

/* See if INSN can be combined into I3.  PRED and SUCC are optionally
   insns that were previously combined into I3 or that will be combined
   into the merger of INSN and I3.

   Return 0 if the combination is not allowed for any reason.

   If the combination is allowed, *PDEST will be set to the single
   destination of INSN and *PSRC to the single source, and this function
   will return 1.  */

static int
can_combine_p (rtx insn, rtx i3, rtx pred ATTRIBUTE_UNUSED, rtx succ,
	       rtx *pdest, rtx *psrc)
{
  int i;
  const_rtx set = 0;
  rtx src, dest;
  rtx p;
#ifdef AUTO_INC_DEC
  rtx link;
#endif
  int all_adjacent = (succ ? (next_active_insn (insn) == succ
			      && next_active_insn (succ) == i3)
		      : next_active_insn (insn) == i3);

  /* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0.
     or a PARALLEL consisting of such a SET and CLOBBERs.

     If INSN has CLOBBER parallel parts, ignore them for our processing.
     By definition, these happen during the execution of the insn.  When it
     is merged with another insn, all bets are off.  If they are, in fact,
     needed and aren't also supplied in I3, they may be added by
     recog_for_combine.  Otherwise, it won't match.

     We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
     note.

     Get the source and destination of INSN.  If more than one, can't
     combine.  */

  if (GET_CODE (PATTERN (insn)) == SET)
    set = PATTERN (insn);
  else if (GET_CODE (PATTERN (insn)) == PARALLEL
	   && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
    {
      for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
	{
	  rtx elt = XVECEXP (PATTERN (insn), 0, i);
	  rtx note;

	  switch (GET_CODE (elt))
	    {
	    /* This is important to combine floating point insns
	       for the SH4 port.  */
	    case USE:
	      /* Combining an isolated USE doesn't make sense.
		 We depend here on combinable_i3pat to reject them.  */
	      /* The code below this loop only verifies that the inputs of
		 the SET in INSN do not change.  We call reg_set_between_p
		 to verify that the REG in the USE does not change between
		 I3 and INSN.
		 If the USE in INSN was for a pseudo register, the matching
		 insn pattern will likely match any register; combining this
		 with any other USE would only be safe if we knew that the
		 used registers have identical values, or if there was
		 something to tell them apart, e.g. different modes.  For
		 now, we forgo such complicated tests and simply disallow
		 combining of USES of pseudo registers with any other USE.  */
	      if (REG_P (XEXP (elt, 0))
		  && GET_CODE (PATTERN (i3)) == PARALLEL)
		{
		  rtx i3pat = PATTERN (i3);
		  int i = XVECLEN (i3pat, 0) - 1;
		  unsigned int regno = REGNO (XEXP (elt, 0));

		  do
		    {
		      rtx i3elt = XVECEXP (i3pat, 0, i);

		      if (GET_CODE (i3elt) == USE
			  && REG_P (XEXP (i3elt, 0))
			  && (REGNO (XEXP (i3elt, 0)) == regno
			      ? reg_set_between_p (XEXP (elt, 0),
						   PREV_INSN (insn), i3)
			      : regno >= FIRST_PSEUDO_REGISTER))
			return 0;
		    }
		  while (--i >= 0);
		}
	      break;

	      /* We can ignore CLOBBERs.  */
	    case CLOBBER:
	      break;

	    case SET:
	      /* Ignore SETs whose result isn't used but not those that
		 have side-effects.  */
	      if (find_reg_note (insn, REG_UNUSED, SET_DEST (elt))
		  && (!(note = find_reg_note (insn, REG_EH_REGION, NULL_RTX))
		      || INTVAL (XEXP (note, 0)) <= 0)
		  && ! side_effects_p (elt))
		break;

	      /* If we have already found a SET, this is a second one and
		 so we cannot combine with this insn.  */
	      if (set)
		return 0;

	      set = elt;
	      break;

	    default:
	      /* Anything else means we can't combine.  */
	      return 0;
	    }
	}

      if (set == 0
	  /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
	     so don't do anything with it.  */
	  || GET_CODE (SET_SRC (set)) == ASM_OPERANDS)
	return 0;
    }
  else
    return 0;

  if (set == 0)
    return 0;

  set = expand_field_assignment (set);
  src = SET_SRC (set), dest = SET_DEST (set);

  /* Don't eliminate a store in the stack pointer.  */
  if (dest == stack_pointer_rtx
      /* Don't combine with an insn that sets a register to itself if it has
	 a REG_EQUAL note.  This may be part of a LIBCALL sequence.  */
      || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX))
      /* Can't merge an ASM_OPERANDS.  */
      || GET_CODE (src) == ASM_OPERANDS
      /* Can't merge a function call.  */
      || GET_CODE (src) == CALL
      /* Don't eliminate a function call argument.  */
      || (CALL_P (i3)
	  && (find_reg_fusage (i3, USE, dest)
	      || (REG_P (dest)
		  && REGNO (dest) < FIRST_PSEUDO_REGISTER
		  && global_regs[REGNO (dest)])))
      /* Don't substitute into an incremented register.  */
      || FIND_REG_INC_NOTE (i3, dest)
      || (succ && FIND_REG_INC_NOTE (succ, dest))
      /* Don't substitute into a non-local goto, this confuses CFG.  */
      || (JUMP_P (i3) && find_reg_note (i3, REG_NON_LOCAL_GOTO, NULL_RTX))
#if 0
      /* Don't combine the end of a libcall into anything.  */
      /* ??? This gives worse code, and appears to be unnecessary, since no
	 pass after flow uses REG_LIBCALL/REG_RETVAL notes.  Local-alloc does
	 use REG_RETVAL notes for noconflict blocks, but other code here
	 makes sure that those insns don't disappear.  */
      || find_reg_note (insn, REG_RETVAL, NULL_RTX)
#endif
      /* Make sure that DEST is not used after SUCC but before I3.  */
      || (succ && ! all_adjacent
	  && reg_used_between_p (dest, succ, i3))
      /* Make sure that the value that is to be substituted for the register
	 does not use any registers whose values alter in between.  However,
	 If the insns are adjacent, a use can't cross a set even though we
	 think it might (this can happen for a sequence of insns each setting
	 the same destination; last_set of that register might point to
	 a NOTE).  If INSN has a REG_EQUIV note, the register is always
	 equivalent to the memory so the substitution is valid even if there
	 are intervening stores.  Also, don't move a volatile asm or
	 UNSPEC_VOLATILE across any other insns.  */
      || (! all_adjacent
	  && (((!MEM_P (src)
		|| ! find_reg_note (insn, REG_EQUIV, src))
	       && use_crosses_set_p (src, DF_INSN_LUID (insn)))
	      || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
	      || GET_CODE (src) == UNSPEC_VOLATILE))
      /* Don't combine across a CALL_INSN, because that would possibly
	 change whether the life span of some REGs crosses calls or not,
	 and it is a pain to update that information.
	 Exception: if source is a constant, moving it later can't hurt.
	 Accept that as a special case.  */
      || (DF_INSN_LUID (insn) < last_call_luid && ! CONSTANT_P (src)))
    return 0;

  /* DEST must either be a REG or CC0.  */
  if (REG_P (dest))
    {
      /* If register alignment is being enforced for multi-word items in all
	 cases except for parameters, it is possible to have a register copy
	 insn referencing a hard register that is not allowed to contain the
	 mode being copied and which would not be valid as an operand of most
	 insns.  Eliminate this problem by not combining with such an insn.

	 Also, on some machines we don't want to extend the life of a hard
	 register.  */

      if (REG_P (src)
	  && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
	       && ! HARD_REGNO_MODE_OK (REGNO (dest), GET_MODE (dest)))
	      /* Don't extend the life of a hard register unless it is
		 user variable (if we have few registers) or it can't
		 fit into the desired register (meaning something special
		 is going on).
		 Also avoid substituting a return register into I3, because
		 reload can't handle a conflict with constraints of other
		 inputs.  */
	      || (REGNO (src) < FIRST_PSEUDO_REGISTER
		  && ! HARD_REGNO_MODE_OK (REGNO (src), GET_MODE (src)))))
	return 0;
    }
  else if (GET_CODE (dest) != CC0)
    return 0;


  if (GET_CODE (PATTERN (i3)) == PARALLEL)
    for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
      if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER)
	{
	  /* Don't substitute for a register intended as a clobberable
	     operand.  */
	  rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0);
	  if (rtx_equal_p (reg, dest))
	    return 0;

	  /* If the clobber represents an earlyclobber operand, we must not
	     substitute an expression containing the clobbered register.
	     As we do not analyze the constraint strings here, we have to
	     make the conservative assumption.  However, if the register is
	     a fixed hard reg, the clobber cannot represent any operand;
	     we leave it up to the machine description to either accept or
	     reject use-and-clobber patterns.  */
	  if (!REG_P (reg)
	      || REGNO (reg) >= FIRST_PSEUDO_REGISTER
	      || !fixed_regs[REGNO (reg)])
	    if (reg_overlap_mentioned_p (reg, src))
	      return 0;
	}

  /* If INSN contains anything volatile, or is an `asm' (whether volatile
     or not), reject, unless nothing volatile comes between it and I3 */

  if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src))
    {
      /* Make sure succ doesn't contain a volatile reference.  */
      if (succ != 0 && volatile_refs_p (PATTERN (succ)))
	return 0;

      for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
	if (INSN_P (p) && p != succ && volatile_refs_p (PATTERN (p)))
	  return 0;
    }

  /* If INSN is an asm, and DEST is a hard register, reject, since it has
     to be an explicit register variable, and was chosen for a reason.  */

  if (GET_CODE (src) == ASM_OPERANDS
      && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
    return 0;

  /* If there are any volatile insns between INSN and I3, reject, because
     they might affect machine state.  */

  for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
    if (INSN_P (p) && p != succ && volatile_insn_p (PATTERN (p)))
      return 0;

  /* If INSN contains an autoincrement or autodecrement, make sure that
     register is not used between there and I3, and not already used in
     I3 either.  Neither must it be used in PRED or SUCC, if they exist.
     Also insist that I3 not be a jump; if it were one
     and the incremented register were spilled, we would lose.  */

#ifdef AUTO_INC_DEC
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_INC
	&& (JUMP_P (i3)
	    || reg_used_between_p (XEXP (link, 0), insn, i3)
	    || (pred != NULL_RTX
		&& reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred)))
	    || (succ != NULL_RTX
		&& reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ)))
	    || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
      return 0;
#endif

#ifdef HAVE_cc0
  /* Don't combine an insn that follows a CC0-setting insn.
     An insn that uses CC0 must not be separated from the one that sets it.
     We do, however, allow I2 to follow a CC0-setting insn if that insn
     is passed as I1; in that case it will be deleted also.
     We also allow combining in this case if all the insns are adjacent
     because that would leave the two CC0 insns adjacent as well.
     It would be more logical to test whether CC0 occurs inside I1 or I2,
     but that would be much slower, and this ought to be equivalent.  */

  p = prev_nonnote_insn (insn);
  if (p && p != pred && NONJUMP_INSN_P (p) && sets_cc0_p (PATTERN (p))
      && ! all_adjacent)
    return 0;
#endif

  /* If we get here, we have passed all the tests and the combination is
     to be allowed.  */

  *pdest = dest;
  *psrc = src;

  return 1;
}

/* LOC is the location within I3 that contains its pattern or the component
   of a PARALLEL of the pattern.  We validate that it is valid for combining.

   One problem is if I3 modifies its output, as opposed to replacing it
   entirely, we can't allow the output to contain I2DEST or I1DEST as doing
   so would produce an insn that is not equivalent to the original insns.

   Consider:

	 (set (reg:DI 101) (reg:DI 100))
	 (set (subreg:SI (reg:DI 101) 0) <foo>)

   This is NOT equivalent to:

	 (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
		    (set (reg:DI 101) (reg:DI 100))])

   Not only does this modify 100 (in which case it might still be valid
   if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.

   We can also run into a problem if I2 sets a register that I1
   uses and I1 gets directly substituted into I3 (not via I2).  In that
   case, we would be getting the wrong value of I2DEST into I3, so we
   must reject the combination.  This case occurs when I2 and I1 both
   feed into I3, rather than when I1 feeds into I2, which feeds into I3.
   If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source
   of a SET must prevent combination from occurring.

   Before doing the above check, we first try to expand a field assignment
   into a set of logical operations.

   If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which
   we place a register that is both set and used within I3.  If more than one
   such register is detected, we fail.

   Return 1 if the combination is valid, zero otherwise.  */

static int
combinable_i3pat (rtx i3, rtx *loc, rtx i2dest, rtx i1dest,
		  int i1_not_in_src, rtx *pi3dest_killed)
{
  rtx x = *loc;

  if (GET_CODE (x) == SET)
    {
      rtx set = x ;
      rtx dest = SET_DEST (set);
      rtx src = SET_SRC (set);
      rtx inner_dest = dest;
      rtx subdest;

      while (GET_CODE (inner_dest) == STRICT_LOW_PART
	     || GET_CODE (inner_dest) == SUBREG
	     || GET_CODE (inner_dest) == ZERO_EXTRACT)
	inner_dest = XEXP (inner_dest, 0);

      /* Check for the case where I3 modifies its output, as discussed
	 above.  We don't want to prevent pseudos from being combined
	 into the address of a MEM, so only prevent the combination if
	 i1 or i2 set the same MEM.  */
      if ((inner_dest != dest &&
	   (!MEM_P (inner_dest)
	    || rtx_equal_p (i2dest, inner_dest)
	    || (i1dest && rtx_equal_p (i1dest, inner_dest)))
	   && (reg_overlap_mentioned_p (i2dest, inner_dest)
	       || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))))

	  /* This is the same test done in can_combine_p except we can't test
	     all_adjacent; we don't have to, since this instruction will stay
	     in place, thus we are not considering increasing the lifetime of
	     INNER_DEST.

	     Also, if this insn sets a function argument, combining it with
	     something that might need a spill could clobber a previous
	     function argument; the all_adjacent test in can_combine_p also
	     checks this; here, we do a more specific test for this case.  */

	  || (REG_P (inner_dest)
	      && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
	      && (! HARD_REGNO_MODE_OK (REGNO (inner_dest),
					GET_MODE (inner_dest))))
	  || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src)))
	return 0;

      /* If DEST is used in I3, it is being killed in this insn, so
	 record that for later.  We have to consider paradoxical
	 subregs here, since they kill the whole register, but we
	 ignore partial subregs, STRICT_LOW_PART, etc.
	 Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
	 STACK_POINTER_REGNUM, since these are always considered to be
	 live.  Similarly for ARG_POINTER_REGNUM if it is fixed.  */
      subdest = dest;
      if (GET_CODE (subdest) == SUBREG
	  && (GET_MODE_SIZE (GET_MODE (subdest))
	      >= GET_MODE_SIZE (GET_MODE (SUBREG_REG (subdest)))))
	subdest = SUBREG_REG (subdest);
      if (pi3dest_killed
	  && REG_P (subdest)
	  && reg_referenced_p (subdest, PATTERN (i3))
	  && REGNO (subdest) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
	  && REGNO (subdest) != HARD_FRAME_POINTER_REGNUM
#endif
#if ARG_POINTER_REGNUM != FRAME_POINTER_REGNUM
	  && (REGNO (subdest) != ARG_POINTER_REGNUM
	      || ! fixed_regs [REGNO (subdest)])
#endif
	  && REGNO (subdest) != STACK_POINTER_REGNUM)
	{
	  if (*pi3dest_killed)
	    return 0;

	  *pi3dest_killed = subdest;
	}
    }

  else if (GET_CODE (x) == PARALLEL)
    {
      int i;

      for (i = 0; i < XVECLEN (x, 0); i++)
	if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest,
				i1_not_in_src, pi3dest_killed))
	  return 0;
    }

  return 1;
}

/* Return 1 if X is an arithmetic expression that contains a multiplication
   and division.  We don't count multiplications by powers of two here.  */

static int
contains_muldiv (rtx x)
{
  switch (GET_CODE (x))
    {
    case MOD:  case DIV:  case UMOD:  case UDIV:
      return 1;

    case MULT:
      return ! (GET_CODE (XEXP (x, 1)) == CONST_INT
		&& exact_log2 (INTVAL (XEXP (x, 1))) >= 0);
    default:
      if (BINARY_P (x))
	return contains_muldiv (XEXP (x, 0))
	    || contains_muldiv (XEXP (x, 1));

      if (UNARY_P (x))
	return contains_muldiv (XEXP (x, 0));

      return 0;
    }
}

/* Determine whether INSN can be used in a combination.  Return nonzero if
   not.  This is used in try_combine to detect early some cases where we
   can't perform combinations.  */

static int
cant_combine_insn_p (rtx insn)
{
  rtx set;
  rtx src, dest;

  /* If this isn't really an insn, we can't do anything.
     This can occur when flow deletes an insn that it has merged into an
     auto-increment address.  */
  if (! INSN_P (insn))
    return 1;

  /* Never combine loads and stores involving hard regs that are likely
     to be spilled.  The register allocator can usually handle such
     reg-reg moves by tying.  If we allow the combiner to make
     substitutions of likely-spilled regs, reload might die.
     As an exception, we allow combinations involving fixed regs; these are
     not available to the register allocator so there's no risk involved.  */

  set = single_set (insn);
  if (! set)
    return 0;
  src = SET_SRC (set);
  dest = SET_DEST (set);
  if (GET_CODE (src) == SUBREG)
    src = SUBREG_REG (src);
  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);
  if (REG_P (src) && REG_P (dest)
      && ((REGNO (src) < FIRST_PSEUDO_REGISTER
	   && ! fixed_regs[REGNO (src)]
	   && CLASS_LIKELY_SPILLED_P (REGNO_REG_CLASS (REGNO (src))))
	  || (REGNO (dest) < FIRST_PSEUDO_REGISTER
	      && ! fixed_regs[REGNO (dest)]
	      && CLASS_LIKELY_SPILLED_P (REGNO_REG_CLASS (REGNO (dest))))))
    return 1;

  return 0;
}

struct likely_spilled_retval_info
{
  unsigned regno, nregs;
  unsigned mask;
};

/* Called via note_stores by likely_spilled_retval_p.  Remove from info->mask
   hard registers that are known to be written to / clobbered in full.  */
static void
likely_spilled_retval_1 (rtx x, const_rtx set, void *data)
{
  struct likely_spilled_retval_info *info = data;
  unsigned regno, nregs;
  unsigned new_mask;

  if (!REG_P (XEXP (set, 0)))
    return;
  regno = REGNO (x);
  if (regno >= info->regno + info->nregs)
    return;
  nregs = hard_regno_nregs[regno][GET_MODE (x)];
  if (regno + nregs <= info->regno)
    return;
  new_mask = (2U << (nregs - 1)) - 1;
  if (regno < info->regno)
    new_mask >>= info->regno - regno;
  else
    new_mask <<= regno - info->regno;
  info->mask &= ~new_mask;
}

/* Return nonzero iff part of the return value is live during INSN, and
   it is likely spilled.  This can happen when more than one insn is needed
   to copy the return value, e.g. when we consider to combine into the
   second copy insn for a complex value.  */

static int
likely_spilled_retval_p (rtx insn)
{
  rtx use = BB_END (this_basic_block);
  rtx reg, p;
  unsigned regno, nregs;
  /* We assume here that no machine mode needs more than
     32 hard registers when the value overlaps with a register
     for which FUNCTION_VALUE_REGNO_P is true.  */
  unsigned mask;
  struct likely_spilled_retval_info info;

  if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use)
    return 0;
  reg = XEXP (PATTERN (use), 0);
  if (!REG_P (reg) || !FUNCTION_VALUE_REGNO_P (REGNO (reg)))
    return 0;
  regno = REGNO (reg);
  nregs = hard_regno_nregs[regno][GET_MODE (reg)];
  if (nregs == 1)
    return 0;
  mask = (2U << (nregs - 1)) - 1;

  /* Disregard parts of the return value that are set later.  */
  info.regno = regno;
  info.nregs = nregs;
  info.mask = mask;
  for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p))
    if (INSN_P (p))
      note_stores (PATTERN (p), likely_spilled_retval_1, &info);
  mask = info.mask;

  /* Check if any of the (probably) live return value registers is
     likely spilled.  */
  nregs --;
  do
    {
      if ((mask & 1 << nregs)
	  && CLASS_LIKELY_SPILLED_P (REGNO_REG_CLASS (regno + nregs)))
	return 1;
    } while (nregs--);
  return 0;
}

/* Adjust INSN after we made a change to its destination.

   Changing the destination can invalidate notes that say something about
   the results of the insn and a LOG_LINK pointing to the insn.  */

static void
adjust_for_new_dest (rtx insn)
{
  /* For notes, be conservative and simply remove them.  */
  remove_reg_equal_equiv_notes (insn);

  /* The new insn will have a destination that was previously the destination
     of an insn just above it.  Call distribute_links to make a LOG_LINK from
     the next use of that destination.  */
  distribute_links (gen_rtx_INSN_LIST (VOIDmode, insn, NULL_RTX));

  df_insn_rescan (insn);
}

/* Return TRUE if combine can reuse reg X in mode MODE.
   ADDED_SETS is nonzero if the original set is still required.  */
static bool
can_change_dest_mode (rtx x, int added_sets, enum machine_mode mode)
{
  unsigned int regno;

  if (!REG_P(x))
    return false;

  regno = REGNO (x);
  /* Allow hard registers if the new mode is legal, and occupies no more
     registers than the old mode.  */
  if (regno < FIRST_PSEUDO_REGISTER)
    return (HARD_REGNO_MODE_OK (regno, mode)
	    && (hard_regno_nregs[regno][GET_MODE (x)]
		>= hard_regno_nregs[regno][mode]));

  /* Or a pseudo that is only used once.  */
  return (REG_N_SETS (regno) == 1 && !added_sets
	  && !REG_USERVAR_P (x));
}


/* Check whether X, the destination of a set, refers to part of
   the register specified by REG.  */

static bool
reg_subword_p (rtx x, rtx reg)
{
  /* Check that reg is an integer mode register.  */
  if (!REG_P (reg) || GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT)
    return false;

  if (GET_CODE (x) == STRICT_LOW_PART
      || GET_CODE (x) == ZERO_EXTRACT)
    x = XEXP (x, 0);

  return GET_CODE (x) == SUBREG
	 && SUBREG_REG (x) == reg
	 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT;
}


/* Try to combine the insns I1 and I2 into I3.
   Here I1 and I2 appear earlier than I3.
   I1 can be zero; then we combine just I2 into I3.

   If we are combining three insns and the resulting insn is not recognized,
   try splitting it into two insns.  If that happens, I2 and I3 are retained
   and I1 is pseudo-deleted by turning it into a NOTE.  Otherwise, I1 and I2
   are pseudo-deleted.

   Return 0 if the combination does not work.  Then nothing is changed.
   If we did the combination, return the insn at which combine should
   resume scanning.

   Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a
   new direct jump instruction.  */

static rtx
try_combine (rtx i3, rtx i2, rtx i1, int *new_direct_jump_p)
{
  /* New patterns for I3 and I2, respectively.  */
  rtx newpat, newi2pat = 0;
  rtvec newpat_vec_with_clobbers = 0;
  int substed_i2 = 0, substed_i1 = 0;
  /* Indicates need to preserve SET in I1 or I2 in I3 if it is not dead.  */
  int added_sets_1, added_sets_2;
  /* Total number of SETs to put into I3.  */
  int total_sets;
  /* Nonzero if I2's body now appears in I3.  */
  int i2_is_used;
  /* INSN_CODEs for new I3, new I2, and user of condition code.  */
  int insn_code_number, i2_code_number = 0, other_code_number = 0;
  /* Contains I3 if the destination of I3 is used in its source, which means
     that the old life of I3 is being killed.  If that usage is placed into
     I2 and not in I3, a REG_DEAD note must be made.  */
  rtx i3dest_killed = 0;
  /* SET_DEST and SET_SRC of I2 and I1.  */
  rtx i2dest, i2src, i1dest = 0, i1src = 0;
  /* PATTERN (I1) and PATTERN (I2), or a copy of it in certain cases.  */
  rtx i1pat = 0, i2pat = 0;
  /* Indicates if I2DEST or I1DEST is in I2SRC or I1_SRC.  */
  int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0;
  int i2dest_killed = 0, i1dest_killed = 0;
  int i1_feeds_i3 = 0;
  /* Notes that must be added to REG_NOTES in I3 and I2.  */
  rtx new_i3_notes, new_i2_notes;
  /* Notes that we substituted I3 into I2 instead of the normal case.  */
  int i3_subst_into_i2 = 0;
  /* Notes that I1, I2 or I3 is a MULT operation.  */
  int have_mult = 0;
  int swap_i2i3 = 0;

  int maxreg;
  rtx temp;
  rtx link;
  rtx other_pat = 0;
  rtx new_other_notes;
  int i;

  /* Exit early if one of the insns involved can't be used for
     combinations.  */
  if (cant_combine_insn_p (i3)
      || cant_combine_insn_p (i2)
      || (i1 && cant_combine_insn_p (i1))
      || likely_spilled_retval_p (i3)
      /* We also can't do anything if I3 has a
	 REG_LIBCALL note since we don't want to disrupt the contiguity of a
	 libcall.  */
#if 0
      /* ??? This gives worse code, and appears to be unnecessary, since no
	 pass after flow uses REG_LIBCALL/REG_RETVAL notes.  */
      || find_reg_note (i3, REG_LIBCALL, NULL_RTX)
#endif
      )
    return 0;

  combine_attempts++;
  undobuf.other_insn = 0;

  /* Reset the hard register usage information.  */
  CLEAR_HARD_REG_SET (newpat_used_regs);

  /* If I1 and I2 both feed I3, they can be in any order.  To simplify the
     code below, set I1 to be the earlier of the two insns.  */
  if (i1 && DF_INSN_LUID (i1) > DF_INSN_LUID (i2))
    temp = i1, i1 = i2, i2 = temp;

  added_links_insn = 0;

  /* First check for one important special-case that the code below will
     not handle.  Namely, the case where I1 is zero, I2 is a PARALLEL
     and I3 is a SET whose SET_SRC is a SET_DEST in I2.  In that case,
     we may be able to replace that destination with the destination of I3.
     This occurs in the common code where we compute both a quotient and
     remainder into a structure, in which case we want to do the computation
     directly into the structure to avoid register-register copies.

     Note that this case handles both multiple sets in I2 and also
     cases where I2 has a number of CLOBBER or PARALLELs.

     We make very conservative checks below and only try to handle the
     most common cases of this.  For example, we only handle the case
     where I2 and I3 are adjacent to avoid making difficult register
     usage tests.  */

  if (i1 == 0 && NONJUMP_INSN_P (i3) && GET_CODE (PATTERN (i3)) == SET
      && REG_P (SET_SRC (PATTERN (i3)))
      && REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
      && find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3)))
      && GET_CODE (PATTERN (i2)) == PARALLEL
      && ! side_effects_p (SET_DEST (PATTERN (i3)))
      /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
	 below would need to check what is inside (and reg_overlap_mentioned_p
	 doesn't support those codes anyway).  Don't allow those destinations;
	 the resulting insn isn't likely to be recognized anyway.  */
      && GET_CODE (SET_DEST (PATTERN (i3))) != ZERO_EXTRACT
      && GET_CODE (SET_DEST (PATTERN (i3))) != STRICT_LOW_PART
      && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3)),
				    SET_DEST (PATTERN (i3)))
      && next_real_insn (i2) == i3)
    {
      rtx p2 = PATTERN (i2);

      /* Make sure that the destination of I3,
	 which we are going to substitute into one output of I2,
	 is not used within another output of I2.  We must avoid making this:
	 (parallel [(set (mem (reg 69)) ...)
		    (set (reg 69) ...)])
	 which is not well-defined as to order of actions.
	 (Besides, reload can't handle output reloads for this.)

	 The problem can also happen if the dest of I3 is a memory ref,
	 if another dest in I2 is an indirect memory ref.  */
      for (i = 0; i < XVECLEN (p2, 0); i++)
	if ((GET_CODE (XVECEXP (p2, 0, i)) == SET
	     || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER)
	    && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3)),
					SET_DEST (XVECEXP (p2, 0, i))))
	  break;

      if (i == XVECLEN (p2, 0))
	for (i = 0; i < XVECLEN (p2, 0); i++)
	  if ((GET_CODE (XVECEXP (p2, 0, i)) == SET
	       || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER)
	      && SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3)))
	    {
	      combine_merges++;

	      subst_insn = i3;
	      subst_low_luid = DF_INSN_LUID (i2);

	      added_sets_2 = added_sets_1 = 0;
	      i2dest = SET_SRC (PATTERN (i3));
	      i2dest_killed = dead_or_set_p (i2, i2dest);

	      /* Replace the dest in I2 with our dest and make the resulting
		 insn the new pattern for I3.  Then skip to where we
		 validate the pattern.  Everything was set up above.  */
	      SUBST (SET_DEST (XVECEXP (p2, 0, i)),
		     SET_DEST (PATTERN (i3)));

	      newpat = p2;
	      i3_subst_into_i2 = 1;
	      goto validate_replacement;
	    }
    }

  /* If I2 is setting a pseudo to a constant and I3 is setting some
     sub-part of it to another constant, merge them by making a new
     constant.  */
  if (i1 == 0
      && (temp = single_set (i2)) != 0
      && (GET_CODE (SET_SRC (temp)) == CONST_INT
	  || GET_CODE (SET_SRC (temp)) == CONST_DOUBLE)
      && GET_CODE (PATTERN (i3)) == SET
      && (GET_CODE (SET_SRC (PATTERN (i3))) == CONST_INT
	  || GET_CODE (SET_SRC (PATTERN (i3))) == CONST_DOUBLE)
      && reg_subword_p (SET_DEST (PATTERN (i3)), SET_DEST (temp)))
    {
      rtx dest = SET_DEST (PATTERN (i3));
      int offset = -1;
      int width = 0;

      if (GET_CODE (dest) == ZERO_EXTRACT)
	{
	  if (GET_CODE (XEXP (dest, 1)) == CONST_INT
	      && GET_CODE (XEXP (dest, 2)) == CONST_INT)
	    {
	      width = INTVAL (XEXP (dest, 1));
	      offset = INTVAL (XEXP (dest, 2));
	      dest = XEXP (dest, 0);
	      if (BITS_BIG_ENDIAN)
		offset = GET_MODE_BITSIZE (GET_MODE (dest)) - width - offset;
	    }
	}
      else
	{
	  if (GET_CODE (dest) == STRICT_LOW_PART)
	    dest = XEXP (dest, 0);
	  width = GET_MODE_BITSIZE (GET_MODE (dest));
	  offset = 0;
	}

      if (offset >= 0)
	{
	  /* If this is the low part, we're done.  */
	  if (subreg_lowpart_p (dest))
	    ;
	  /* Handle the case where inner is twice the size of outer.  */
	  else if (GET_MODE_BITSIZE (GET_MODE (SET_DEST (temp)))
		   == 2 * GET_MODE_BITSIZE (GET_MODE (dest)))
	    offset += GET_MODE_BITSIZE (GET_MODE (dest));
	  /* Otherwise give up for now.  */
	  else
	    offset = -1;
	}

      if (offset >= 0
	  && (GET_MODE_BITSIZE (GET_MODE (SET_DEST (temp)))
	      <= HOST_BITS_PER_WIDE_INT * 2))
	{
	  HOST_WIDE_INT mhi, ohi, ihi;
	  HOST_WIDE_INT mlo, olo, ilo;
	  rtx inner = SET_SRC (PATTERN (i3));
	  rtx outer = SET_SRC (temp);

	  if (GET_CODE (outer) == CONST_INT)
	    {
	      olo = INTVAL (outer);
	      ohi = olo < 0 ? -1 : 0;
	    }
	  else
	    {
	      olo = CONST_DOUBLE_LOW (outer);
	      ohi = CONST_DOUBLE_HIGH (outer);
	    }

	  if (GET_CODE (inner) == CONST_INT)
	    {
	      ilo = INTVAL (inner);
	      ihi = ilo < 0 ? -1 : 0;
	    }
	  else
	    {
	      ilo = CONST_DOUBLE_LOW (inner);
	      ihi = CONST_DOUBLE_HIGH (inner);
	    }

	  if (width < HOST_BITS_PER_WIDE_INT)
	    {
	      mlo = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
	      mhi = 0;
	    }
	  else if (width < HOST_BITS_PER_WIDE_INT * 2)
	    {
	      mhi = ((unsigned HOST_WIDE_INT) 1
		     << (width - HOST_BITS_PER_WIDE_INT)) - 1;
	      mlo = -1;
	    }
	  else
	    {
	      mlo = -1;
	      mhi = -1;
	    }

	  ilo &= mlo;
	  ihi &= mhi;

	  if (offset >= HOST_BITS_PER_WIDE_INT)
	    {
	      mhi = mlo << (offset - HOST_BITS_PER_WIDE_INT);
	      mlo = 0;
	      ihi = ilo << (offset - HOST_BITS_PER_WIDE_INT);
	      ilo = 0;
	    }
	  else if (offset > 0)
	    {
	      mhi = (mhi << offset) | ((unsigned HOST_WIDE_INT) mlo
		     		       >> (HOST_BITS_PER_WIDE_INT - offset));
	      mlo = mlo << offset;
	      ihi = (ihi << offset) | ((unsigned HOST_WIDE_INT) ilo
		     		       >> (HOST_BITS_PER_WIDE_INT - offset));
	      ilo = ilo << offset;
	    }

	  olo = (olo & ~mlo) | ilo;
	  ohi = (ohi & ~mhi) | ihi;

	  combine_merges++;
	  subst_insn = i3;
	  subst_low_luid = DF_INSN_LUID (i2);
	  added_sets_2 = added_sets_1 = 0;
	  i2dest = SET_DEST (temp);
	  i2dest_killed = dead_or_set_p (i2, i2dest);

	  SUBST (SET_SRC (temp),
		 immed_double_const (olo, ohi, GET_MODE (SET_DEST (temp))));

	  newpat = PATTERN (i2);
	  goto validate_replacement;
	}
    }

#ifndef HAVE_cc0
  /* If we have no I1 and I2 looks like:
	(parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
		   (set Y OP)])
     make up a dummy I1 that is
	(set Y OP)
     and change I2 to be
	(set (reg:CC X) (compare:CC Y (const_int 0)))

     (We can ignore any trailing CLOBBERs.)

     This undoes a previous combination and allows us to match a branch-and-
     decrement insn.  */

  if (i1 == 0 && GET_CODE (PATTERN (i2)) == PARALLEL
      && XVECLEN (PATTERN (i2), 0) >= 2
      && GET_CODE (XVECEXP (PATTERN (i2), 0, 0)) == SET
      && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 0))))
	  == MODE_CC)
      && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2), 0, 0))) == COMPARE
      && XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 1) == const0_rtx
      && GET_CODE (XVECEXP (PATTERN (i2), 0, 1)) == SET
      && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)))
      && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0),
		      SET_SRC (XVECEXP (PATTERN (i2), 0, 1))))
    {
      for (i = XVECLEN (PATTERN (i2), 0) - 1; i >= 2; i--)
	if (GET_CODE (XVECEXP (PATTERN (i2), 0, i)) != CLOBBER)
	  break;

      if (i == 1)
	{
	  /* We make I1 with the same INSN_UID as I2.  This gives it
	     the same DF_INSN_LUID for value tracking.  Our fake I1 will
	     never appear in the insn stream so giving it the same INSN_UID
	     as I2 will not cause a problem.  */

	  i1 = gen_rtx_INSN (VOIDmode, INSN_UID (i2), NULL_RTX, i2,
			     BLOCK_FOR_INSN (i2), INSN_LOCATOR (i2),
			     XVECEXP (PATTERN (i2), 0, 1), -1, NULL_RTX);

	  SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0));
	  SUBST (XEXP (SET_SRC (PATTERN (i2)), 0),
		 SET_DEST (PATTERN (i1)));
	}
    }
#endif

  /* Verify that I2 and I1 are valid for combining.  */
  if (! can_combine_p (i2, i3, i1, NULL_RTX, &i2dest, &i2src)
      || (i1 && ! can_combine_p (i1, i3, NULL_RTX, i2, &i1dest, &i1src)))
    {
      undo_all ();
      return 0;
    }

  /* Record whether I2DEST is used in I2SRC and similarly for the other
     cases.  Knowing this will help in register status updating below.  */
  i2dest_in_i2src = reg_overlap_mentioned_p (i2dest, i2src);
  i1dest_in_i1src = i1 && reg_overlap_mentioned_p (i1dest, i1src);
  i2dest_in_i1src = i1 && reg_overlap_mentioned_p (i2dest, i1src);
  i2dest_killed = dead_or_set_p (i2, i2dest);
  i1dest_killed = i1 && dead_or_set_p (i1, i1dest);

  /* See if I1 directly feeds into I3.  It does if I1DEST is not used
     in I2SRC.  */
  i1_feeds_i3 = i1 && ! reg_overlap_mentioned_p (i1dest, i2src);

  /* Ensure that I3's pattern can be the destination of combines.  */
  if (! combinable_i3pat (i3, &PATTERN (i3), i2dest, i1dest,
			  i1 && i2dest_in_i1src && i1_feeds_i3,
			  &i3dest_killed))
    {
      undo_all ();
      return 0;
    }

  /* See if any of the insns is a MULT operation.  Unless one is, we will
     reject a combination that is, since it must be slower.  Be conservative
     here.  */
  if (GET_CODE (i2src) == MULT
      || (i1 != 0 && GET_CODE (i1src) == MULT)
      || (GET_CODE (PATTERN (i3)) == SET
	  && GET_CODE (SET_SRC (PATTERN (i3))) == MULT))
    have_mult = 1;

  /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
     We used to do this EXCEPT in one case: I3 has a post-inc in an
     output operand.  However, that exception can give rise to insns like
	mov r3,(r3)+
     which is a famous insn on the PDP-11 where the value of r3 used as the
     source was model-dependent.  Avoid this sort of thing.  */

#if 0
  if (!(GET_CODE (PATTERN (i3)) == SET
	&& REG_P (SET_SRC (PATTERN (i3)))
	&& MEM_P (SET_DEST (PATTERN (i3)))
	&& (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC
	    || GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC)))
    /* It's not the exception.  */
#endif
#ifdef AUTO_INC_DEC
    for (link = REG_NOTES (i3); link; link = XEXP (link, 1))
      if (REG_NOTE_KIND (link) == REG_INC
	  && (reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i2))
	      || (i1 != 0
		  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i1)))))
	{
	  undo_all ();
	  return 0;
	}
#endif

  /* See if the SETs in I1 or I2 need to be kept around in the merged
     instruction: whenever the value set there is still needed past I3.
     For the SETs in I2, this is easy: we see if I2DEST dies or is set in I3.

     For the SET in I1, we have two cases:  If I1 and I2 independently
     feed into I3, the set in I1 needs to be kept around if I1DEST dies
     or is set in I3.  Otherwise (if I1 feeds I2 which feeds I3), the set
     in I1 needs to be kept around unless I1DEST dies or is set in either
     I2 or I3.  We can distinguish these cases by seeing if I2SRC mentions
     I1DEST.  If so, we know I1 feeds into I2.  */

  added_sets_2 = ! dead_or_set_p (i3, i2dest);

  added_sets_1
    = i1 && ! (i1_feeds_i3 ? dead_or_set_p (i3, i1dest)
	       : (dead_or_set_p (i3, i1dest) || dead_or_set_p (i2, i1dest)));

  /* If the set in I2 needs to be kept around, we must make a copy of
     PATTERN (I2), so that when we substitute I1SRC for I1DEST in
     PATTERN (I2), we are only substituting for the original I1DEST, not into
     an already-substituted copy.  This also prevents making self-referential
     rtx.  If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
     I2DEST.  */

  if (added_sets_2)
    {
      if (GET_CODE (PATTERN (i2)) == PARALLEL)
	i2pat = gen_rtx_SET (VOIDmode, i2dest, copy_rtx (i2src));
      else
	i2pat = copy_rtx (PATTERN (i2));
    }

  if (added_sets_1)
    {
      if (GET_CODE (PATTERN (i1)) == PARALLEL)
	i1pat = gen_rtx_SET (VOIDmode, i1dest, copy_rtx (i1src));
      else
	i1pat = copy_rtx (PATTERN (i1));
    }

  combine_merges++;

  /* Substitute in the latest insn for the regs set by the earlier ones.  */

  maxreg = max_reg_num ();

  subst_insn = i3;

#ifndef HAVE_cc0
  /* Many machines that don't use CC0 have insns that can both perform an
     arithmetic operation and set the condition code.  These operations will
     be represented as a PARALLEL with the first element of the vector
     being a COMPARE of an arithmetic operation with the constant zero.
     The second element of the vector will set some pseudo to the result
     of the same arithmetic operation.  If we simplify the COMPARE, we won't
     match such a pattern and so will generate an extra insn.   Here we test
     for this case, where both the comparison and the operation result are
     needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
     I2SRC.  Later we will make the PARALLEL that contains I2.  */

  if (i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET
      && GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE
      && XEXP (SET_SRC (PATTERN (i3)), 1) == const0_rtx
      && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest))
    {
#ifdef SELECT_CC_MODE
      rtx *cc_use;
      enum machine_mode compare_mode;
#endif

      newpat = PATTERN (i3);
      SUBST (XEXP (SET_SRC (newpat), 0), i2src);

      i2_is_used = 1;

#ifdef SELECT_CC_MODE
      /* See if a COMPARE with the operand we substituted in should be done
	 with the mode that is currently being used.  If not, do the same
	 processing we do in `subst' for a SET; namely, if the destination
	 is used only once, try to replace it with a register of the proper
	 mode and also replace the COMPARE.  */
      if (undobuf.other_insn == 0
	  && (cc_use = find_single_use (SET_DEST (newpat), i3,
					&undobuf.other_insn))
	  && ((compare_mode = SELECT_CC_MODE (GET_CODE (*cc_use),
					      i2src, const0_rtx))
	      != GET_MODE (SET_DEST (newpat))))
	{
	  if (can_change_dest_mode(SET_DEST (newpat), added_sets_2,
				   compare_mode))
	    {
	      unsigned int regno = REGNO (SET_DEST (newpat));
	      rtx new_dest;

	      if (regno < FIRST_PSEUDO_REGISTER)
		new_dest = gen_rtx_REG (compare_mode, regno);
	      else
		{
		  SUBST_MODE (regno_reg_rtx[regno], compare_mode);
		  new_dest = regno_reg_rtx[regno];
		}

	      SUBST (SET_DEST (newpat), new_dest);
	      SUBST (XEXP (*cc_use, 0), new_dest);
	      SUBST (SET_SRC (newpat),
		     gen_rtx_COMPARE (compare_mode, i2src, const0_rtx));
	    }
	  else
	    undobuf.other_insn = 0;
	}
#endif
    }
  else
#endif
    {
      /* It is possible that the source of I2 or I1 may be performing
	 an unneeded operation, such as a ZERO_EXTEND of something
	 that is known to have the high part zero.  Handle that case
	 by letting subst look at the innermost one of them.

	 Another way to do this would be to have a function that tries
	 to simplify a single insn instead of merging two or more
	 insns.  We don't do this because of the potential of infinite
	 loops and because of the potential extra memory required.
	 However, doing it the way we are is a bit of a kludge and
	 doesn't catch all cases.

	 But only do this if -fexpensive-optimizations since it slows
	 things down and doesn't usually win.

	 This is not done in the COMPARE case above because the
	 unmodified I2PAT is used in the PARALLEL and so a pattern
	 with a modified I2SRC would not match.  */

      if (flag_expensive_optimizations)
	{
	  /* Pass pc_rtx so no substitutions are done, just
	     simplifications.  */
	  if (i1)
	    {
	      subst_low_luid = DF_INSN_LUID (i1);
	      i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0);
	    }
	  else
	    {
	      subst_low_luid = DF_INSN_LUID (i2);
	      i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0);
	    }
	}

      n_occurrences = 0;		/* `subst' counts here */

      /* If I1 feeds into I2 (not into I3) and I1DEST is in I1SRC, we
	 need to make a unique copy of I2SRC each time we substitute it
	 to avoid self-referential rtl.  */

      subst_low_luid = DF_INSN_LUID (i2);
      newpat = subst (PATTERN (i3), i2dest, i2src, 0,
		      ! i1_feeds_i3 && i1dest_in_i1src);
      substed_i2 = 1;

      /* Record whether i2's body now appears within i3's body.  */
      i2_is_used = n_occurrences;
    }

  /* If we already got a failure, don't try to do more.  Otherwise,
     try to substitute in I1 if we have it.  */

  if (i1 && GET_CODE (newpat) != CLOBBER)
    {
      /* Check that an autoincrement side-effect on I1 has not been lost.
	 This happens if I1DEST is mentioned in I2 and dies there, and
	 has disappeared from the new pattern.  */
      if ((FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
	   && !i1_feeds_i3
	   && dead_or_set_p (i2, i1dest)
	   && !reg_overlap_mentioned_p (i1dest, newpat))
	  /* Before we can do this substitution, we must redo the test done
	     above (see detailed comments there) that ensures  that I1DEST
	     isn't mentioned in any SETs in NEWPAT that are field assignments.  */
          || !combinable_i3pat (NULL_RTX, &newpat, i1dest, NULL_RTX, 0, 0))
	{
	  undo_all ();
	  return 0;
	}

      n_occurrences = 0;
      subst_low_luid = DF_INSN_LUID (i1);
      newpat = subst (newpat, i1dest, i1src, 0, 0);
      substed_i1 = 1;
    }

  /* Fail if an autoincrement side-effect has been duplicated.  Be careful
     to count all the ways that I2SRC and I1SRC can be used.  */
  if ((FIND_REG_INC_NOTE (i2, NULL_RTX) != 0
       && i2_is_used + added_sets_2 > 1)
      || (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
	  && (n_occurrences + added_sets_1 + (added_sets_2 && ! i1_feeds_i3)
	      > 1))
      /* Fail if we tried to make a new register.  */
      || max_reg_num () != maxreg
      /* Fail if we couldn't do something and have a CLOBBER.  */
      || GET_CODE (newpat) == CLOBBER
      /* Fail if this new pattern is a MULT and we didn't have one before
	 at the outer level.  */
      || (GET_CODE (newpat) == SET && GET_CODE (SET_SRC (newpat)) == MULT
	  && ! have_mult))
    {
      undo_all ();
      return 0;
    }

  /* If the actions of the earlier insns must be kept
     in addition to substituting them into the latest one,
     we must make a new PARALLEL for the latest insn
     to hold additional the SETs.  */

  if (added_sets_1 || added_sets_2)
    {
      combine_extras++;

      if (GET_CODE (newpat) == PARALLEL)
	{
	  rtvec old = XVEC (newpat, 0);
	  total_sets = XVECLEN (newpat, 0) + added_sets_1 + added_sets_2;
	  newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
	  memcpy (XVEC (newpat, 0)->elem, &old->elem[0],
		  sizeof (old->elem[0]) * old->num_elem);
	}
      else
	{
	  rtx old = newpat;
	  total_sets = 1 + added_sets_1 + added_sets_2;
	  newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
	  XVECEXP (newpat, 0, 0) = old;
	}

      if (added_sets_1)
	XVECEXP (newpat, 0, --total_sets) = i1pat;

      if (added_sets_2)
	{
	  /* If there is no I1, use I2's body as is.  We used to also not do
	     the subst call below if I2 was substituted into I3,
	     but that could lose a simplification.  */
	  if (i1 == 0)
	    XVECEXP (newpat, 0, --total_sets) = i2pat;
	  else
	    /* See comment where i2pat is assigned.  */
	    XVECEXP (newpat, 0, --total_sets)
	      = subst (i2pat, i1dest, i1src, 0, 0);
	}
    }

  /* We come here when we are replacing a destination in I2 with the
     destination of I3.  */
 validate_replacement:

  /* Note which hard regs this insn has as inputs.  */
  mark_used_regs_combine (newpat);

  /* If recog_for_combine fails, it strips existing clobbers.  If we'll
     consider splitting this pattern, we might need these clobbers.  */
  if (i1 && GET_CODE (newpat) == PARALLEL
      && GET_CODE (XVECEXP (newpat, 0, XVECLEN (newpat, 0) - 1)) == CLOBBER)
    {
      int len = XVECLEN (newpat, 0);

      newpat_vec_with_clobbers = rtvec_alloc (len);
      for (i = 0; i < len; i++)
	RTVEC_ELT (newpat_vec_with_clobbers, i) = XVECEXP (newpat, 0, i);
    }

  /* Is the result of combination a valid instruction?  */
  insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);

  /* If the result isn't valid, see if it is a PARALLEL of two SETs where
     the second SET's destination is a register that is unused and isn't
     marked as an instruction that might trap in an EH region.  In that case,
     we just need the first SET.   This can occur when simplifying a divmod
     insn.  We *must* test for this case here because the code below that
     splits two independent SETs doesn't handle this case correctly when it
     updates the register status.

     It's pointless doing this if we originally had two sets, one from
     i3, and one from i2.  Combining then splitting the parallel results
     in the original i2 again plus an invalid insn (which we delete).
     The net effect is only to move instructions around, which makes
     debug info less accurate.

     Also check the case where the first SET's destination is unused.
     That would not cause incorrect code, but does cause an unneeded
     insn to remain.  */

  if (insn_code_number < 0
      && !(added_sets_2 && i1 == 0)
      && GET_CODE (newpat) == PARALLEL
      && XVECLEN (newpat, 0) == 2
      && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
      && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
      && asm_noperands (newpat) < 0)
    {
      rtx set0 = XVECEXP (newpat, 0, 0);
      rtx set1 = XVECEXP (newpat, 0, 1);
      rtx note;

      if (((REG_P (SET_DEST (set1))
	    && find_reg_note (i3, REG_UNUSED, SET_DEST (set1)))
	   || (GET_CODE (SET_DEST (set1)) == SUBREG
	       && find_reg_note (i3, REG_UNUSED, SUBREG_REG (SET_DEST (set1)))))
	  && (!(note = find_reg_note (i3, REG_EH_REGION, NULL_RTX))
	      || INTVAL (XEXP (note, 0)) <= 0)
	  && ! side_effects_p (SET_SRC (set1)))
	{
	  newpat = set0;
	  insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
	}

      else if (((REG_P (SET_DEST (set0))
		 && find_reg_note (i3, REG_UNUSED, SET_DEST (set0)))
		|| (GET_CODE (SET_DEST (set0)) == SUBREG
		    && find_reg_note (i3, REG_UNUSED,
				      SUBREG_REG (SET_DEST (set0)))))
	       && (!(note = find_reg_note (i3, REG_EH_REGION, NULL_RTX))
		   || INTVAL (XEXP (note, 0)) <= 0)
	       && ! side_effects_p (SET_SRC (set0)))
	{
	  newpat = set1;
	  insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);

	  if (insn_code_number >= 0)
	    {
	      /* If we will be able to accept this, we have made a
		 change to the destination of I3.  This requires us to
		 do a few adjustments.  */

	      PATTERN (i3) = newpat;
	      adjust_for_new_dest (i3);
	    }
	}
    }

  /* If we were combining three insns and the result is a simple SET
     with no ASM_OPERANDS that wasn't recognized, try to split it into two
     insns.  There are two ways to do this.  It can be split using a
     machine-specific method (like when you have an addition of a large
     constant) or by combine in the function find_split_point.  */

  if (i1 && insn_code_number < 0 && GET_CODE (newpat) == SET
      && asm_noperands (newpat) < 0)
    {
      rtx parallel, m_split, *split;

      /* See if the MD file can split NEWPAT.  If it can't, see if letting it
	 use I2DEST as a scratch register will help.  In the latter case,
	 convert I2DEST to the mode of the source of NEWPAT if we can.  */

      m_split = combine_split_insns (newpat, i3);

      /* We can only use I2DEST as a scratch reg if it doesn't overlap any
	 inputs of NEWPAT.  */

      /* ??? If I2DEST is not safe, and I1DEST exists, then it would be
	 possible to try that as a scratch reg.  This would require adding
	 more code to make it work though.  */

      if (m_split == 0 && ! reg_overlap_mentioned_p (i2dest, newpat))
	{
	  enum machine_mode new_mode = GET_MODE (SET_DEST (newpat));

	  /* First try to split using the original register as a
	     scratch register.  */
	  parallel = gen_rtx_PARALLEL (VOIDmode,
				       gen_rtvec (2, newpat,
						  gen_rtx_CLOBBER (VOIDmode,
								   i2dest)));
	  m_split = combine_split_insns (parallel, i3);

	  /* If that didn't work, try changing the mode of I2DEST if
	     we can.  */
	  if (m_split == 0
	      && new_mode != GET_MODE (i2dest)
	      && new_mode != VOIDmode
	      && can_change_dest_mode (i2dest, added_sets_2, new_mode))
	    {
	      enum machine_mode old_mode = GET_MODE (i2dest);
	      rtx ni2dest;

	      if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
		ni2dest = gen_rtx_REG (new_mode, REGNO (i2dest));
	      else
		{
		  SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], new_mode);
		  ni2dest = regno_reg_rtx[REGNO (i2dest)];
		}

	      parallel = (gen_rtx_PARALLEL
			  (VOIDmode,
			   gen_rtvec (2, newpat,
				      gen_rtx_CLOBBER (VOIDmode,
						       ni2dest))));
	      m_split = combine_split_insns (parallel, i3);

	      if (m_split == 0
		  && REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
		{
		  struct undo *buf;

		  adjust_reg_mode (regno_reg_rtx[REGNO (i2dest)], old_mode);
		  buf = undobuf.undos;
		  undobuf.undos = buf->next;
		  buf->next = undobuf.frees;
		  undobuf.frees = buf;
		}
	    }
	}

      /* If recog_for_combine has discarded clobbers, try to use them
	 again for the split.  */
      if (m_split == 0 && newpat_vec_with_clobbers)
	{
	  parallel = gen_rtx_PARALLEL (VOIDmode, newpat_vec_with_clobbers);
	  m_split = combine_split_insns (parallel, i3);
	}

      if (m_split && NEXT_INSN (m_split) == NULL_RTX)
	{
	  m_split = PATTERN (m_split);
	  insn_code_number = recog_for_combine (&m_split, i3, &new_i3_notes);
	  if (insn_code_number >= 0)
	    newpat = m_split;
	}
      else if (m_split && NEXT_INSN (NEXT_INSN (m_split)) == NULL_RTX
	       && (next_real_insn (i2) == i3
		   || ! use_crosses_set_p (PATTERN (m_split), DF_INSN_LUID (i2))))
	{
	  rtx i2set, i3set;
	  rtx newi3pat = PATTERN (NEXT_INSN (m_split));
	  newi2pat = PATTERN (m_split);

	  i3set = single_set (NEXT_INSN (m_split));
	  i2set = single_set (m_split);

	  i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

	  /* If I2 or I3 has multiple SETs, we won't know how to track
	     register status, so don't use these insns.  If I2's destination
	     is used between I2 and I3, we also can't use these insns.  */

	  if (i2_code_number >= 0 && i2set && i3set
	      && (next_real_insn (i2) == i3
		  || ! reg_used_between_p (SET_DEST (i2set), i2, i3)))
	    insn_code_number = recog_for_combine (&newi3pat, i3,
						  &new_i3_notes);
	  if (insn_code_number >= 0)
	    newpat = newi3pat;

	  /* It is possible that both insns now set the destination of I3.
	     If so, we must show an extra use of it.  */

	  if (insn_code_number >= 0)
	    {
	      rtx new_i3_dest = SET_DEST (i3set);
	      rtx new_i2_dest = SET_DEST (i2set);

	      while (GET_CODE (new_i3_dest) == ZERO_EXTRACT
		     || GET_CODE (new_i3_dest) == STRICT_LOW_PART
		     || GET_CODE (new_i3_dest) == SUBREG)
		new_i3_dest = XEXP (new_i3_dest, 0);

	      while (GET_CODE (new_i2_dest) == ZERO_EXTRACT
		     || GET_CODE (new_i2_dest) == STRICT_LOW_PART
		     || GET_CODE (new_i2_dest) == SUBREG)
		new_i2_dest = XEXP (new_i2_dest, 0);

	      if (REG_P (new_i3_dest)
		  && REG_P (new_i2_dest)
		  && REGNO (new_i3_dest) == REGNO (new_i2_dest))
		INC_REG_N_SETS (REGNO (new_i2_dest), 1);
	    }
	}

      /* If we can split it and use I2DEST, go ahead and see if that
	 helps things be recognized.  Verify that none of the registers
	 are set between I2 and I3.  */
      if (insn_code_number < 0 && (split = find_split_point (&newpat, i3)) != 0
#ifdef HAVE_cc0
	  && REG_P (i2dest)
#endif
	  /* We need I2DEST in the proper mode.  If it is a hard register
	     or the only use of a pseudo, we can change its mode.
	     Make sure we don't change a hard register to have a mode that
	     isn't valid for it, or change the number of registers.  */
	  && (GET_MODE (*split) == GET_MODE (i2dest)
	      || GET_MODE (*split) == VOIDmode
	      || can_change_dest_mode (i2dest, added_sets_2,
				       GET_MODE (*split)))
	  && (next_real_insn (i2) == i3
	      || ! use_crosses_set_p (*split, DF_INSN_LUID (i2)))
	  /* We can't overwrite I2DEST if its value is still used by
	     NEWPAT.  */
	  && ! reg_referenced_p (i2dest, newpat))
	{
	  rtx newdest = i2dest;
	  enum rtx_code split_code = GET_CODE (*split);
	  enum machine_mode split_mode = GET_MODE (*split);
	  bool subst_done = false;
	  newi2pat = NULL_RTX;

	  /* Get NEWDEST as a register in the proper mode.  We have already
	     validated that we can do this.  */
	  if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode)
	    {
	      if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
		newdest = gen_rtx_REG (split_mode, REGNO (i2dest));
	      else
		{
		  SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], split_mode);
		  newdest = regno_reg_rtx[REGNO (i2dest)];
		}
	    }

	  /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
	     an ASHIFT.  This can occur if it was inside a PLUS and hence
	     appeared to be a memory address.  This is a kludge.  */
	  if (split_code == MULT
	      && GET_CODE (XEXP (*split, 1)) == CONST_INT
	      && INTVAL (XEXP (*split, 1)) > 0
	      && (i = exact_log2 (INTVAL (XEXP (*split, 1)))) >= 0)
	    {
	      SUBST (*split, gen_rtx_ASHIFT (split_mode,
					     XEXP (*split, 0), GEN_INT (i)));
	      /* Update split_code because we may not have a multiply
		 anymore.  */
	      split_code = GET_CODE (*split);
	    }

#ifdef INSN_SCHEDULING
	  /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
	     be written as a ZERO_EXTEND.  */
	  if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
	    {
#ifdef LOAD_EXTEND_OP
	      /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
		 what it really is.  */
	      if (LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (*split)))
		  == SIGN_EXTEND)
		SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
						    SUBREG_REG (*split)));
	      else
#endif
		SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
						    SUBREG_REG (*split)));
	    }
#endif

	  /* Attempt to split binary operators using arithmetic identities.  */
	  if (BINARY_P (SET_SRC (newpat))
	      && split_mode == GET_MODE (SET_SRC (newpat))
	      && ! side_effects_p (SET_SRC (newpat)))
	    {
	      rtx setsrc = SET_SRC (newpat);
	      enum machine_mode mode = GET_MODE (setsrc);
	      enum rtx_code code = GET_CODE (setsrc);
	      rtx src_op0 = XEXP (setsrc, 0);
	      rtx src_op1 = XEXP (setsrc, 1);

	      /* Split "X = Y op Y" as "Z = Y; X = Z op Z".  */
	      if (rtx_equal_p (src_op0, src_op1))
		{
		  newi2pat = gen_rtx_SET (VOIDmode, newdest, src_op0);
		  SUBST (XEXP (setsrc, 0), newdest);
		  SUBST (XEXP (setsrc, 1), newdest);
		  subst_done = true;
		}
	      /* Split "((P op Q) op R) op S" where op is PLUS or MULT.  */
	      else if ((code == PLUS || code == MULT)
		       && GET_CODE (src_op0) == code
		       && GET_CODE (XEXP (src_op0, 0)) == code
		       && (INTEGRAL_MODE_P (mode)
			   || (FLOAT_MODE_P (mode)
			       && flag_unsafe_math_optimizations)))
		{
		  rtx p = XEXP (XEXP (src_op0, 0), 0);
		  rtx q = XEXP (XEXP (src_op0, 0), 1);
		  rtx r = XEXP (src_op0, 1);
		  rtx s = src_op1;

		  /* Split both "((X op Y) op X) op Y" and
		     "((X op Y) op Y) op X" as "T op T" where T is
		     "X op Y".  */
		  if ((rtx_equal_p (p,r) && rtx_equal_p (q,s))
		       || (rtx_equal_p (p,s) && rtx_equal_p (q,r)))
		    {
		      newi2pat = gen_rtx_SET (VOIDmode, newdest,
					      XEXP (src_op0, 0));
		      SUBST (XEXP (setsrc, 0), newdest);
		      SUBST (XEXP (setsrc, 1), newdest);
		      subst_done = true;
		    }
		  /* Split "((X op X) op Y) op Y)" as "T op T" where
		     T is "X op Y".  */
		  else if (rtx_equal_p (p,q) && rtx_equal_p (r,s))
		    {
		      rtx tmp = simplify_gen_binary (code, mode, p, r);
		      newi2pat = gen_rtx_SET (VOIDmode, newdest, tmp);
		      SUBST (XEXP (setsrc, 0), newdest);
		      SUBST (XEXP (setsrc, 1), newdest);
		      subst_done = true;
		    }
		}
	    }

	  if (!subst_done)
	    {
	      newi2pat = gen_rtx_SET (VOIDmode, newdest, *split);
	      SUBST (*split, newdest);
	    }

	  i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

	  /* recog_for_combine might have added CLOBBERs to newi2pat.
	     Make sure NEWPAT does not depend on the clobbered regs.  */
	  if (GET_CODE (newi2pat) == PARALLEL)
	    for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
	      if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
		{
		  rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
		  if (reg_overlap_mentioned_p (reg, newpat))
		    {
		      undo_all ();
		      return 0;
		    }
		}

	  /* If the split point was a MULT and we didn't have one before,
	     don't use one now.  */
	  if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult))
	    insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
	}
    }

  /* Check for a case where we loaded from memory in a narrow mode and
     then sign extended it, but we need both registers.  In that case,
     we have a PARALLEL with both loads from the same memory location.
     We can split this into a load from memory followed by a register-register
     copy.  This saves at least one insn, more if register allocation can
     eliminate the copy.

     We cannot do this if the destination of the first assignment is a
     condition code register or cc0.  We eliminate this case by making sure
     the SET_DEST and SET_SRC have the same mode.

     We cannot do this if the destination of the second assignment is
     a register that we have already assumed is zero-extended.  Similarly
     for a SUBREG of such a register.  */

  else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
	   && GET_CODE (newpat) == PARALLEL
	   && XVECLEN (newpat, 0) == 2
	   && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
	   && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
	   && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0)))
	       == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0))))
	   && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
	   && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
			   XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
	   && ! use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 1)),
				   DF_INSN_LUID (i2))
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
	   && ! (temp = SET_DEST (XVECEXP (newpat, 0, 1)),
		 (REG_P (temp)
		  && VEC_index (reg_stat_type, reg_stat,
				REGNO (temp))->nonzero_bits != 0
		  && GET_MODE_BITSIZE (GET_MODE (temp)) < BITS_PER_WORD
		  && GET_MODE_BITSIZE (GET_MODE (temp)) < HOST_BITS_PER_INT
		  && (VEC_index (reg_stat_type, reg_stat,
				 REGNO (temp))->nonzero_bits
		      != GET_MODE_MASK (word_mode))))
	   && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
		 && (temp = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
		     (REG_P (temp)
		      && VEC_index (reg_stat_type, reg_stat,
				    REGNO (temp))->nonzero_bits != 0
		      && GET_MODE_BITSIZE (GET_MODE (temp)) < BITS_PER_WORD
		      && GET_MODE_BITSIZE (GET_MODE (temp)) < HOST_BITS_PER_INT
		      && (VEC_index (reg_stat_type, reg_stat,
				     REGNO (temp))->nonzero_bits
			  != GET_MODE_MASK (word_mode)))))
	   && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
					 SET_SRC (XVECEXP (newpat, 0, 1)))
	   && ! find_reg_note (i3, REG_UNUSED,
			       SET_DEST (XVECEXP (newpat, 0, 0))))
    {
      rtx ni2dest;

      newi2pat = XVECEXP (newpat, 0, 0);
      ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
      newpat = XVECEXP (newpat, 0, 1);
      SUBST (SET_SRC (newpat),
	     gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest));
      i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

      if (i2_code_number >= 0)
	insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);

      if (insn_code_number >= 0)
	swap_i2i3 = 1;
    }

  /* Similarly, check for a case where we have a PARALLEL of two independent
     SETs but we started with three insns.  In this case, we can do the sets
     as two separate insns.  This case occurs when some SET allows two
     other insns to combine, but the destination of that SET is still live.  */

  else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
	   && GET_CODE (newpat) == PARALLEL
	   && XVECLEN (newpat, 0) == 2
	   && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
	   && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
	   && ! use_crosses_set_p (SET_SRC (XVECEXP (newpat, 0, 1)),
				   DF_INSN_LUID (i2))
	   && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
				  XVECEXP (newpat, 0, 0))
	   && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
				  XVECEXP (newpat, 0, 1))
	   && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
		 && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1))))
#ifdef HAVE_cc0
	   /* We cannot split the parallel into two sets if both sets
	      reference cc0.  */
	   && ! (reg_referenced_p (cc0_rtx, XVECEXP (newpat, 0, 0))
		 && reg_referenced_p (cc0_rtx, XVECEXP (newpat, 0, 1)))
#endif
	   )
    {
      /* Normally, it doesn't matter which of the two is done first,
	 but it does if one references cc0.  In that case, it has to
	 be first.  */
#ifdef HAVE_cc0
      if (reg_referenced_p (cc0_rtx, XVECEXP (newpat, 0, 0)))
	{
	  newi2pat = XVECEXP (newpat, 0, 0);
	  newpat = XVECEXP (newpat, 0, 1);
	}
      else
#endif
	{
	  newi2pat = XVECEXP (newpat, 0, 1);
	  newpat = XVECEXP (newpat, 0, 0);
	}

      i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);

      if (i2_code_number >= 0)
	insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
    }

  /* If it still isn't recognized, fail and change things back the way they
     were.  */
  if ((insn_code_number < 0
       /* Is the result a reasonable ASM_OPERANDS?  */
       && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2)))
    {
      undo_all ();
      return 0;
    }

  /* If we had to change another insn, make sure it is valid also.  */
  if (undobuf.other_insn)
    {
      CLEAR_HARD_REG_SET (newpat_used_regs);

      other_pat = PATTERN (undobuf.other_insn);
      other_code_number = recog_for_combine (&other_pat, undobuf.other_insn,
					     &new_other_notes);

      if (other_code_number < 0 && ! check_asm_operands (other_pat))
	{
	  undo_all ();
	  return 0;
	}
    }

#ifdef HAVE_cc0
  /* If I2 is the CC0 setter and I3 is the CC0 user then check whether
     they are adjacent to each other or not.  */
  {
    rtx p = prev_nonnote_insn (i3);
    if (p && p != i2 && NONJUMP_INSN_P (p) && newi2pat
	&& sets_cc0_p (newi2pat))
      {
	undo_all ();
	return 0;
      }
  }
#endif

  /* Only allow this combination if insn_rtx_costs reports that the
     replacement instructions are cheaper than the originals.  */
  if (!combine_validate_cost (i1, i2, i3, newpat, newi2pat, other_pat))
    {
      undo_all ();
      return 0;
    }

  /* We now know that we can do this combination.  Merge the insns and
     update the status of registers and LOG_LINKS.  */

  if (undobuf.other_insn)
    {
      rtx note, next;

      PATTERN (undobuf.other_insn) = other_pat;

      /* If any of the notes in OTHER_INSN were REG_UNUSED, ensure that they
	 are still valid.  Then add any non-duplicate notes added by
	 recog_for_combine.  */
      for (note = REG_NOTES (undobuf.other_insn); note; note = next)
	{
	  next = XEXP (note, 1);

	  if (REG_NOTE_KIND (note) == REG_UNUSED
	      && ! reg_set_p (XEXP (note, 0), PATTERN (undobuf.other_insn)))
	    remove_note (undobuf.other_insn, note);
	}

      distribute_notes (new_other_notes, undobuf.other_insn,
			undobuf.other_insn, NULL_RTX, NULL_RTX, NULL_RTX);
    }

  if (swap_i2i3)
    {
      rtx insn;
      rtx link;
      rtx ni2dest;

      /* I3 now uses what used to be its destination and which is now
	 I2's destination.  This requires us to do a few adjustments.  */
      PATTERN (i3) = newpat;
      adjust_for_new_dest (i3);

      /* We need a LOG_LINK from I3 to I2.  But we used to have one,
	 so we still will.

	 However, some later insn might be using I2's dest and have
	 a LOG_LINK pointing at I3.  We must remove this link.
	 The simplest way to remove the link is to point it at I1,
	 which we know will be a NOTE.  */

      /* newi2pat is usually a SET here; however, recog_for_combine might
	 have added some clobbers.  */
      if (GET_CODE (newi2pat) == PARALLEL)
	ni2dest = SET_DEST (XVECEXP (newi2pat, 0, 0));
      else
	ni2dest = SET_DEST (newi2pat);

      for (insn = NEXT_INSN (i3);
	   insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR
		    || insn != BB_HEAD (this_basic_block->next_bb));
	   insn = NEXT_INSN (insn))
	{
	  if (INSN_P (insn) && reg_referenced_p (ni2dest, PATTERN (insn)))
	    {
	      for (link = LOG_LINKS (insn); link;
		   link = XEXP (link, 1))
		if (XEXP (link, 0) == i3)
		  XEXP (link, 0) = i1;

	      break;
	    }
	}
    }

  {
    rtx i3notes, i2notes, i1notes = 0;
    rtx i3links, i2links, i1links = 0;
    rtx midnotes = 0;
    unsigned int regno;
    /* Compute which registers we expect to eliminate.  newi2pat may be setting
       either i3dest or i2dest, so we must check it.  Also, i1dest may be the
       same as i3dest, in which case newi2pat may be setting i1dest.  */
    rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
		   || i2dest_in_i2src || i2dest_in_i1src
		   || !i2dest_killed
		   ? 0 : i2dest);
    rtx elim_i1 = (i1 == 0 || i1dest_in_i1src
		   || (newi2pat && reg_set_p (i1dest, newi2pat))
		   || !i1dest_killed
		   ? 0 : i1dest);

    /* Get the old REG_NOTES and LOG_LINKS from all our insns and
       clear them.  */
    i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
    i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
    if (i1)
      i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);

    /* Ensure that we do not have something that should not be shared but
       occurs multiple times in the new insns.  Check this by first
       resetting all the `used' flags and then copying anything is shared.  */

    reset_used_flags (i3notes);
    reset_used_flags (i2notes);
    reset_used_flags (i1notes);
    reset_used_flags (newpat);
    reset_used_flags (newi2pat);
    if (undobuf.other_insn)
      reset_used_flags (PATTERN (undobuf.other_insn));

    i3notes = copy_rtx_if_shared (i3notes);
    i2notes = copy_rtx_if_shared (i2notes);
    i1notes = copy_rtx_if_shared (i1notes);
    newpat = copy_rtx_if_shared (newpat);
    newi2pat = copy_rtx_if_shared (newi2pat);
    if (undobuf.other_insn)
      reset_used_flags (PATTERN (undobuf.other_insn));

    INSN_CODE (i3) = insn_code_number;
    PATTERN (i3) = newpat;

    if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3))
      {
	rtx call_usage = CALL_INSN_FUNCTION_USAGE (i3);

	reset_used_flags (call_usage);
	call_usage = copy_rtx (call_usage);

	if (substed_i2)
	  replace_rtx (call_usage, i2dest, i2src);

	if (substed_i1)
	  replace_rtx (call_usage, i1dest, i1src);

	CALL_INSN_FUNCTION_USAGE (i3) = call_usage;
      }

    if (undobuf.other_insn)
      INSN_CODE (undobuf.other_insn) = other_code_number;

    /* We had one special case above where I2 had more than one set and
       we replaced a destination of one of those sets with the destination
       of I3.  In that case, we have to update LOG_LINKS of insns later
       in this basic block.  Note that this (expensive) case is rare.

       Also, in this case, we must pretend that all REG_NOTEs for I2
       actually came from I3, so that REG_UNUSED notes from I2 will be
       properly handled.  */

    if (i3_subst_into_i2)
      {
	for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
	  if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET
	       || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER)
	      && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i)))
	      && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
	      && ! find_reg_note (i2, REG_UNUSED,
				  SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
	    for (temp = NEXT_INSN (i2);
		 temp && (this_basic_block->next_bb == EXIT_BLOCK_PTR
			  || BB_HEAD (this_basic_block) != temp);
		 temp = NEXT_INSN (temp))
	      if (temp != i3 && INSN_P (temp))
		for (link = LOG_LINKS (temp); link; link = XEXP (link, 1))
		  if (XEXP (link, 0) == i2)
		    XEXP (link, 0) = i3;

	if (i3notes)
	  {
	    rtx link = i3notes;
	    while (XEXP (link, 1))
	      link = XEXP (link, 1);
	    XEXP (link, 1) = i2notes;
	  }
	else
	  i3notes = i2notes;
	i2notes = 0;
      }

    LOG_LINKS (i3) = 0;
    REG_NOTES (i3) = 0;
    LOG_LINKS (i2) = 0;
    REG_NOTES (i2) = 0;

    if (newi2pat)
      {
	INSN_CODE (i2) = i2_code_number;
	PATTERN (i2) = newi2pat;
      }
    else
      SET_INSN_DELETED (i2);

    if (i1)
      {
	LOG_LINKS (i1) = 0;
	REG_NOTES (i1) = 0;
	SET_INSN_DELETED (i1);
      }

    /* Get death notes for everything that is now used in either I3 or
       I2 and used to die in a previous insn.  If we built two new
       patterns, move from I1 to I2 then I2 to I3 so that we get the
       proper movement on registers that I2 modifies.  */

    if (newi2pat)
      {
	move_deaths (newi2pat, NULL_RTX, DF_INSN_LUID (i1), i2, &midnotes);
	move_deaths (newpat, newi2pat, DF_INSN_LUID (i1), i3, &midnotes);
      }
    else
      move_deaths (newpat, NULL_RTX, i1 ? DF_INSN_LUID (i1) : DF_INSN_LUID (i2),
		   i3, &midnotes);

    /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3.  */
    if (i3notes)
      distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL_RTX,
			elim_i2, elim_i1);
    if (i2notes)
      distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL_RTX,
			elim_i2, elim_i1);
    if (i1notes)
      distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL_RTX,
			elim_i2, elim_i1);
    if (midnotes)
      distribute_notes (midnotes, NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
			elim_i2, elim_i1);

    /* Distribute any notes added to I2 or I3 by recog_for_combine.  We
       know these are REG_UNUSED and want them to go to the desired insn,
       so we always pass it as i3.  */

    if (newi2pat && new_i2_notes)
      distribute_notes (new_i2_notes, i2, i2, NULL_RTX, NULL_RTX, NULL_RTX);
    
    if (new_i3_notes)
      distribute_notes (new_i3_notes, i3, i3, NULL_RTX, NULL_RTX, NULL_RTX);

    /* If I3DEST was used in I3SRC, it really died in I3.  We may need to
       put a REG_DEAD note for it somewhere.  If NEWI2PAT exists and sets
       I3DEST, the death must be somewhere before I2, not I3.  If we passed I3
       in that case, it might delete I2.  Similarly for I2 and I1.
       Show an additional death due to the REG_DEAD note we make here.  If
       we discard it in distribute_notes, we will decrement it again.  */

    if (i3dest_killed)
      {
	if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
	  distribute_notes (gen_rtx_EXPR_LIST (REG_DEAD, i3dest_killed,
					       NULL_RTX),
			    NULL_RTX, i2, NULL_RTX, elim_i2, elim_i1);
	else
	  distribute_notes (gen_rtx_EXPR_LIST (REG_DEAD, i3dest_killed,
					       NULL_RTX),
			    NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
			    elim_i2, elim_i1);
      }

    if (i2dest_in_i2src)
      {
	if (newi2pat && reg_set_p (i2dest, newi2pat))
	  distribute_notes (gen_rtx_EXPR_LIST (REG_DEAD, i2dest, NULL_RTX),
			    NULL_RTX, i2, NULL_RTX, NULL_RTX, NULL_RTX);
	else
	  distribute_notes (gen_rtx_EXPR_LIST (REG_DEAD, i2dest, NULL_RTX),
			    NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
			    NULL_RTX, NULL_RTX);
      }

    if (i1dest_in_i1src)
      {
	if (newi2pat && reg_set_p (i1dest, newi2pat))
	  distribute_notes (gen_rtx_EXPR_LIST (REG_DEAD, i1dest, NULL_RTX),
			    NULL_RTX, i2, NULL_RTX, NULL_RTX, NULL_RTX);
	else
	  distribute_notes (gen_rtx_EXPR_LIST (REG_DEAD, i1dest, NULL_RTX),
			    NULL_RTX, i3, newi2pat ? i2 : NULL_RTX,
			    NULL_RTX, NULL_RTX);
      }

    distribute_links (i3links);
    distribute_links (i2links);
    distribute_links (i1links);

    if (REG_P (i2dest))
      {
	rtx link;
	rtx i2_insn = 0, i2_val = 0, set;

	/* The insn that used to set this register doesn't exist, and
	   this life of the register may not exist either.  See if one of
	   I3's links points to an insn that sets I2DEST.  If it does,
	   that is now the last known value for I2DEST. If we don't update
	   this and I2 set the register to a value that depended on its old
	   contents, we will get confused.  If this insn is used, thing
	   will be set correctly in combine_instructions.  */

	for (link = LOG_LINKS (i3); link; link = XEXP (link, 1))
	  if ((set = single_set (XEXP (link, 0))) != 0
	      && rtx_equal_p (i2dest, SET_DEST (set)))
	    i2_insn = XEXP (link, 0), i2_val = SET_SRC (set);

	record_value_for_reg (i2dest, i2_insn, i2_val);

	/* If the reg formerly set in I2 died only once and that was in I3,
	   zero its use count so it won't make `reload' do any work.  */
	if (! added_sets_2
	    && (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat))
	    && ! i2dest_in_i2src)
	  {
	    regno = REGNO (i2dest);
	    INC_REG_N_SETS (regno, -1);
	  }
      }

    if (i1 && REG_P (i1dest))
      {
	rtx link;
	rtx i1_insn = 0, i1_val = 0, set;

	for (link = LOG_LINKS (i3); link; link = XEXP (link, 1))
	  if ((set = single_set (XEXP (link, 0))) != 0
	      && rtx_equal_p (i1dest, SET_DEST (set)))
	    i1_insn = XEXP (link, 0), i1_val = SET_SRC (set);

	record_value_for_reg (i1dest, i1_insn, i1_val);

	regno = REGNO (i1dest);
	if (! added_sets_1 && ! i1dest_in_i1src)
	  INC_REG_N_SETS (regno, -1);
      }

    /* Update reg_stat[].nonzero_bits et al for any changes that may have
       been made to this insn.  The order of
       set_nonzero_bits_and_sign_copies() is important.  Because newi2pat
       can affect nonzero_bits of newpat */
    if (newi2pat)
      note_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL);
    note_stores (newpat, set_nonzero_bits_and_sign_copies, NULL);

    /* Set new_direct_jump_p if a new return or simple jump instruction
       has been created.

       If I3 is now an unconditional jump, ensure that it has a
       BARRIER following it since it may have initially been a
       conditional jump.  It may also be the last nonnote insn.  */

    if (returnjump_p (i3) || any_uncondjump_p (i3))
      {
	*new_direct_jump_p = 1;
	mark_jump_label (PATTERN (i3), i3, 0);

	if ((temp = next_nonnote_insn (i3)) == NULL_RTX
	    || !BARRIER_P (temp))
	  emit_barrier_after (i3);
      }

    if (undobuf.other_insn != NULL_RTX
	&& (returnjump_p (undobuf.other_insn)
	    || any_uncondjump_p (undobuf.other_insn)))
      {
	*new_direct_jump_p = 1;

	if ((temp = next_nonnote_insn (undobuf.other_insn)) == NULL_RTX
	    || !BARRIER_P (temp))
	  emit_barrier_after (undobuf.other_insn);
      }

    /* An NOOP jump does not need barrier, but it does need cleaning up
       of CFG.  */
    if (GET_CODE (newpat) == SET
	&& SET_SRC (newpat) == pc_rtx
	&& SET_DEST (newpat) == pc_rtx)
      *new_direct_jump_p = 1;
  }
  
  if (undobuf.other_insn != NULL_RTX)
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying other_insn ");
	  dump_insn_slim (dump_file, undobuf.other_insn);
	}
      df_insn_rescan (undobuf.other_insn);
    }

  if (i1 && !(NOTE_P(i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED)))
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying insn i1 ");
	  dump_insn_slim (dump_file, i1);
	}
      df_insn_rescan (i1);
    }

  if (i2 && !(NOTE_P(i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED)))
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying insn i2 ");
	  dump_insn_slim (dump_file, i2);
	}
      df_insn_rescan (i2);
    }

  if (i3 && !(NOTE_P(i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED)))
    {
      if (dump_file)
	{
	  fprintf (dump_file, "modifying insn i3 ");
	  dump_insn_slim (dump_file, i3);
	}
      df_insn_rescan (i3);
    }
  
  combine_successes++;
  undo_commit ();

  if (added_links_insn
      && (newi2pat == 0 || DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i2))
      && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (i3))
    return added_links_insn;
  else
    return newi2pat ? i2 : i3;
}

/* Undo all the modifications recorded in undobuf.  */

static void
undo_all (void)
{
  struct undo *undo, *next;

  for (undo = undobuf.undos; undo; undo = next)
    {
      next = undo->next;
      switch (undo->kind)
	{
	case UNDO_RTX:
	  *undo->where.r = undo->old_contents.r;
	  break;
	case UNDO_INT:
	  *undo->where.i = undo->old_contents.i;
	  break;
	case UNDO_MODE:
	  adjust_reg_mode (*undo->where.r, undo->old_contents.m);
	  break;
	default:
	  gcc_unreachable ();
	}

      undo->next = undobuf.frees;
      undobuf.frees = undo;
    }

  undobuf.undos = 0;
}

/* We've committed to accepting the changes we made.  Move all
   of the undos to the free list.  */

static void
undo_commit (void)
{
  struct undo *undo, *next;

  for (undo = undobuf.undos; undo; undo = next)
    {
      next = undo->next;
      undo->next = undobuf.frees;
      undobuf.frees = undo;
    }
  undobuf.undos = 0;
}

/* Find the innermost point within the rtx at LOC, possibly LOC itself,
   where we have an arithmetic expression and return that point.  LOC will
   be inside INSN.

   try_combine will call this function to see if an insn can be split into
   two insns.  */

static rtx *
find_split_point (rtx *loc, rtx insn)
{
  rtx x = *loc;
  enum rtx_code code = GET_CODE (x);
  rtx *split;
  unsigned HOST_WIDE_INT len = 0;
  HOST_WIDE_INT pos = 0;
  int unsignedp = 0;
  rtx inner = NULL_RTX;

  /* First special-case some codes.  */
  switch (code)
    {
    case SUBREG:
#ifdef INSN_SCHEDULING
      /* If we are making a paradoxical SUBREG invalid, it becomes a split
	 point.  */
      if (MEM_P (SUBREG_REG (x)))
	return loc;
#endif
      return find_split_point (&SUBREG_REG (x), insn);

    case MEM:
#ifdef HAVE_lo_sum
      /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it
	 using LO_SUM and HIGH.  */
      if (GET_CODE (XEXP (x, 0)) == CONST
	  || GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
	{
	  SUBST (XEXP (x, 0),
		 gen_rtx_LO_SUM (Pmode,
				 gen_rtx_HIGH (Pmode, XEXP (x, 0)),
				 XEXP (x, 0)));
	  return &XEXP (XEXP (x, 0), 0);
	}
#endif

      /* If we have a PLUS whose second operand is a constant and the
	 address is not valid, perhaps will can split it up using
	 the machine-specific way to split large constants.  We use
	 the first pseudo-reg (one of the virtual regs) as a placeholder;
	 it will not remain in the result.  */
      if (GET_CODE (XEXP (x, 0)) == PLUS
	  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
	  && ! memory_address_p (GET_MODE (x), XEXP (x, 0)))
	{
	  rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
	  rtx seq = combine_split_insns (gen_rtx_SET (VOIDmode, reg,
						      XEXP (x, 0)),
					 subst_insn);

	  /* This should have produced two insns, each of which sets our
	     placeholder.  If the source of the second is a valid address,
	     we can make put both sources together and make a split point
	     in the middle.  */

	  if (seq
	      && NEXT_INSN (seq) != NULL_RTX
	      && NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
	      && NONJUMP_INSN_P (seq)
	      && GET_CODE (PATTERN (seq)) == SET
	      && SET_DEST (PATTERN (seq)) == reg
	      && ! reg_mentioned_p (reg,
				    SET_SRC (PATTERN (seq)))
	      && NONJUMP_INSN_P (NEXT_INSN (seq))
	      && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET
	      && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg
	      && memory_address_p (GET_MODE (x),
				   SET_SRC (PATTERN (NEXT_INSN (seq)))))
	    {
	      rtx src1 = SET_SRC (PATTERN (seq));
	      rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq)));

	      /* Replace the placeholder in SRC2 with SRC1.  If we can
		 find where in SRC2 it was placed, that can become our
		 split point and we can replace this address with SRC2.
		 Just try two obvious places.  */

	      src2 = replace_rtx (src2, reg, src1);
	      split = 0;
	      if (XEXP (src2, 0) == src1)
		split = &XEXP (src2, 0);
	      else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
		       && XEXP (XEXP (src2, 0), 0) == src1)
		split = &XEXP (XEXP (src2, 0), 0);

	      if (split)
		{
		  SUBST (XEXP (x, 0), src2);
		  return split;
		}
	    }

	  /* If that didn't work, perhaps the first operand is complex and
	     needs to be computed separately, so make a split point there.
	     This will occur on machines that just support REG + CONST
	     and have a constant moved through some previous computation.  */

	  else if (!OBJECT_P (XEXP (XEXP (x, 0), 0))
		   && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
			 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
	    return &XEXP (XEXP (x, 0), 0);
	}

      /* If we have a PLUS whose first operand is complex, try computing it
         separately by making a split there.  */
      if (GET_CODE (XEXP (x, 0)) == PLUS
          && ! memory_address_p (GET_MODE (x), XEXP (x, 0))
          && ! OBJECT_P (XEXP (XEXP (x, 0), 0))
          && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
                && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
        return &XEXP (XEXP (x, 0), 0);
      break;

    case SET:
#ifdef HAVE_cc0
      /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a
	 ZERO_EXTRACT, the most likely reason why this doesn't match is that
	 we need to put the operand into a register.  So split at that
	 point.  */

      if (SET_DEST (x) == cc0_rtx
	  && GET_CODE (SET_SRC (x)) != COMPARE
	  && GET_CODE (SET_SRC (x)) != ZERO_EXTRACT
	  && !OBJECT_P (SET_SRC (x))
	  && ! (GET_CODE (SET_SRC (x)) == SUBREG
		&& OBJECT_P (SUBREG_REG (SET_SRC (x)))))
	return &SET_SRC (x);
#endif

      /* See if we can split SET_SRC as it stands.  */
      split = find_split_point (&SET_SRC (x), insn);
      if (split && split != &SET_SRC (x))
	return split;

      /* See if we can split SET_DEST as it stands.  */
      split = find_split_point (&SET_DEST (x), insn);
      if (split && split != &SET_DEST (x))
	return split;

      /* See if this is a bitfield assignment with everything constant.  If
	 so, this is an IOR of an AND, so split it into that.  */
      if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
	  && (GET_MODE_BITSIZE (GET_MODE (XEXP (SET_DEST (x), 0)))
	      <= HOST_BITS_PER_WIDE_INT)
	  && GET_CODE (XEXP (SET_DEST (x), 1)) == CONST_INT
	  && GET_CODE (XEXP (SET_DEST (x), 2)) == CONST_INT
	  && GET_CODE (SET_SRC (x)) == CONST_INT
	  && ((INTVAL (XEXP (SET_DEST (x), 1))
	       + INTVAL (XEXP (SET_DEST (x), 2)))
	      <= GET_MODE_BITSIZE (GET_MODE (XEXP (SET_DEST (x), 0))))
	  && ! side_effects_p (XEXP (SET_DEST (x), 0)))
	{
	  HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
	  unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1));
	  unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x));
	  rtx dest = XEXP (SET_DEST (x), 0);
	  enum machine_mode mode = GET_MODE (dest);
	  unsigned HOST_WIDE_INT mask = ((HOST_WIDE_INT) 1 << len) - 1;
	  rtx or_mask;

	  if (BITS_BIG_ENDIAN)
	    pos = GET_MODE_BITSIZE (mode) - len - pos;

	  or_mask = gen_int_mode (src << pos, mode);
	  if (src == mask)
	    SUBST (SET_SRC (x),
		   simplify_gen_binary (IOR, mode, dest, or_mask));
	  else
	    {
	      rtx negmask = gen_int_mode (~(mask << pos), mode);
	      SUBST (SET_SRC (x),
		     simplify_gen_binary (IOR, mode,
					  simplify_gen_binary (AND, mode,
							       dest, negmask),
					  or_mask));
	    }

	  SUBST (SET_DEST (x), dest);

	  split = find_split_point (&SET_SRC (x), insn);
	  if (split && split != &SET_SRC (x))
	    return split;
	}

      /* Otherwise, see if this is an operation that we can split into two.
	 If so, try to split that.  */
      code = GET_CODE (SET_SRC (x));

      switch (code)
	{
	case AND:
	  /* If we are AND'ing with a large constant that is only a single
	     bit and the result is only being used in a context where we
	     need to know if it is zero or nonzero, replace it with a bit
	     extraction.  This will avoid the large constant, which might
	     have taken more than one insn to make.  If the constant were
	     not a valid argument to the AND but took only one insn to make,
	     this is no worse, but if it took more than one insn, it will
	     be better.  */

	  if (GET_CODE (XEXP (SET_SRC (x), 1)) == CONST_INT
	      && REG_P (XEXP (SET_SRC (x), 0))
	      && (pos = exact_log2 (INTVAL (XEXP (SET_SRC (x), 1)))) >= 7
	      && REG_P (SET_DEST (x))
	      && (split = find_single_use (SET_DEST (x), insn, (rtx*) 0)) != 0
	      && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
	      && XEXP (*split, 0) == SET_DEST (x)
	      && XEXP (*split, 1) == const0_rtx)
	    {
	      rtx extraction = make_extraction (GET_MODE (SET_DEST (x)),
						XEXP (SET_SRC (x), 0),
						pos, NULL_RTX, 1, 1, 0, 0);
	      if (extraction != 0)
		{
		  SUBST (SET_SRC (x), extraction);
		  return find_split_point (loc, insn);
		}
	    }
	  break;

	case NE:
	  /* If STORE_FLAG_VALUE is -1, this is (NE X 0) and only one bit of X
	     is known to be on, this can be converted into a NEG of a shift.  */
	  if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
	      && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
	      && 1 <= (pos = exact_log2
		       (nonzero_bits (XEXP (SET_SRC (x), 0),
				      GET_MODE (XEXP (SET_SRC (x), 0))))))
	    {
	      enum machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));

	      SUBST (SET_SRC (x),
		     gen_rtx_NEG (mode,
				  gen_rtx_LSHIFTRT (mode,
						    XEXP (SET_SRC (x), 0),
						    GEN_INT (pos))));

	      split = find_split_point (&SET_SRC (x), insn);
	      if (split && split != &SET_SRC (x))
		return split;
	    }
	  break;

	case SIGN_EXTEND:
	  inner = XEXP (SET_SRC (x), 0);

	  /* We can't optimize if either mode is a partial integer
	     mode as we don't know how many bits are significant
	     in those modes.  */
	  if (GET_MODE_CLASS (GET_MODE (inner)) == MODE_PARTIAL_INT
	      || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT)
	    break;

	  pos = 0;
	  len = GET_MODE_BITSIZE (GET_MODE (inner));
	  unsignedp = 0;
	  break;

	case SIGN_EXTRACT:
	case ZERO_EXTRACT:
	  if (GET_CODE (XEXP (SET_SRC (x), 1)) == CONST_INT
	      && GET_CODE (XEXP (SET_SRC (x), 2)) == CONST_INT)
	    {
	      inner = XEXP (SET_SRC (x), 0);
	      len = INTVAL (XEXP (SET_SRC (x), 1));
	      pos = INTVAL (XEXP (SET_SRC (x), 2));

	      if (BITS_BIG_ENDIAN)
		pos = GET_MODE_BITSIZE (GET_MODE (inner)) - len - pos;
	      unsignedp = (code == ZERO_EXTRACT);
	    }
	  break;

	default:
	  break;
	}

      if (len && pos >= 0 && pos + len <= GET_MODE_BITSIZE (GET_MODE (inner)))
	{
	  enum machine_mode mode = GET_MODE (SET_SRC (x));

	  /* For unsigned, we have a choice of a shift followed by an
	     AND or two shifts.  Use two shifts for field sizes where the
	     constant might be too large.  We assume here that we can
	     always at least get 8-bit constants in an AND insn, which is
	     true for every current RISC.  */

	  if (unsignedp && len <= 8)
	    {
	      SUBST (SET_SRC (x),
		     gen_rtx_AND (mode,
				  gen_rtx_LSHIFTRT
				  (mode, gen_lowpart (mode, inner),
				   GEN_INT (pos)),
				  GEN_INT (((HOST_WIDE_INT) 1 << len) - 1)));

	      split = find_split_point (&SET_SRC (x), insn);
	      if (split && split != &SET_SRC (x))
		return split;
	    }
	  else
	    {
	      SUBST (SET_SRC (x),
		     gen_rtx_fmt_ee
		     (unsignedp ? LSHIFTRT : ASHIFTRT, mode,
		      gen_rtx_ASHIFT (mode,
				      gen_lowpart (mode, inner),
				      GEN_INT (GET_MODE_BITSIZE (mode)
					       - len - pos)),
		      GEN_INT (GET_MODE_BITSIZE (mode) - len)));

	      split = find_split_point (&SET_SRC (x), insn);
	      if (split && split != &SET_SRC (x))
		return split;
	    }
	}

      /* See if this is a simple operation with a constant as the second
	 operand.  It might be that this constant is out of range and hence
	 could be used as a split point.  */
      if (BINARY_P (SET_SRC (x))
	  && CONSTANT_P (XEXP (SET_SRC (x), 1))
	  && (OBJECT_P (XEXP (SET_SRC (x), 0))
	      || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG
		  && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0))))))
	return &XEXP (SET_SRC (x), 1);

      /* Finally, see if this is a simple operation with its first operand
	 not in a register.  The operation might require this operand in a
	 register, so return it as a split point.  We can always do this
	 because if the first operand were another operation, we would have
	 already found it as a split point.  */
      if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x)))
	  && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode))
	return &XEXP (SET_SRC (x), 0);

      return 0;

    case AND:
    case IOR:
      /* We write NOR as (and (not A) (not B)), but if we don't have a NOR,
	 it is better to write this as (not (ior A B)) so we can split it.
	 Similarly for IOR.  */
      if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
	{
	  SUBST (*loc,
		 gen_rtx_NOT (GET_MODE (x),
			      gen_rtx_fmt_ee (code == IOR ? AND : IOR,
					      GET_MODE (x),
					      XEXP (XEXP (x, 0), 0),
					      XEXP (XEXP (x, 1), 0))));
	  return find_split_point (loc, insn);
	}

      /* Many RISC machines have a large set of logical insns.  If the
	 second operand is a NOT, put it first so we will try to split the
	 other operand first.  */
      if (GET_CODE (XEXP (x, 1)) == NOT)
	{
	  rtx tem = XEXP (x, 0);
	  SUBST (XEXP (x, 0), XEXP (x, 1));
	  SUBST (XEXP (x, 1), tem);
	}
      break;

    default:
      break;
    }

  /* Otherwise, select our actions depending on our rtx class.  */
  switch (GET_RTX_CLASS (code))
    {
    case RTX_BITFIELD_OPS:		/* This is ZERO_EXTRACT and SIGN_EXTRACT.  */
    case RTX_TERNARY:
      split = find_split_point (&XEXP (x, 2), insn);
      if (split)
	return split;
      /* ... fall through ...  */
    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      split = find_split_point (&XEXP (x, 1), insn);
      if (split)
	return split;
      /* ... fall through ...  */
    case RTX_UNARY:
      /* Some machines have (and (shift ...) ...) insns.  If X is not
	 an AND, but XEXP (X, 0) is, use it as our split point.  */
      if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND)
	return &XEXP (x, 0);

      split = find_split_point (&XEXP (x, 0), insn);
      if (split)
	return split;
      return loc;

    default:
      /* Otherwise, we don't have a split point.  */
      return 0;
    }
}

/* Throughout X, replace FROM with TO, and return the result.
   The result is TO if X is FROM;
   otherwise the result is X, but its contents may have been modified.
   If they were modified, a record was made in undobuf so that
   undo_all will (among other things) return X to its original state.

   If the number of changes necessary is too much to record to undo,
   the excess changes are not made, so the result is invalid.
   The changes already made can still be undone.
   undobuf.num_undo is incremented for such changes, so by testing that
   the caller can tell whether the result is valid.

   `n_occurrences' is incremented each time FROM is replaced.

   IN_DEST is nonzero if we are processing the SET_DEST of a SET.

   UNIQUE_COPY is nonzero if each substitution must be unique.  We do this
   by copying if `n_occurrences' is nonzero.  */

static rtx
subst (rtx x, rtx from, rtx to, int in_dest, int unique_copy)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode op0_mode = VOIDmode;
  const char *fmt;
  int len, i;
  rtx new;

/* Two expressions are equal if they are identical copies of a shared
   RTX or if they are both registers with the same register number
   and mode.  */

#define COMBINE_RTX_EQUAL_P(X,Y)			\
  ((X) == (Y)						\
   || (REG_P (X) && REG_P (Y)	\
       && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))

  if (! in_dest && COMBINE_RTX_EQUAL_P (x, from))
    {
      n_occurrences++;
      return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to);
    }

  /* If X and FROM are the same register but different modes, they
     will not have been seen as equal above.  However, the log links code
     will make a LOG_LINKS entry for that case.  If we do nothing, we
     will try to rerecognize our original insn and, when it succeeds,
     we will delete the feeding insn, which is incorrect.

     So force this insn not to match in this (rare) case.  */
  if (! in_dest && code == REG && REG_P (from)
      && reg_overlap_mentioned_p (x, from))
    return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);

  /* If this is an object, we are done unless it is a MEM or LO_SUM, both
     of which may contain things that can be combined.  */
  if (code != MEM && code != LO_SUM && OBJECT_P (x))
    return x;

  /* It is possible to have a subexpression appear twice in the insn.
     Suppose that FROM is a register that appears within TO.
     Then, after that subexpression has been scanned once by `subst',
     the second time it is scanned, TO may be found.  If we were
     to scan TO here, we would find FROM within it and create a
     self-referent rtl structure which is completely wrong.  */
  if (COMBINE_RTX_EQUAL_P (x, to))
    return to;

  /* Parallel asm_operands need special attention because all of the
     inputs are shared across the arms.  Furthermore, unsharing the
     rtl results in recognition failures.  Failure to handle this case
     specially can result in circular rtl.

     Solve this by doing a normal pass across the first entry of the
     parallel, and only processing the SET_DESTs of the subsequent
     entries.  Ug.  */

  if (code == PARALLEL
      && GET_CODE (XVECEXP (x, 0, 0)) == SET
      && GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS)
    {
      new = subst (XVECEXP (x, 0, 0), from, to, 0, unique_copy);

      /* If this substitution failed, this whole thing fails.  */
      if (GET_CODE (new) == CLOBBER
	  && XEXP (new, 0) == const0_rtx)
	return new;

      SUBST (XVECEXP (x, 0, 0), new);

      for (i = XVECLEN (x, 0) - 1; i >= 1; i--)
	{
	  rtx dest = SET_DEST (XVECEXP (x, 0, i));

	  if (!REG_P (dest)
	      && GET_CODE (dest) != CC0
	      && GET_CODE (dest) != PC)
	    {
	      new = subst (dest, from, to, 0, unique_copy);

	      /* If this substitution failed, this whole thing fails.  */
	      if (GET_CODE (new) == CLOBBER
		  && XEXP (new, 0) == const0_rtx)
		return new;

	      SUBST (SET_DEST (XVECEXP (x, 0, i)), new);
	    }
	}
    }
  else
    {
      len = GET_RTX_LENGTH (code);
      fmt = GET_RTX_FORMAT (code);

      /* We don't need to process a SET_DEST that is a register, CC0,
	 or PC, so set up to skip this common case.  All other cases
	 where we want to suppress replacing something inside a
	 SET_SRC are handled via the IN_DEST operand.  */
      if (code == SET
	  && (REG_P (SET_DEST (x))
	      || GET_CODE (SET_DEST (x)) == CC0
	      || GET_CODE (SET_DEST (x)) == PC))
	fmt = "ie";

      /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a
	 constant.  */
      if (fmt[0] == 'e')
	op0_mode = GET_MODE (XEXP (x, 0));

      for (i = 0; i < len; i++)
	{
	  if (fmt[i] == 'E')
	    {
	      int j;
	      for (j = XVECLEN (x, i) - 1; j >= 0; j--)
		{
		  if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from))
		    {
		      new = (unique_copy && n_occurrences
			     ? copy_rtx (to) : to);
		      n_occurrences++;
		    }
		  else
		    {
		      new = subst (XVECEXP (x, i, j), from, to, 0,
				   unique_copy);

		      /* If this substitution failed, this whole thing
			 fails.  */
		      if (GET_CODE (new) == CLOBBER
			  && XEXP (new, 0) == const0_rtx)
			return new;
		    }

		  SUBST (XVECEXP (x, i, j), new);
		}
	    }
	  else if (fmt[i] == 'e')
	    {
	      /* If this is a register being set, ignore it.  */
	      new = XEXP (x, i);
	      if (in_dest
		  && i == 0
		  && (((code == SUBREG || code == ZERO_EXTRACT)
		       && REG_P (new))
		      || code == STRICT_LOW_PART))
		;

	      else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
		{
		  /* In general, don't install a subreg involving two
		     modes not tieable.  It can worsen register
		     allocation, and can even make invalid reload
		     insns, since the reg inside may need to be copied
		     from in the outside mode, and that may be invalid
		     if it is an fp reg copied in integer mode.

		     We allow two exceptions to this: It is valid if
		     it is inside another SUBREG and the mode of that
		     SUBREG and the mode of the inside of TO is
		     tieable and it is valid if X is a SET that copies
		     FROM to CC0.  */

		  if (GET_CODE (to) == SUBREG
		      && ! MODES_TIEABLE_P (GET_MODE (to),
					    GET_MODE (SUBREG_REG (to)))
		      && ! (code == SUBREG
			    && MODES_TIEABLE_P (GET_MODE (x),
						GET_MODE (SUBREG_REG (to))))
#ifdef HAVE_cc0
		      && ! (code == SET && i == 1 && XEXP (x, 0) == cc0_rtx)
#endif
		      )
		    return gen_rtx_CLOBBER (VOIDmode, const0_rtx);

#ifdef CANNOT_CHANGE_MODE_CLASS
		  if (code == SUBREG
		      && REG_P (to)
		      && REGNO (to) < FIRST_PSEUDO_REGISTER
		      && REG_CANNOT_CHANGE_MODE_P (REGNO (to),
						   GET_MODE (to),
						   GET_MODE (x)))
		    return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
#endif

		  new = (unique_copy && n_occurrences ? copy_rtx (to) : to);
		  n_occurrences++;
		}
	      else
		/* If we are in a SET_DEST, suppress most cases unless we
		   have gone inside a MEM, in which case we want to
		   simplify the address.  We assume here that things that
		   are actually part of the destination have their inner
		   parts in the first expression.  This is true for SUBREG,
		   STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
		   things aside from REG and MEM that should appear in a
		   SET_DEST.  */
		new = subst (XEXP (x, i), from, to,
			     (((in_dest
				&& (code == SUBREG || code == STRICT_LOW_PART
				    || code == ZERO_EXTRACT))
			       || code == SET)
			      && i == 0), unique_copy);

	      /* If we found that we will have to reject this combination,
		 indicate that by returning the CLOBBER ourselves, rather than
		 an expression containing it.  This will speed things up as
		 well as prevent accidents where two CLOBBERs are considered
		 to be equal, thus producing an incorrect simplification.  */

	      if (GET_CODE (new) == CLOBBER && XEXP (new, 0) == const0_rtx)
		return new;

	      if (GET_CODE (x) == SUBREG
		  && (GET_CODE (new) == CONST_INT
		      || GET_CODE (new) == CONST_DOUBLE))
		{
		  enum machine_mode mode = GET_MODE (x);

		  x = simplify_subreg (GET_MODE (x), new,
				       GET_MODE (SUBREG_REG (x)),
				       SUBREG_BYTE (x));
		  if (! x)
		    x = gen_rtx_CLOBBER (mode, const0_rtx);
		}
	      else if (GET_CODE (new) == CONST_INT
		       && GET_CODE (x) == ZERO_EXTEND)
		{
		  x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
						new, GET_MODE (XEXP (x, 0)));
		  gcc_assert (x);
		}
	      else
		SUBST (XEXP (x, i), new);
	    }
	}
    }

  /* Check if we are loading something from the constant pool via float
     extension; in this case we would undo compress_float_constant
     optimization and degenerate constant load to an immediate value.  */
  if (GET_CODE (x) == FLOAT_EXTEND
      && MEM_P (XEXP (x, 0))
      && MEM_READONLY_P (XEXP (x, 0)))
    {
      rtx tmp = avoid_constant_pool_reference (x);
      if (x != tmp)
        return x;
    }

  /* Try to simplify X.  If the simplification changed the code, it is likely
     that further simplification will help, so loop, but limit the number
     of repetitions that will be performed.  */

  for (i = 0; i < 4; i++)
    {
      /* If X is sufficiently simple, don't bother trying to do anything
	 with it.  */
      if (code != CONST_INT && code != REG && code != CLOBBER)
	x = combine_simplify_rtx (x, op0_mode, in_dest);

      if (GET_CODE (x) == code)
	break;

      code = GET_CODE (x);

      /* We no longer know the original mode of operand 0 since we
	 have changed the form of X)  */
      op0_mode = VOIDmode;
    }

  return x;
}

/* Simplify X, a piece of RTL.  We just operate on the expression at the
   outer level; call `subst' to simplify recursively.  Return the new
   expression.

   OP0_MODE is the original mode of XEXP (x, 0).  IN_DEST is nonzero
   if we are inside a SET_DEST.  */

static rtx
combine_simplify_rtx (rtx x, enum machine_mode op0_mode, int in_dest)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  rtx temp;
  int i;

  /* If this is a commutative operation, put a constant last and a complex
     expression first.  We don't need to do this for comparisons here.  */
  if (COMMUTATIVE_ARITH_P (x)
      && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
    {
      temp = XEXP (x, 0);
      SUBST (XEXP (x, 0), XEXP (x, 1));
      SUBST (XEXP (x, 1), temp);
    }

  /* If this is a simple operation applied to an IF_THEN_ELSE, try
     applying it to the arms of the IF_THEN_ELSE.  This often simplifies
     things.  Check for cases where both arms are testing the same
     condition.

     Don't do anything if all operands are very simple.  */

  if ((BINARY_P (x)
       && ((!OBJECT_P (XEXP (x, 0))
	    && ! (GET_CODE (XEXP (x, 0)) == SUBREG
		  && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))
	   || (!OBJECT_P (XEXP (x, 1))
	       && ! (GET_CODE (XEXP (x, 1)) == SUBREG
		     && OBJECT_P (SUBREG_REG (XEXP (x, 1)))))))
      || (UNARY_P (x)
	  && (!OBJECT_P (XEXP (x, 0))
	       && ! (GET_CODE (XEXP (x, 0)) == SUBREG
		     && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))))
    {
      rtx cond, true_rtx, false_rtx;

      cond = if_then_else_cond (x, &true_rtx, &false_rtx);
      if (cond != 0
	  /* If everything is a comparison, what we have is highly unlikely
	     to be simpler, so don't use it.  */
	  && ! (COMPARISON_P (x)
		&& (COMPARISON_P (true_rtx) || COMPARISON_P (false_rtx))))
	{
	  rtx cop1 = const0_rtx;
	  enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1);

	  if (cond_code == NE && COMPARISON_P (cond))
	    return x;

	  /* Simplify the alternative arms; this may collapse the true and
	     false arms to store-flag values.  Be careful to use copy_rtx
	     here since true_rtx or false_rtx might share RTL with x as a
	     result of the if_then_else_cond call above.  */
	  true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0);
	  false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0);

	  /* If true_rtx and false_rtx are not general_operands, an if_then_else
	     is unlikely to be simpler.  */
	  if (general_operand (true_rtx, VOIDmode)
	      && general_operand (false_rtx, VOIDmode))
	    {
	      enum rtx_code reversed;

	      /* Restarting if we generate a store-flag expression will cause
		 us to loop.  Just drop through in this case.  */

	      /* If the result values are STORE_FLAG_VALUE and zero, we can
		 just make the comparison operation.  */
	      if (true_rtx == const_true_rtx && false_rtx == const0_rtx)
		x = simplify_gen_relational (cond_code, mode, VOIDmode,
					     cond, cop1);
	      else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
		       && ((reversed = reversed_comparison_code_parts
					(cond_code, cond, cop1, NULL))
			   != UNKNOWN))
		x = simplify_gen_relational (reversed, mode, VOIDmode,
					     cond, cop1);

	      /* Likewise, we can make the negate of a comparison operation
		 if the result values are - STORE_FLAG_VALUE and zero.  */
	      else if (GET_CODE (true_rtx) == CONST_INT
		       && INTVAL (true_rtx) == - STORE_FLAG_VALUE
		       && false_rtx == const0_rtx)
		x = simplify_gen_unary (NEG, mode,
					simplify_gen_relational (cond_code,
								 mode, VOIDmode,
								 cond, cop1),
					mode);
	      else if (GET_CODE (false_rtx) == CONST_INT
		       && INTVAL (false_rtx) == - STORE_FLAG_VALUE
		       && true_rtx == const0_rtx
		       && ((reversed = reversed_comparison_code_parts
					(cond_code, cond, cop1, NULL))
			   != UNKNOWN))
		x = simplify_gen_unary (NEG, mode,
					simplify_gen_relational (reversed,
								 mode, VOIDmode,
								 cond, cop1),
					mode);
	      else
		return gen_rtx_IF_THEN_ELSE (mode,
					     simplify_gen_relational (cond_code,
								      mode,
								      VOIDmode,
								      cond,
								      cop1),
					     true_rtx, false_rtx);

	      code = GET_CODE (x);
	      op0_mode = VOIDmode;
	    }
	}
    }

  /* Try to fold this expression in case we have constants that weren't
     present before.  */
  temp = 0;
  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      if (op0_mode == VOIDmode)
	op0_mode = GET_MODE (XEXP (x, 0));
      temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
      break;
    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      {
	enum machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
	if (cmp_mode == VOIDmode)
	  {
	    cmp_mode = GET_MODE (XEXP (x, 1));
	    if (cmp_mode == VOIDmode)
	      cmp_mode = op0_mode;
	  }
	temp = simplify_relational_operation (code, mode, cmp_mode,
					      XEXP (x, 0), XEXP (x, 1));
      }
      break;
    case RTX_COMM_ARITH:
    case RTX_BIN_ARITH:
      temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
      break;
    case RTX_BITFIELD_OPS:
    case RTX_TERNARY:
      temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0),
					 XEXP (x, 1), XEXP (x, 2));
      break;
    default:
      break;
    }

  if (temp)
    {
      x = temp;
      code = GET_CODE (temp);
      op0_mode = VOIDmode;
      mode = GET_MODE (temp);
    }

  /* First see if we can apply the inverse distributive law.  */
  if (code == PLUS || code == MINUS
      || code == AND || code == IOR || code == XOR)
    {
      x = apply_distributive_law (x);
      code = GET_CODE (x);
      op0_mode = VOIDmode;
    }

  /* If CODE is an associative operation not otherwise handled, see if we
     can associate some operands.  This can win if they are constants or
     if they are logically related (i.e. (a & b) & a).  */
  if ((code == PLUS || code == MINUS || code == MULT || code == DIV
       || code == AND || code == IOR || code == XOR
       || code == SMAX || code == SMIN || code == UMAX || code == UMIN)
      && ((INTEGRAL_MODE_P (mode) && code != DIV)
	  || (flag_associative_math && FLOAT_MODE_P (mode))))
    {
      if (GET_CODE (XEXP (x, 0)) == code)
	{
	  rtx other = XEXP (XEXP (x, 0), 0);
	  rtx inner_op0 = XEXP (XEXP (x, 0), 1);
	  rtx inner_op1 = XEXP (x, 1);
	  rtx inner;

	  /* Make sure we pass the constant operand if any as the second
	     one if this is a commutative operation.  */
	  if (CONSTANT_P (inner_op0) && COMMUTATIVE_ARITH_P (x))
	    {
	      rtx tem = inner_op0;
	      inner_op0 = inner_op1;
	      inner_op1 = tem;
	    }
	  inner = simplify_binary_operation (code == MINUS ? PLUS
					     : code == DIV ? MULT
					     : code,
					     mode, inner_op0, inner_op1);

	  /* For commutative operations, try the other pair if that one
	     didn't simplify.  */
	  if (inner == 0 && COMMUTATIVE_ARITH_P (x))
	    {
	      other = XEXP (XEXP (x, 0), 1);
	      inner = simplify_binary_operation (code, mode,
						 XEXP (XEXP (x, 0), 0),
						 XEXP (x, 1));
	    }

	  if (inner)
	    return simplify_gen_binary (code, mode, other, inner);
	}
    }

  /* A little bit of algebraic simplification here.  */
  switch (code)
    {
    case MEM:
      /* Ensure that our address has any ASHIFTs converted to MULT in case
	 address-recognizing predicates are called later.  */
      temp = make_compound_operation (XEXP (x, 0), MEM);
      SUBST (XEXP (x, 0), temp);
      break;

    case SUBREG:
      if (op0_mode == VOIDmode)
	op0_mode = GET_MODE (SUBREG_REG (x));

      /* See if this can be moved to simplify_subreg.  */
      if (CONSTANT_P (SUBREG_REG (x))
	  && subreg_lowpart_offset (mode, op0_mode) == SUBREG_BYTE (x)
	     /* Don't call gen_lowpart if the inner mode
		is VOIDmode and we cannot simplify it, as SUBREG without
		inner mode is invalid.  */
	  && (GET_MODE (SUBREG_REG (x)) != VOIDmode
	      || gen_lowpart_common (mode, SUBREG_REG (x))))
	return gen_lowpart (mode, SUBREG_REG (x));

      if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC)
	break;
      {
	rtx temp;
	temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,
				SUBREG_BYTE (x));
	if (temp)
	  return temp;
      }

      /* Don't change the mode of the MEM if that would change the meaning
	 of the address.  */
      if (MEM_P (SUBREG_REG (x))
	  && (MEM_VOLATILE_P (SUBREG_REG (x))
	      || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0))))
	return gen_rtx_CLOBBER (mode, const0_rtx);

      /* Note that we cannot do any narrowing for non-constants since
	 we might have been counting on using the fact that some bits were
	 zero.  We now do this in the SET.  */

      break;

    case NEG:
      temp = expand_compound_operation (XEXP (x, 0));

      /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
	 replaced by (lshiftrt X C).  This will convert
	 (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y).  */

      if (GET_CODE (temp) == ASHIFTRT
	  && GET_CODE (XEXP (temp, 1)) == CONST_INT
	  && INTVAL (XEXP (temp, 1)) == GET_MODE_BITSIZE (mode) - 1)
	return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
				     INTVAL (XEXP (temp, 1)));

      /* If X has only a single bit that might be nonzero, say, bit I, convert
	 (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of
	 MODE minus 1.  This will convert (neg (zero_extract X 1 Y)) to
	 (sign_extract X 1 Y).  But only do this if TEMP isn't a register
	 or a SUBREG of one since we'd be making the expression more
	 complex if it was just a register.  */

      if (!REG_P (temp)
	  && ! (GET_CODE (temp) == SUBREG
		&& REG_P (SUBREG_REG (temp)))
	  && (i = exact_log2 (nonzero_bits (temp, mode))) >= 0)
	{
	  rtx temp1 = simplify_shift_const
	    (NULL_RTX, ASHIFTRT, mode,
	     simplify_shift_const (NULL_RTX, ASHIFT, mode, temp,
				   GET_MODE_BITSIZE (mode) - 1 - i),
	     GET_MODE_BITSIZE (mode) - 1 - i);

	  /* If all we did was surround TEMP with the two shifts, we
	     haven't improved anything, so don't use it.  Otherwise,
	     we are better off with TEMP1.  */
	  if (GET_CODE (temp1) != ASHIFTRT
	      || GET_CODE (XEXP (temp1, 0)) != ASHIFT
	      || XEXP (XEXP (temp1, 0), 0) != temp)
	    return temp1;
	}
      break;

    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
	 because we don't know the real bitsize of the partial
	 integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
	break;

      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
				    GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))))
	SUBST (XEXP (x, 0),
	       force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
			      GET_MODE_MASK (mode), 0));

      /* Similarly to what we do in simplify-rtx.c, a truncate of a register
	 whose value is a comparison can be replaced with a subreg if
	 STORE_FLAG_VALUE permits.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
	  && (temp = get_last_value (XEXP (x, 0)))
	  && COMPARISON_P (temp))
	return gen_lowpart (mode, XEXP (x, 0));
      break;

#ifdef HAVE_cc0
    case COMPARE:
      /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
	 using cc0, in which case we want to leave it as a COMPARE
	 so we can distinguish it from a register-register-copy.  */
      if (XEXP (x, 1) == const0_rtx)
	return XEXP (x, 0);

      /* x - 0 is the same as x unless x's mode has signed zeros and
	 allows rounding towards -infinity.  Under those conditions,
	 0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (GET_MODE (XEXP (x, 0)))
	    && HONOR_SIGN_DEPENDENT_ROUNDING (GET_MODE (XEXP (x, 0))))
	  && XEXP (x, 1) == CONST0_RTX (GET_MODE (XEXP (x, 0))))
	return XEXP (x, 0);
      break;
#endif

    case CONST:
      /* (const (const X)) can become (const X).  Do it this way rather than
	 returning the inner CONST since CONST can be shared with a
	 REG_EQUAL note.  */
      if (GET_CODE (XEXP (x, 0)) == CONST)
	SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
      break;

#ifdef HAVE_lo_sum
    case LO_SUM:
      /* Convert (lo_sum (high FOO) FOO) to FOO.  This is necessary so we
	 can add in an offset.  find_split_point will split this address up
	 again if it doesn't match.  */
      if (GET_CODE (XEXP (x, 0)) == HIGH
	  && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
	return XEXP (x, 1);
      break;
#endif

    case PLUS:
      /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
	 when c is (const_int (pow2 + 1) / 2) is a sign extension of a
	 bit-field and can be replaced by either a sign_extend or a
	 sign_extract.  The `and' may be a zero_extend and the two
	 <c>, -<c> constants may be reversed.  */
      if (GET_CODE (XEXP (x, 0)) == XOR
	  && GET_CODE (XEXP (x, 1)) == CONST_INT
	  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
	  && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1))
	  && ((i = exact_log2 (INTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
	      || (i = exact_log2 (INTVAL (XEXP (x, 1)))) >= 0)
	  && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
	       && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
	       && (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
		   == ((HOST_WIDE_INT) 1 << (i + 1)) - 1))
	      || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
		  && (GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))
		      == (unsigned int) i + 1))))
	return simplify_shift_const
	  (NULL_RTX, ASHIFTRT, mode,
	   simplify_shift_const (NULL_RTX, ASHIFT, mode,
				 XEXP (XEXP (XEXP (x, 0), 0), 0),
				 GET_MODE_BITSIZE (mode) - (i + 1)),
	   GET_MODE_BITSIZE (mode) - (i + 1));

      /* If only the low-order bit of X is possibly nonzero, (plus x -1)
	 can become (ashiftrt (ashift (xor x 1) C) C) where C is
	 the bitsize of the mode - 1.  This allows simplification of
	 "a = (b & 8) == 0;"  */
      if (XEXP (x, 1) == constm1_rtx
	  && !REG_P (XEXP (x, 0))
	  && ! (GET_CODE (XEXP (x, 0)) == SUBREG
		&& REG_P (SUBREG_REG (XEXP (x, 0))))
	  && nonzero_bits (XEXP (x, 0), mode) == 1)
	return simplify_shift_const (NULL_RTX, ASHIFTRT, mode,
	   simplify_shift_const (NULL_RTX, ASHIFT, mode,
				 gen_rtx_XOR (mode, XEXP (x, 0), const1_rtx),
				 GET_MODE_BITSIZE (mode) - 1),
	   GET_MODE_BITSIZE (mode) - 1);

      /* If we are adding two things that have no bits in common, convert
	 the addition into an IOR.  This will often be further simplified,
	 for example in cases like ((a & 1) + (a & 2)), which can
	 become a & 3.  */

      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	  && (nonzero_bits (XEXP (x, 0), mode)
	      & nonzero_bits (XEXP (x, 1), mode)) == 0)
	{
	  /* Try to simplify the expression further.  */
	  rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
	  temp = combine_simplify_rtx (tor, mode, in_dest);

	  /* If we could, great.  If not, do not go ahead with the IOR
	     replacement, since PLUS appears in many special purpose
	     address arithmetic instructions.  */
	  if (GET_CODE (temp) != CLOBBER && temp != tor)
	    return temp;
	}
      break;

    case MINUS:
      /* (minus <foo> (and <foo> (const_int -pow2))) becomes
	 (and <foo> (const_int pow2-1))  */
      if (GET_CODE (XEXP (x, 1)) == AND
	  && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
	  && exact_log2 (-INTVAL (XEXP (XEXP (x, 1), 1))) >= 0
	  && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
	return simplify_and_const_int (NULL_RTX, mode, XEXP (x, 0),
				       -INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
      break;

    case MULT:
      /* If we have (mult (plus A B) C), apply the distributive law and then
	 the inverse distributive law to see if things simplify.  This
	 occurs mostly in addresses, often when unrolling loops.  */

      if (GET_CODE (XEXP (x, 0)) == PLUS)
	{
	  rtx result = distribute_and_simplify_rtx (x, 0);
	  if (result)
	    return result;
	}

      /* Try simplify a*(b/c) as (a*b)/c.  */
      if (FLOAT_MODE_P (mode) && flag_associative_math 
	  && GET_CODE (XEXP (x, 0)) == DIV)
	{
	  rtx tem = simplify_binary_operation (MULT, mode,
					       XEXP (XEXP (x, 0), 0),
					       XEXP (x, 1));
	  if (tem)
	    return simplify_gen_binary (DIV, mode, tem, XEXP (XEXP (x, 0), 1));
	}
      break;

    case UDIV:
      /* If this is a divide by a power of two, treat it as a shift if
	 its first operand is a shift.  */
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && (i = exact_log2 (INTVAL (XEXP (x, 1)))) >= 0
	  && (GET_CODE (XEXP (x, 0)) == ASHIFT
	      || GET_CODE (XEXP (x, 0)) == LSHIFTRT
	      || GET_CODE (XEXP (x, 0)) == ASHIFTRT
	      || GET_CODE (XEXP (x, 0)) == ROTATE
	      || GET_CODE (XEXP (x, 0)) == ROTATERT))
	return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (x, 0), i);
      break;

    case EQ:  case NE:
    case GT:  case GTU:  case GE:  case GEU:
    case LT:  case LTU:  case LE:  case LEU:
    case UNEQ:  case LTGT:
    case UNGT:  case UNGE:
    case UNLT:  case UNLE:
    case UNORDERED: case ORDERED:
      /* If the first operand is a condition code, we can't do anything
	 with it.  */
      if (GET_CODE (XEXP (x, 0)) == COMPARE
	  || (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC
	      && ! CC0_P (XEXP (x, 0))))
	{
	  rtx op0 = XEXP (x, 0);
	  rtx op1 = XEXP (x, 1);
	  enum rtx_code new_code;

	  if (GET_CODE (op0) == COMPARE)
	    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

	  /* Simplify our comparison, if possible.  */
	  new_code = simplify_comparison (code, &op0, &op1);

	  /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X
	     if only the low-order bit is possibly nonzero in X (such as when
	     X is a ZERO_EXTRACT of one bit).  Similarly, we can convert EQ to
	     (xor X 1) or (minus 1 X); we use the former.  Finally, if X is
	     known to be either 0 or -1, NE becomes a NEG and EQ becomes
	     (plus X 1).

	     Remove any ZERO_EXTRACT we made when thinking this was a
	     comparison.  It may now be simpler to use, e.g., an AND.  If a
	     ZERO_EXTRACT is indeed appropriate, it will be placed back by
	     the call to make_compound_operation in the SET case.  */

	  if (STORE_FLAG_VALUE == 1
	      && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
	      && op1 == const0_rtx
	      && mode == GET_MODE (op0)
	      && nonzero_bits (op0, mode) == 1)
	    return gen_lowpart (mode,
				expand_compound_operation (op0));

	  else if (STORE_FLAG_VALUE == 1
		   && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
		   && op1 == const0_rtx
		   && mode == GET_MODE (op0)
		   && (num_sign_bit_copies (op0, mode)
		       == GET_MODE_BITSIZE (mode)))
	    {
	      op0 = expand_compound_operation (op0);
	      return simplify_gen_unary (NEG, mode,
					 gen_lowpart (mode, op0),
					 mode);
	    }

	  else if (STORE_FLAG_VALUE == 1
		   && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
		   && op1 == const0_rtx
		   && mode == GET_MODE (op0)
		   && nonzero_bits (op0, mode) == 1)
	    {
	      op0 = expand_compound_operation (op0);
	      return simplify_gen_binary (XOR, mode,
					  gen_lowpart (mode, op0),
					  const1_rtx);
	    }

	  else if (STORE_FLAG_VALUE == 1
		   && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
		   && op1 == const0_rtx
		   && mode == GET_MODE (op0)
		   && (num_sign_bit_copies (op0, mode)
		       == GET_MODE_BITSIZE (mode)))
	    {
	      op0 = expand_compound_operation (op0);
	      return plus_constant (gen_lowpart (mode, op0), 1);
	    }

	  /* If STORE_FLAG_VALUE is -1, we have cases similar to
	     those above.  */
	  if (STORE_FLAG_VALUE == -1
	      && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
	      && op1 == const0_rtx
	      && (num_sign_bit_copies (op0, mode)
		  == GET_MODE_BITSIZE (mode)))
	    return gen_lowpart (mode,
				expand_compound_operation (op0));

	  else if (STORE_FLAG_VALUE == -1
		   && new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
		   && op1 == const0_rtx
		   && mode == GET_MODE (op0)
		   && nonzero_bits (op0, mode) == 1)
	    {
	      op0 = expand_compound_operation (op0);
	      return simplify_gen_unary (NEG, mode,
					 gen_lowpart (mode, op0),
					 mode);
	    }

	  else if (STORE_FLAG_VALUE == -1
		   && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
		   && op1 == const0_rtx
		   && mode == GET_MODE (op0)
		   && (num_sign_bit_copies (op0, mode)
		       == GET_MODE_BITSIZE (mode)))
	    {
	      op0 = expand_compound_operation (op0);
	      return simplify_gen_unary (NOT, mode,
					 gen_lowpart (mode, op0),
					 mode);
	    }

	  /* If X is 0/1, (eq X 0) is X-1.  */
	  else if (STORE_FLAG_VALUE == -1
		   && new_code == EQ && GET_MODE_CLASS (mode) == MODE_INT
		   && op1 == const0_rtx
		   && mode == GET_MODE (op0)
		   && nonzero_bits (op0, mode) == 1)
	    {
	      op0 = expand_compound_operation (op0);
	      return plus_constant (gen_lowpart (mode, op0), -1);
	    }

	  /* If STORE_FLAG_VALUE says to just test the sign bit and X has just
	     one bit that might be nonzero, we can convert (ne x 0) to
	     (ashift x c) where C puts the bit in the sign bit.  Remove any
	     AND with STORE_FLAG_VALUE when we are done, since we are only
	     going to test the sign bit.  */
	  if (new_code == NE && GET_MODE_CLASS (mode) == MODE_INT
	      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	      && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
		  == (unsigned HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (mode) - 1))
	      && op1 == const0_rtx
	      && mode == GET_MODE (op0)
	      && (i = exact_log2 (nonzero_bits (op0, mode))) >= 0)
	    {
	      x = simplify_shift_const (NULL_RTX, ASHIFT, mode,
					expand_compound_operation (op0),
					GET_MODE_BITSIZE (mode) - 1 - i);
	      if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
		return XEXP (x, 0);
	      else
		return x;
	    }

	  /* If the code changed, return a whole new comparison.  */
	  if (new_code != code)
	    return gen_rtx_fmt_ee (new_code, mode, op0, op1);

	  /* Otherwise, keep this operation, but maybe change its operands.
	     This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR).  */
	  SUBST (XEXP (x, 0), op0);
	  SUBST (XEXP (x, 1), op1);
	}
      break;

    case IF_THEN_ELSE:
      return simplify_if_then_else (x);

    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
    case ZERO_EXTEND:
    case SIGN_EXTEND:
      /* If we are processing SET_DEST, we are done.  */
      if (in_dest)
	return x;

      return expand_compound_operation (x);

    case SET:
      return simplify_set (x);

    case AND:
    case IOR:
      return simplify_logical (x);

    case ASHIFT:
    case LSHIFTRT:
    case ASHIFTRT:
    case ROTATE:
    case ROTATERT:
      /* If this is a shift by a constant amount, simplify it.  */
      if (GET_CODE (XEXP (x, 1)) == CONST_INT)
	return simplify_shift_const (x, code, mode, XEXP (x, 0),
				     INTVAL (XEXP (x, 1)));

      else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
	SUBST (XEXP (x, 1),
	       force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
			      ((HOST_WIDE_INT) 1
			       << exact_log2 (GET_MODE_BITSIZE (GET_MODE (x))))
			      - 1,
			      0));
      break;

    default:
      break;
    }

  return x;
}

/* Simplify X, an IF_THEN_ELSE expression.  Return the new expression.  */

static rtx
simplify_if_then_else (rtx x)
{
  enum machine_mode mode = GET_MODE (x);
  rtx cond = XEXP (x, 0);
  rtx true_rtx = XEXP (x, 1);
  rtx false_rtx = XEXP (x, 2);
  enum rtx_code true_code = GET_CODE (cond);
  int comparison_p = COMPARISON_P (cond);
  rtx temp;
  int i;
  enum rtx_code false_code;
  rtx reversed;

  /* Simplify storing of the truth value.  */
  if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
    return simplify_gen_relational (true_code, mode, VOIDmode,
				    XEXP (cond, 0), XEXP (cond, 1));

  /* Also when the truth value has to be reversed.  */
  if (comparison_p
      && true_rtx == const0_rtx && false_rtx == const_true_rtx
      && (reversed = reversed_comparison (cond, mode)))
    return reversed;

  /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
     in it is being compared against certain values.  Get the true and false
     comparisons and see if that says anything about the value of each arm.  */

  if (comparison_p
      && ((false_code = reversed_comparison_code (cond, NULL))
	  != UNKNOWN)
      && REG_P (XEXP (cond, 0)))
    {
      HOST_WIDE_INT nzb;
      rtx from = XEXP (cond, 0);
      rtx true_val = XEXP (cond, 1);
      rtx false_val = true_val;
      int swapped = 0;

      /* If FALSE_CODE is EQ, swap the codes and arms.  */

      if (false_code == EQ)
	{
	  swapped = 1, true_code = EQ, false_code = NE;
	  temp = true_rtx, true_rtx = false_rtx, false_rtx = temp;
	}

      /* If we are comparing against zero and the expression being tested has
	 only a single bit that might be nonzero, that is its value when it is
	 not equal to zero.  Similarly if it is known to be -1 or 0.  */

      if (true_code == EQ && true_val == const0_rtx
	  && exact_log2 (nzb = nonzero_bits (from, GET_MODE (from))) >= 0)
	{
	  false_code = EQ;
	  false_val = GEN_INT (trunc_int_for_mode (nzb, GET_MODE (from)));
	}
      else if (true_code == EQ && true_val == const0_rtx
	       && (num_sign_bit_copies (from, GET_MODE (from))
		   == GET_MODE_BITSIZE (GET_MODE (from))))
	{
	  false_code = EQ;
	  false_val = constm1_rtx;
	}

      /* Now simplify an arm if we know the value of the register in the
	 branch and it is used in the arm.  Be careful due to the potential
	 of locally-shared RTL.  */

      if (reg_mentioned_p (from, true_rtx))
	true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code,
				      from, true_val),
		      pc_rtx, pc_rtx, 0, 0);
      if (reg_mentioned_p (from, false_rtx))
	false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
				   from, false_val),
		       pc_rtx, pc_rtx, 0, 0);

      SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx);
      SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx);

      true_rtx = XEXP (x, 1);
      false_rtx = XEXP (x, 2);
      true_code = GET_CODE (cond);
    }

  /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
     reversed, do so to avoid needing two sets of patterns for
     subtract-and-branch insns.  Similarly if we have a constant in the true
     arm, the false arm is the same as the first operand of the comparison, or
     the false arm is more complicated than the true arm.  */

  if (comparison_p
      && reversed_comparison_code (cond, NULL) != UNKNOWN
      && (true_rtx == pc_rtx
	  || (CONSTANT_P (true_rtx)
	      && GET_CODE (false_rtx) != CONST_INT && false_rtx != pc_rtx)
	  || true_rtx == const0_rtx
	  || (OBJECT_P (true_rtx) && !OBJECT_P (false_rtx))
	  || (GET_CODE (true_rtx) == SUBREG && OBJECT_P (SUBREG_REG (true_rtx))
	      && !OBJECT_P (false_rtx))
	  || reg_mentioned_p (true_rtx, false_rtx)
	  || rtx_equal_p (false_rtx, XEXP (cond, 0))))
    {
      true_code = reversed_comparison_code (cond, NULL);
      SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond)));
      SUBST (XEXP (x, 1), false_rtx);
      SUBST (XEXP (x, 2), true_rtx);

      temp = true_rtx, true_rtx = false_rtx, false_rtx = temp;
      cond = XEXP (x, 0);

      /* It is possible that the conditional has been simplified out.  */
      true_code = GET_CODE (cond);
      comparison_p = COMPARISON_P (cond);
    }

  /* If the two arms are identical, we don't need the comparison.  */

  if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond))
    return true_rtx;

  /* Convert a == b ? b : a to "a".  */
  if (true_code == EQ && ! side_effects_p (cond)
      && !HONOR_NANS (mode)
      && rtx_equal_p (XEXP (cond, 0), false_rtx)
      && rtx_equal_p (XEXP (cond, 1), true_rtx))
    return false_rtx;
  else if (true_code == NE && ! side_effects_p (cond)
	   && !HONOR_NANS (mode)
	   && rtx_equal_p (XEXP (cond, 0), true_rtx)
	   && rtx_equal_p (XEXP (cond, 1), false_rtx))
    return true_rtx;

  /* Look for cases where we have (abs x) or (neg (abs X)).  */

  if (GET_MODE_CLASS (mode) == MODE_INT
      && comparison_p
      && XEXP (cond, 1) == const0_rtx
      && GET_CODE (false_rtx) == NEG
      && rtx_equal_p (true_rtx, XEXP (false_rtx, 0))
      && rtx_equal_p (true_rtx, XEXP (cond, 0))
      && ! side_effects_p (true_rtx))
    switch (true_code)
      {
      case GT:
      case GE:
	return simplify_gen_unary (ABS, mode, true_rtx, mode);
      case LT:
      case LE:
	return
	  simplify_gen_unary (NEG, mode,
			      simplify_gen_unary (ABS, mode, true_rtx, mode),
			      mode);
      default:
	break;
      }

  /* Look for MIN or MAX.  */

  if ((! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
      && comparison_p
      && rtx_equal_p (XEXP (cond, 0), true_rtx)
      && rtx_equal_p (XEXP (cond, 1), false_rtx)
      && ! side_effects_p (cond))
    switch (true_code)
      {
      case GE:
      case GT:
	return simplify_gen_binary (SMAX, mode, true_rtx, false_rtx);
      case LE:
      case LT:
	return simplify_gen_binary (SMIN, mode, true_rtx, false_rtx);
      case GEU:
      case GTU:
	return simplify_gen_binary (UMAX, mode, true_rtx, false_rtx);
      case LEU:
      case LTU:
	return simplify_gen_binary (UMIN, mode, true_rtx, false_rtx);
      default:
	break;
      }

  /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
     second operand is zero, this can be done as (OP Z (mult COND C2)) where
     C2 = C1 * STORE_FLAG_VALUE. Similarly if OP has an outer ZERO_EXTEND or
     SIGN_EXTEND as long as Z is already extended (so we don't destroy it).
     We can do this kind of thing in some cases when STORE_FLAG_VALUE is
     neither 1 or -1, but it isn't worth checking for.  */

  if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
      && comparison_p
      && GET_MODE_CLASS (mode) == MODE_INT
      && ! side_effects_p (x))
    {
      rtx t = make_compound_operation (true_rtx, SET);
      rtx f = make_compound_operation (false_rtx, SET);
      rtx cond_op0 = XEXP (cond, 0);
      rtx cond_op1 = XEXP (cond, 1);
      enum rtx_code op = UNKNOWN, extend_op = UNKNOWN;
      enum machine_mode m = mode;
      rtx z = 0, c1 = NULL_RTX;

      if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
	   || GET_CODE (t) == IOR || GET_CODE (t) == XOR
	   || GET_CODE (t) == ASHIFT
	   || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT)
	  && rtx_equal_p (XEXP (t, 0), f))
	c1 = XEXP (t, 1), op = GET_CODE (t), z = f;

      /* If an identity-zero op is commutative, check whether there
	 would be a match if we swapped the operands.  */
      else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR
		|| GET_CODE (t) == XOR)
	       && rtx_equal_p (XEXP (t, 1), f))
	c1 = XEXP (t, 0), op = GET_CODE (t), z = f;
      else if (GET_CODE (t) == SIGN_EXTEND
	       && (GET_CODE (XEXP (t, 0)) == PLUS
		   || GET_CODE (XEXP (t, 0)) == MINUS
		   || GET_CODE (XEXP (t, 0)) == IOR
		   || GET_CODE (XEXP (t, 0)) == XOR
		   || GET_CODE (XEXP (t, 0)) == ASHIFT
		   || GET_CODE (XEXP (t, 0)) == LSHIFTRT
		   || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
	       && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
	       && (num_sign_bit_copies (f, GET_MODE (f))
		   > (unsigned int)
		     (GET_MODE_BITSIZE (mode)
		      - GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (t, 0), 0))))))
	{
	  c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
	  extend_op = SIGN_EXTEND;
	  m = GET_MODE (XEXP (t, 0));
	}
      else if (GET_CODE (t) == SIGN_EXTEND
	       && (GET_CODE (XEXP (t, 0)) == PLUS
		   || GET_CODE (XEXP (t, 0)) == IOR
		   || GET_CODE (XEXP (t, 0)) == XOR)
	       && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
	       && (num_sign_bit_copies (f, GET_MODE (f))
		   > (unsigned int)
		     (GET_MODE_BITSIZE (mode)
		      - GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (t, 0), 1))))))
	{
	  c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
	  extend_op = SIGN_EXTEND;
	  m = GET_MODE (XEXP (t, 0));
	}
      else if (GET_CODE (t) == ZERO_EXTEND
	       && (GET_CODE (XEXP (t, 0)) == PLUS
		   || GET_CODE (XEXP (t, 0)) == MINUS
		   || GET_CODE (XEXP (t, 0)) == IOR
		   || GET_CODE (XEXP (t, 0)) == XOR
		   || GET_CODE (XEXP (t, 0)) == ASHIFT
		   || GET_CODE (XEXP (t, 0)) == LSHIFTRT
		   || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
	       && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
	       && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
	       && ((nonzero_bits (f, GET_MODE (f))
		    & ~GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 0))))
		   == 0))
	{
	  c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
	  extend_op = ZERO_EXTEND;
	  m = GET_MODE (XEXP (t, 0));
	}
      else if (GET_CODE (t) == ZERO_EXTEND
	       && (GET_CODE (XEXP (t, 0)) == PLUS
		   || GET_CODE (XEXP (t, 0)) == IOR
		   || GET_CODE (XEXP (t, 0)) == XOR)
	       && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
	       && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
	       && ((nonzero_bits (f, GET_MODE (f))
		    & ~GET_MODE_MASK (GET_MODE (XEXP (XEXP (t, 0), 1))))
		   == 0))
	{
	  c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
	  extend_op = ZERO_EXTEND;
	  m = GET_MODE (XEXP (t, 0));
	}

      if (z)
	{
	  temp = subst (simplify_gen_relational (true_code, m, VOIDmode,
						 cond_op0, cond_op1),
			pc_rtx, pc_rtx, 0, 0);
	  temp = simplify_gen_binary (MULT, m, temp,
				      simplify_gen_binary (MULT, m, c1,
							   const_true_rtx));
	  temp = subst (temp, pc_rtx, pc_rtx, 0, 0);
	  temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);

	  if (extend_op != UNKNOWN)
	    temp = simplify_gen_unary (extend_op, mode, temp, m);

	  return temp;
	}
    }

  /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or
     1 and C1 is a single bit or A is known to be 0 or -1 and C1 is the
     negation of a single bit, we can convert this operation to a shift.  We
     can actually do this more generally, but it doesn't seem worth it.  */

  if (true_code == NE && XEXP (cond, 1) == const0_rtx
      && false_rtx == const0_rtx && GET_CODE (true_rtx) == CONST_INT
      && ((1 == nonzero_bits (XEXP (cond, 0), mode)
	   && (i = exact_log2 (INTVAL (true_rtx))) >= 0)
	  || ((num_sign_bit_copies (XEXP (cond, 0), mode)
	       == GET_MODE_BITSIZE (mode))
	      && (i = exact_log2 (-INTVAL (true_rtx))) >= 0)))
    return
      simplify_shift_const (NULL_RTX, ASHIFT, mode,
			    gen_lowpart (mode, XEXP (cond, 0)), i);

  /* (IF_THEN_ELSE (NE REG 0) (0) (8)) is REG for nonzero_bits (REG) == 8.  */
  if (true_code == NE && XEXP (cond, 1) == const0_rtx
      && false_rtx == const0_rtx && GET_CODE (true_rtx) == CONST_INT
      && GET_MODE (XEXP (cond, 0)) == mode
      && (INTVAL (true_rtx) & GET_MODE_MASK (mode))
	  == nonzero_bits (XEXP (cond, 0), mode)
      && (i = exact_log2 (INTVAL (true_rtx) & GET_MODE_MASK (mode))) >= 0)
    return XEXP (cond, 0);

  return x;
}

/* Simplify X, a SET expression.  Return the new expression.  */

static rtx
simplify_set (rtx x)
{
  rtx src = SET_SRC (x);
  rtx dest = SET_DEST (x);
  enum machine_mode mode
    = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest);
  rtx other_insn;
  rtx *cc_use;

  /* (set (pc) (return)) gets written as (return).  */
  if (GET_CODE (dest) == PC && GET_CODE (src) == RETURN)
    return src;

  /* Now that we know for sure which bits of SRC we are using, see if we can
     simplify the expression for the object knowing that we only need the
     low-order bits.  */

  if (GET_MODE_CLASS (mode) == MODE_INT
      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
    {
      src = force_to_mode (src, mode, ~(HOST_WIDE_INT) 0, 0);
      SUBST (SET_SRC (x), src);
    }

  /* If we are setting CC0 or if the source is a COMPARE, look for the use of
     the comparison result and try to simplify it unless we already have used
     undobuf.other_insn.  */
  if ((GET_MODE_CLASS (mode) == MODE_CC
       || GET_CODE (src) == COMPARE
       || CC0_P (dest))
      && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
      && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn)
      && COMPARISON_P (*cc_use)
      && rtx_equal_p (XEXP (*cc_use, 0), dest))
    {
      enum rtx_code old_code = GET_CODE (*cc_use);
      enum rtx_code new_code;
      rtx op0, op1, tmp;
      int other_changed = 0;
      enum machine_mode compare_mode = GET_MODE (dest);

      if (GET_CODE (src) == COMPARE)
	op0 = XEXP (src, 0), op1 = XEXP (src, 1);
      else
	op0 = src, op1 = CONST0_RTX (GET_MODE (src));

      tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
					   op0, op1);
      if (!tmp)
	new_code = old_code;
      else if (!CONSTANT_P (tmp))
	{
	  new_code = GET_CODE (tmp);
	  op0 = XEXP (tmp, 0);
	  op1 = XEXP (tmp, 1);
	}
      else
	{
	  rtx pat = PATTERN (other_insn);
	  undobuf.other_insn = other_insn;
	  SUBST (*cc_use, tmp);

	  /* Attempt to simplify CC user.  */
	  if (GET_CODE (pat) == SET)
	    {
	      rtx new = simplify_rtx (SET_SRC (pat));
	      if (new != NULL_RTX)
		SUBST (SET_SRC (pat), new);
	    }

	  /* Convert X into a no-op move.  */
	  SUBST (SET_DEST (x), pc_rtx);
	  SUBST (SET_SRC (x), pc_rtx);
	  return x;
	}

      /* Simplify our comparison, if possible.  */
      new_code = simplify_comparison (new_code, &op0, &op1);

#ifdef SELECT_CC_MODE
      /* If this machine has CC modes other than CCmode, check to see if we
	 need to use a different CC mode here.  */
      if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
	compare_mode = GET_MODE (op0);
      else
	compare_mode = SELECT_CC_MODE (new_code, op0, op1);

#ifndef HAVE_cc0
      /* If the mode changed, we have to change SET_DEST, the mode in the
	 compare, and the mode in the place SET_DEST is used.  If SET_DEST is
	 a hard register, just build new versions with the proper mode.  If it
	 is a pseudo, we lose unless it is only time we set the pseudo, in
	 which case we can safely change its mode.  */
      if (compare_mode != GET_MODE (dest))
	{
	  if (can_change_dest_mode (dest, 0, compare_mode))
	    {
	      unsigned int regno = REGNO (dest);
	      rtx new_dest;

	      if (regno < FIRST_PSEUDO_REGISTER)
		new_dest = gen_rtx_REG (compare_mode, regno);
	      else
		{
		  SUBST_MODE (regno_reg_rtx[regno], compare_mode);
		  new_dest = regno_reg_rtx[regno];
		}

	      SUBST (SET_DEST (x), new_dest);
	      SUBST (XEXP (*cc_use, 0), new_dest);
	      other_changed = 1;

	      dest = new_dest;
	    }
	}
#endif  /* cc0 */
#endif  /* SELECT_CC_MODE */

      /* If the code changed, we have to build a new comparison in
	 undobuf.other_insn.  */
      if (new_code != old_code)
	{
	  int other_changed_previously = other_changed;
	  unsigned HOST_WIDE_INT mask;

	  SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use),
					  dest, const0_rtx));
	  other_changed = 1;

	  /* If the only change we made was to change an EQ into an NE or
	     vice versa, OP0 has only one bit that might be nonzero, and OP1
	     is zero, check if changing the user of the condition code will
	     produce a valid insn.  If it won't, we can keep the original code
	     in that insn by surrounding our operation with an XOR.  */

	  if (((old_code == NE && new_code == EQ)
	       || (old_code == EQ && new_code == NE))
	      && ! other_changed_previously && op1 == const0_rtx
	      && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT
	      && exact_log2 (mask = nonzero_bits (op0, GET_MODE (op0))) >= 0)
	    {
	      rtx pat = PATTERN (other_insn), note = 0;

	      if ((recog_for_combine (&pat, other_insn, &note) < 0
		   && ! check_asm_operands (pat)))
		{
		  PUT_CODE (*cc_use, old_code);
		  other_changed = 0;

		  op0 = simplify_gen_binary (XOR, GET_MODE (op0),
					     op0, GEN_INT (mask));
		}
	    }
	}

      if (other_changed)
	undobuf.other_insn = other_insn;

#ifdef HAVE_cc0
      /* If we are now comparing against zero, change our source if
	 needed.  If we do not use cc0, we always have a COMPARE.  */
      if (op1 == const0_rtx && dest == cc0_rtx)
	{
	  SUBST (SET_SRC (x), op0);
	  src = op0;
	}
      else
#endif

      /* Otherwise, if we didn't previously have a COMPARE in the
	 correct mode, we need one.  */
      if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode)
	{
	  SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
	  src = SET_SRC (x);
	}
      else if (GET_MODE (op0) == compare_mode && op1 == const0_rtx)
	{
	  SUBST (SET_SRC (x), op0);
	  src = SET_SRC (x);
	}
      /* Otherwise, update the COMPARE if needed.  */
      else if (XEXP (src, 0) != op0 || XEXP (src, 1) != op1)
	{
	  SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
	  src = SET_SRC (x);
	}
    }
  else
    {
      /* Get SET_SRC in a form where we have placed back any
	 compound expressions.  Then do the checks below.  */
      src = make_compound_operation (src, SET);
      SUBST (SET_SRC (x), src);
    }

  /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
     and X being a REG or (subreg (reg)), we may be able to convert this to
     (set (subreg:m2 x) (op)).

     We can always do this if M1 is narrower than M2 because that means that
     we only care about the low bits of the result.

     However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
     perform a narrower operation than requested since the high-order bits will
     be undefined.  On machine where it is defined, this transformation is safe
     as long as M1 and M2 have the same number of words.  */

  if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
      && !OBJECT_P (SUBREG_REG (src))
      && (((GET_MODE_SIZE (GET_MODE (src)) + (UNITS_PER_WORD - 1))
	   / UNITS_PER_WORD)
	  == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))
	       + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD))
#ifndef WORD_REGISTER_OPERATIONS
      && (GET_MODE_SIZE (GET_MODE (src))
	< GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))))
#endif
#ifdef CANNOT_CHANGE_MODE_CLASS
      && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
	    && REG_CANNOT_CHANGE_MODE_P (REGNO (dest),
					 GET_MODE (SUBREG_REG (src)),
					 GET_MODE (src)))
#endif
      && (REG_P (dest)
	  || (GET_CODE (dest) == SUBREG
	      && REG_P (SUBREG_REG (dest)))))
    {
      SUBST (SET_DEST (x),
	     gen_lowpart (GET_MODE (SUBREG_REG (src)),
				      dest));
      SUBST (SET_SRC (x), SUBREG_REG (src));

      src = SET_SRC (x), dest = SET_DEST (x);
    }

#ifdef HAVE_cc0
  /* If we have (set (cc0) (subreg ...)), we try to remove the subreg
     in SRC.  */
  if (dest == cc0_rtx
      && GET_CODE (src) == SUBREG
      && subreg_lowpart_p (src)
      && (GET_MODE_BITSIZE (GET_MODE (src))
	  < GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (src)))))
    {
      rtx inner = SUBREG_REG (src);
      enum machine_mode inner_mode = GET_MODE (inner);

      /* Here we make sure that we don't have a sign bit on.  */
      if (GET_MODE_BITSIZE (inner_mode) <= HOST_BITS_PER_WIDE_INT
	  && (nonzero_bits (inner, inner_mode)
	      < ((unsigned HOST_WIDE_INT) 1
		 << (GET_MODE_BITSIZE (GET_MODE (src)) - 1))))
	{
	  SUBST (SET_SRC (x), inner);
	  src = SET_SRC (x);
	}
    }
#endif

#ifdef LOAD_EXTEND_OP
  /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
     would require a paradoxical subreg.  Replace the subreg with a
     zero_extend to avoid the reload that would otherwise be required.  */

  if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
      && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src))) != UNKNOWN
      && SUBREG_BYTE (src) == 0
      && (GET_MODE_SIZE (GET_MODE (src))
	  > GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))))
      && MEM_P (SUBREG_REG (src)))
    {
      SUBST (SET_SRC (x),
	     gen_rtx_fmt_e (LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (src))),
			    GET_MODE (src), SUBREG_REG (src)));

      src = SET_SRC (x);
    }
#endif

  /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
     are comparing an item known to be 0 or -1 against 0, use a logical
     operation instead. Check for one of the arms being an IOR of the other
     arm with some value.  We compute three terms to be IOR'ed together.  In
     practice, at most two will be nonzero.  Then we do the IOR's.  */

  if (GET_CODE (dest) != PC
      && GET_CODE (src) == IF_THEN_ELSE
      && GET_MODE_CLASS (GET_MODE (src)) == MODE_INT
      && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE)
      && XEXP (XEXP (src, 0), 1) == const0_rtx
      && GET_MODE (src) == GET_MODE (XEXP (XEXP (src, 0), 0))
#ifdef HAVE_conditional_move
      && ! can_conditionally_move_p (GET_MODE (src))
#endif
      && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0),
			       GET_MODE (XEXP (XEXP (src, 0), 0)))
	  == GET_MODE_BITSIZE (GET_MODE (XEXP (XEXP (src, 0), 0))))
      && ! side_effects_p (src))
    {
      rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
		      ? XEXP (src, 1) : XEXP (src, 2));
      rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE
		   ? XEXP (src, 2) : XEXP (src, 1));
      rtx term1 = const0_rtx, term2, term3;

      if (GET_CODE (true_rtx) == IOR
	  && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
	term1 = false_rtx, true_rtx = XEXP (true_rtx, 1), false_rtx = const0_rtx;
      else if (GET_CODE (true_rtx) == IOR
	       && rtx_equal_p (XEXP (true_rtx, 1), false_rtx))
	term1 = false_rtx, true_rtx = XEXP (true_rtx, 0), false_rtx = const0_rtx;
      else if (GET_CODE (false_rtx) == IOR
	       && rtx_equal_p (XEXP (false_rtx, 0), true_rtx))
	term1 = true_rtx, false_rtx = XEXP (false_rtx, 1), true_rtx = const0_rtx;
      else if (GET_CODE (false_rtx) == IOR
	       && rtx_equal_p (XEXP (false_rtx, 1), true_rtx))
	term1 = true_rtx, false_rtx = XEXP (false_rtx, 0), true_rtx = const0_rtx;

      term2 = simplify_gen_binary (AND, GET_MODE (src),
				   XEXP (XEXP (src, 0), 0), true_rtx);
      term3 = simplify_gen_binary (AND, GET_MODE (src),
				   simplify_gen_unary (NOT, GET_MODE (src),
						       XEXP (XEXP (src, 0), 0),
						       GET_MODE (src)),
				   false_rtx);

      SUBST (SET_SRC (x),
	     simplify_gen_binary (IOR, GET_MODE (src),
				  simplify_gen_binary (IOR, GET_MODE (src),
						       term1, term2),
				  term3));

      src = SET_SRC (x);
    }

  /* If either SRC or DEST is a CLOBBER of (const_int 0), make this
     whole thing fail.  */
  if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx)
    return src;
  else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx)
    return dest;
  else
    /* Convert this into a field assignment operation, if possible.  */
    return make_field_assignment (x);
}

/* Simplify, X, and AND, IOR, or XOR operation, and return the simplified
   result.  */

static rtx
simplify_logical (rtx x)
{
  enum machine_mode mode = GET_MODE (x);
  rtx op0 = XEXP (x, 0);
  rtx op1 = XEXP (x, 1);

  switch (GET_CODE (x))
    {
    case AND:
      /* We can call simplify_and_const_int only if we don't lose
	 any (sign) bits when converting INTVAL (op1) to
	 "unsigned HOST_WIDE_INT".  */
      if (GET_CODE (op1) == CONST_INT
	  && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	      || INTVAL (op1) > 0))
	{
	  x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
	  if (GET_CODE (x) != AND)
	    return x;

	  op0 = XEXP (x, 0);
	  op1 = XEXP (x, 1);
	}

      /* If we have any of (and (ior A B) C) or (and (xor A B) C),
	 apply the distributive law and then the inverse distributive
	 law to see if things simplify.  */
      if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
	{
	  rtx result = distribute_and_simplify_rtx (x, 0);
	  if (result)
	    return result;
	}
      if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
	{
	  rtx result = distribute_and_simplify_rtx (x, 1);
	  if (result)
	    return result;
	}
      break;

    case IOR:
      /* If we have (ior (and A B) C), apply the distributive law and then
	 the inverse distributive law to see if things simplify.  */

      if (GET_CODE (op0) == AND)
	{
	  rtx result = distribute_and_simplify_rtx (x, 0);
	  if (result)
	    return result;
	}

      if (GET_CODE (op1) == AND)
	{
	  rtx result = distribute_and_simplify_rtx (x, 1);
	  if (result)
	    return result;
	}
      break;

    default:
      gcc_unreachable ();
    }

  return x;
}

/* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
   operations" because they can be replaced with two more basic operations.
   ZERO_EXTEND is also considered "compound" because it can be replaced with
   an AND operation, which is simpler, though only one operation.

   The function expand_compound_operation is called with an rtx expression
   and will convert it to the appropriate shifts and AND operations,
   simplifying at each stage.

   The function make_compound_operation is called to convert an expression
   consisting of shifts and ANDs into the equivalent compound expression.
   It is the inverse of this function, loosely speaking.  */

static rtx
expand_compound_operation (rtx x)
{
  unsigned HOST_WIDE_INT pos = 0, len;
  int unsignedp = 0;
  unsigned int modewidth;
  rtx tem;

  switch (GET_CODE (x))
    {
    case ZERO_EXTEND:
      unsignedp = 1;
    case SIGN_EXTEND:
      /* We can't necessarily use a const_int for a multiword mode;
	 it depends on implicitly extending the value.
	 Since we don't know the right way to extend it,
	 we can't tell whether the implicit way is right.

	 Even for a mode that is no wider than a const_int,
	 we can't win, because we need to sign extend one of its bits through
	 the rest of it, and we don't know which bit.  */
      if (GET_CODE (XEXP (x, 0)) == CONST_INT)
	return x;

      /* Return if (subreg:MODE FROM 0) is not a safe replacement for
	 (zero_extend:MODE FROM) or (sign_extend:MODE FROM).  It is for any MEM
	 because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
	 reloaded. If not for that, MEM's would very rarely be safe.

	 Reject MODEs bigger than a word, because we might not be able
	 to reference a two-register group starting with an arbitrary register
	 (and currently gen_lowpart might crash for a SUBREG).  */

      if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) > UNITS_PER_WORD)
	return x;

      /* Reject MODEs that aren't scalar integers because turning vector
	 or complex modes into shifts causes problems.  */

      if (! SCALAR_INT_MODE_P (GET_MODE (XEXP (x, 0))))
	return x;

      len = GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)));
      /* If the inner object has VOIDmode (the only way this can happen
	 is if it is an ASM_OPERANDS), we can't do anything since we don't
	 know how much masking to do.  */
      if (len == 0)
	return x;

      break;

    case ZERO_EXTRACT:
      unsignedp = 1;

      /* ... fall through ...  */

    case SIGN_EXTRACT:
      /* If the operand is a CLOBBER, just return it.  */
      if (GET_CODE (XEXP (x, 0)) == CLOBBER)
	return XEXP (x, 0);

      if (GET_CODE (XEXP (x, 1)) != CONST_INT
	  || GET_CODE (XEXP (x, 2)) != CONST_INT
	  || GET_MODE (XEXP (x, 0)) == VOIDmode)
	return x;

      /* Reject MODEs that aren't scalar integers because turning vector
	 or complex modes into shifts causes problems.  */

      if (! SCALAR_INT_MODE_P (GET_MODE (XEXP (x, 0))))
	return x;

      len = INTVAL (XEXP (x, 1));
      pos = INTVAL (XEXP (x, 2));

      /* This should stay within the object being extracted, fail otherwise.  */
      if (len + pos > GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))))
	return x;

      if (BITS_BIG_ENDIAN)
	pos = GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))) - len - pos;

      break;

    default:
      return x;
    }
  /* Convert sign extension to zero extension, if we know that the high
     bit is not set, as this is easier to optimize.  It will be converted
     back to cheaper alternative in make_extraction.  */
  if (GET_CODE (x) == SIGN_EXTEND
      && (GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
	  && ((nonzero_bits (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
		& ~(((unsigned HOST_WIDE_INT)
		      GET_MODE_MASK (GET_MODE (XEXP (x, 0))))
		     >> 1))
	       == 0)))
    {
      rtx temp = gen_rtx_ZERO_EXTEND (GET_MODE (x), XEXP (x, 0));
      rtx temp2 = expand_compound_operation (temp);

      /* Make sure this is a profitable operation.  */
      if (rtx_cost (x, SET) > rtx_cost (temp2, SET))
       return temp2;
      else if (rtx_cost (x, SET) > rtx_cost (temp, SET))
       return temp;
      else
       return x;
    }

  /* We can optimize some special cases of ZERO_EXTEND.  */
  if (GET_CODE (x) == ZERO_EXTEND)
    {
      /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
	 know that the last value didn't have any inappropriate bits
	 set.  */
      if (GET_CODE (XEXP (x, 0)) == TRUNCATE
	  && GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
	  && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
	  && (nonzero_bits (XEXP (XEXP (x, 0), 0), GET_MODE (x))
	      & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
	return XEXP (XEXP (x, 0), 0);

      /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)).  */
      if (GET_CODE (XEXP (x, 0)) == SUBREG
	  && GET_MODE (SUBREG_REG (XEXP (x, 0))) == GET_MODE (x)
	  && subreg_lowpart_p (XEXP (x, 0))
	  && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
	  && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), GET_MODE (x))
	      & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
	return SUBREG_REG (XEXP (x, 0));

      /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
	 is a comparison and STORE_FLAG_VALUE permits.  This is like
	 the first case, but it works even when GET_MODE (x) is larger
	 than HOST_WIDE_INT.  */
      if (GET_CODE (XEXP (x, 0)) == TRUNCATE
	  && GET_MODE (XEXP (XEXP (x, 0), 0)) == GET_MODE (x)
	  && COMPARISON_P (XEXP (XEXP (x, 0), 0))
	  && (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
	      <= HOST_BITS_PER_WIDE_INT)
	  && ((HOST_WIDE_INT) STORE_FLAG_VALUE
	      & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
	return XEXP (XEXP (x, 0), 0);

      /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)).  */
      if (GET_CODE (XEXP (x, 0)) == SUBREG
	  && GET_MODE (SUBREG_REG (XEXP (x, 0))) == GET_MODE (x)
	  && subreg_lowpart_p (XEXP (x, 0))
	  && COMPARISON_P (SUBREG_REG (XEXP (x, 0)))
	  && (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
	      <= HOST_BITS_PER_WIDE_INT)
	  && ((HOST_WIDE_INT) STORE_FLAG_VALUE
	      & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0)))) == 0)
	return SUBREG_REG (XEXP (x, 0));

    }

  /* If we reach here, we want to return a pair of shifts.  The inner
     shift is a left shift of BITSIZE - POS - LEN bits.  The outer
     shift is a right shift of BITSIZE - LEN bits.  It is arithmetic or
     logical depending on the value of UNSIGNEDP.

     If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be
     converted into an AND of a shift.

     We must check for the case where the left shift would have a negative
     count.  This can happen in a case like (x >> 31) & 255 on machines
     that can't shift by a constant.  On those machines, we would first
     combine the shift with the AND to produce a variable-position
     extraction.  Then the constant of 31 would be substituted in to produce
     a such a position.  */

  modewidth = GET_MODE_BITSIZE (GET_MODE (x));
  if (modewidth + len >= pos)
    {
      enum machine_mode mode = GET_MODE (x);
      tem = gen_lowpart (mode, XEXP (x, 0));
      if (!tem || GET_CODE (tem) == CLOBBER)
	return x;
      tem = simplify_shift_const (NULL_RTX, ASHIFT, mode,
				  tem, modewidth - pos - len);
      tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT,
				  mode, tem, modewidth - len);
    }
  else if (unsignedp && len < HOST_BITS_PER_WIDE_INT)
    tem = simplify_and_const_int (NULL_RTX, GET_MODE (x),
				  simplify_shift_const (NULL_RTX, LSHIFTRT,
							GET_MODE (x),
							XEXP (x, 0), pos),
				  ((HOST_WIDE_INT) 1 << len) - 1);
  else
    /* Any other cases we can't handle.  */
    return x;

  /* If we couldn't do this for some reason, return the original
     expression.  */
  if (GET_CODE (tem) == CLOBBER)
    return x;

  return tem;
}

/* X is a SET which contains an assignment of one object into
   a part of another (such as a bit-field assignment, STRICT_LOW_PART,
   or certain SUBREGS). If possible, convert it into a series of
   logical operations.

   We half-heartedly support variable positions, but do not at all
   support variable lengths.  */

static const_rtx
expand_field_assignment (const_rtx x)
{
  rtx inner;
  rtx pos;			/* Always counts from low bit.  */
  int len;
  rtx mask, cleared, masked;
  enum machine_mode compute_mode;

  /* Loop until we find something we can't simplify.  */
  while (1)
    {
      if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
	  && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
	{
	  inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
	  len = GET_MODE_BITSIZE (GET_MODE (XEXP (SET_DEST (x), 0)));
	  pos = GEN_INT (subreg_lsb (XEXP (SET_DEST (x), 0)));
	}
      else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
	       && GET_CODE (XEXP (SET_DEST (x), 1)) == CONST_INT)
	{
	  inner = XEXP (SET_DEST (x), 0);
	  len = INTVAL (XEXP (SET_DEST (x), 1));
	  pos = XEXP (SET_DEST (x), 2);

	  /* A constant position should stay within the width of INNER.  */
	  if (GET_CODE (pos) == CONST_INT
	      && INTVAL (pos) + len > GET_MODE_BITSIZE (GET_MODE (inner)))
	    break;

	  if (BITS_BIG_ENDIAN)
	    {
	      if (GET_CODE (pos) == CONST_INT)
		pos = GEN_INT (GET_MODE_BITSIZE (GET_MODE (inner)) - len
			       - INTVAL (pos));
	      else if (GET_CODE (pos) == MINUS
		       && GET_CODE (XEXP (pos, 1)) == CONST_INT
		       && (INTVAL (XEXP (pos, 1))
			   == GET_MODE_BITSIZE (GET_MODE (inner)) - len))
		/* If position is ADJUST - X, new position is X.  */
		pos = XEXP (pos, 0);
	      else
		pos = simplify_gen_binary (MINUS, GET_MODE (pos),
					   GEN_INT (GET_MODE_BITSIZE (
						    GET_MODE (inner))
						    - len),
					   pos);
	    }
	}

      /* A SUBREG between two modes that occupy the same numbers of words
	 can be done by moving the SUBREG to the source.  */
      else if (GET_CODE (SET_DEST (x)) == SUBREG
	       /* We need SUBREGs to compute nonzero_bits properly.  */
	       && nonzero_sign_valid
	       && (((GET_MODE_SIZE (GET_MODE (SET_DEST (x)))
		     + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
		   == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (x))))
			+ (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
	{
	  x = gen_rtx_SET (VOIDmode, SUBREG_REG (SET_DEST (x)),
			   gen_lowpart
			   (GET_MODE (SUBREG_REG (SET_DEST (x))),
			    SET_SRC (x)));
	  continue;
	}
      else
	break;

      while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
	inner = SUBREG_REG (inner);

      compute_mode = GET_MODE (inner);

      /* Don't attempt bitwise arithmetic on non scalar integer modes.  */
      if (! SCALAR_INT_MODE_P (compute_mode))
	{
	  enum machine_mode imode;

	  /* Don't do anything for vector or complex integral types.  */
	  if (! FLOAT_MODE_P (compute_mode))
	    break;

	  /* Try to find an integral mode to pun with.  */
	  imode = mode_for_size (GET_MODE_BITSIZE (compute_mode), MODE_INT, 0);
	  if (imode == BLKmode)
	    break;

	  compute_mode = imode;
	  inner = gen_lowpart (imode, inner);
	}

      /* Compute a mask of LEN bits, if we can do this on the host machine.  */
      if (len >= HOST_BITS_PER_WIDE_INT)
	break;

      /* Now compute the equivalent expression.  Make a copy of INNER
	 for the SET_DEST in case it is a MEM into which we will substitute;
	 we don't want shared RTL in that case.  */
      mask = GEN_INT (((HOST_WIDE_INT) 1 << len) - 1);
      cleared = simplify_gen_binary (AND, compute_mode,
				     simplify_gen_unary (NOT, compute_mode,
				       simplify_gen_binary (ASHIFT,
							    compute_mode,
							    mask, pos),
				       compute_mode),
				     inner);
      masked = simplify_gen_binary (ASHIFT, compute_mode,
				    simplify_gen_binary (
				      AND, compute_mode,
				      gen_lowpart (compute_mode, SET_SRC (x)),
				      mask),
				    pos);

      x = gen_rtx_SET (VOIDmode, copy_rtx (inner),
		       simplify_gen_binary (IOR, compute_mode,
					    cleared, masked));
    }

  return x;
}

/* Return an RTX for a reference to LEN bits of INNER.  If POS_RTX is nonzero,
   it is an RTX that represents a variable starting position; otherwise,
   POS is the (constant) starting bit position (counted from the LSB).

   UNSIGNEDP is nonzero for an unsigned reference and zero for a
   signed reference.

   IN_DEST is nonzero if this is a reference in the destination of a
   SET.  This is used when a ZERO_ or SIGN_EXTRACT isn't needed.  If nonzero,
   a STRICT_LOW_PART will be used, if zero, ZERO_EXTEND or SIGN_EXTEND will
   be used.

   IN_COMPARE is nonzero if we are in a COMPARE.  This means that a
   ZERO_EXTRACT should be built even for bits starting at bit 0.

   MODE is the desired mode of the result (if IN_DEST == 0).

   The result is an RTX for the extraction or NULL_RTX if the target
   can't handle it.  */

static rtx
make_extraction (enum machine_mode mode, rtx inner, HOST_WIDE_INT pos,
		 rtx pos_rtx, unsigned HOST_WIDE_INT len, int unsignedp,
		 int in_dest, int in_compare)
{
  /* This mode describes the size of the storage area
     to fetch the overall value from.  Within that, we
     ignore the POS lowest bits, etc.  */
  enum machine_mode is_mode = GET_MODE (inner);
  enum machine_mode inner_mode;
  enum machine_mode wanted_inner_mode;
  enum machine_mode wanted_inner_reg_mode = word_mode;
  enum machine_mode pos_mode = word_mode;
  enum machine_mode extraction_mode = word_mode;
  enum machine_mode tmode = mode_for_size (len, MODE_INT, 1);
  rtx new = 0;
  rtx orig_pos_rtx = pos_rtx;
  HOST_WIDE_INT orig_pos;

  if (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
    {
      /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
	 consider just the QI as the memory to extract from.
	 The subreg adds or removes high bits; its mode is
	 irrelevant to the meaning of this extraction,
	 since POS and LEN count from the lsb.  */
      if (MEM_P (SUBREG_REG (inner)))
	is_mode = GET_MODE (SUBREG_REG (inner));
      inner = SUBREG_REG (inner);
    }
  else if (GET_CODE (inner) == ASHIFT
	   && GET_CODE (XEXP (inner, 1)) == CONST_INT
	   && pos_rtx == 0 && pos == 0
	   && len > (unsigned HOST_WIDE_INT) INTVAL (XEXP (inner, 1)))
    {
      /* We're extracting the least significant bits of an rtx
	 (ashift X (const_int C)), where LEN > C.  Extract the
	 least significant (LEN - C) bits of X, giving an rtx
	 whose mode is MODE, then shift it left C times.  */
      new = make_extraction (mode, XEXP (inner, 0),
			     0, 0, len - INTVAL (XEXP (inner, 1)),
			     unsignedp, in_dest, in_compare);
      if (new != 0)
	return gen_rtx_ASHIFT (mode, new, XEXP (inner, 1));
    }

  inner_mode = GET_MODE (inner);

  if (pos_rtx && GET_CODE (pos_rtx) == CONST_INT)
    pos = INTVAL (pos_rtx), pos_rtx = 0;

  /* See if this can be done without an extraction.  We never can if the
     width of the field is not the same as that of some integer mode. For
     registers, we can only avoid the extraction if the position is at the
     low-order bit and this is either not in the destination or we have the
     appropriate STRICT_LOW_PART operation available.

     For MEM, we can avoid an extract if the field starts on an appropriate
     boundary and we can change the mode of the memory reference.  */

  if (tmode != BLKmode
      && ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
	   && !MEM_P (inner)
	   && (inner_mode == tmode
	       || !REG_P (inner)
	       || TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (tmode),
					 GET_MODE_BITSIZE (inner_mode))
	       || reg_truncated_to_mode (tmode, inner))
	   && (! in_dest
	       || (REG_P (inner)
		   && have_insn_for (STRICT_LOW_PART, tmode))))
	  || (MEM_P (inner) && pos_rtx == 0
	      && (pos
		  % (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
		     : BITS_PER_UNIT)) == 0
	      /* We can't do this if we are widening INNER_MODE (it
		 may not be aligned, for one thing).  */
	      && GET_MODE_BITSIZE (inner_mode) >= GET_MODE_BITSIZE (tmode)
	      && (inner_mode == tmode
		  || (! mode_dependent_address_p (XEXP (inner, 0))
		      && ! MEM_VOLATILE_P (inner))))))
    {
      /* If INNER is a MEM, make a new MEM that encompasses just the desired
	 field.  If the original and current mode are the same, we need not
	 adjust the offset.  Otherwise, we do if bytes big endian.

	 If INNER is not a MEM, get a piece consisting of just the field
	 of interest (in this case POS % BITS_PER_WORD must be 0).  */

      if (MEM_P (inner))
	{
	  HOST_WIDE_INT offset;

	  /* POS counts from lsb, but make OFFSET count in memory order.  */
	  if (BYTES_BIG_ENDIAN)
	    offset = (GET_MODE_BITSIZE (is_mode) - len - pos) / BITS_PER_UNIT;
	  else
	    offset = pos / BITS_PER_UNIT;

	  new = adjust_address_nv (inner, tmode, offset);
	}
      else if (REG_P (inner))
	{
	  if (tmode != inner_mode)
	    {
	      /* We can't call gen_lowpart in a DEST since we
		 always want a SUBREG (see below) and it would sometimes
		 return a new hard register.  */
	      if (pos || in_dest)
		{
		  HOST_WIDE_INT final_word = pos / BITS_PER_WORD;

		  if (WORDS_BIG_ENDIAN
		      && GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD)
		    final_word = ((GET_MODE_SIZE (inner_mode)
				   - GET_MODE_SIZE (tmode))
				  / UNITS_PER_WORD) - final_word;

		  final_word *= UNITS_PER_WORD;
		  if (BYTES_BIG_ENDIAN &&
		      GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (tmode))
		    final_word += (GET_MODE_SIZE (inner_mode)
				   - GET_MODE_SIZE (tmode)) % UNITS_PER_WORD;

		  /* Avoid creating invalid subregs, for example when
		     simplifying (x>>32)&255.  */
		  if (!validate_subreg (tmode, inner_mode, inner, final_word))
		    return NULL_RTX;

		  new = gen_rtx_SUBREG (tmode, inner, final_word);
		}
	      else
		new = gen_lowpart (tmode, inner);
	    }
	  else
	    new = inner;
	}
      else
	new = force_to_mode (inner, tmode,
			     len >= HOST_BITS_PER_WIDE_INT
			     ? ~(unsigned HOST_WIDE_INT) 0
			     : ((unsigned HOST_WIDE_INT) 1 << len) - 1,
			     0);

      /* If this extraction is going into the destination of a SET,
	 make a STRICT_LOW_PART unless we made a MEM.  */

      if (in_dest)
	return (MEM_P (new) ? new
		: (GET_CODE (new) != SUBREG
		   ? gen_rtx_CLOBBER (tmode, const0_rtx)
		   : gen_rtx_STRICT_LOW_PART (VOIDmode, new)));

      if (mode == tmode)
	return new;

      if (GET_CODE (new) == CONST_INT)
	return gen_int_mode (INTVAL (new), mode);

      /* If we know that no extraneous bits are set, and that the high
	 bit is not set, convert the extraction to the cheaper of
	 sign and zero extension, that are equivalent in these cases.  */
      if (flag_expensive_optimizations
	  && (GET_MODE_BITSIZE (tmode) <= HOST_BITS_PER_WIDE_INT
	      && ((nonzero_bits (new, tmode)
		   & ~(((unsigned HOST_WIDE_INT)
			GET_MODE_MASK (tmode))
		       >> 1))
		  == 0)))
	{
	  rtx temp = gen_rtx_ZERO_EXTEND (mode, new);
	  rtx temp1 = gen_rtx_SIGN_EXTEND (mode, new);

	  /* Prefer ZERO_EXTENSION, since it gives more information to
	     backends.  */
	  if (rtx_cost (temp, SET) <= rtx_cost (temp1, SET))
	    return temp;
	  return temp1;
	}

      /* Otherwise, sign- or zero-extend unless we already are in the
	 proper mode.  */

      return (gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
			     mode, new));
    }

  /* Unless this is a COMPARE or we have a funny memory reference,
     don't do anything with zero-extending field extracts starting at
     the low-order bit since they are simple AND operations.  */
  if (pos_rtx == 0 && pos == 0 && ! in_dest
      && ! in_compare && unsignedp)
    return 0;

  /* Unless INNER is not MEM, reject this if we would be spanning bytes or
     if the position is not a constant and the length is not 1.  In all
     other cases, we would only be going outside our object in cases when
     an original shift would have been undefined.  */
  if (MEM_P (inner)
      && ((pos_rtx == 0 && pos + len > GET_MODE_BITSIZE (is_mode))
	  || (pos_rtx != 0 && len != 1)))
    return 0;

  /* Get the mode to use should INNER not be a MEM, the mode for the position,
     and the mode for the result.  */
  if (in_dest && mode_for_extraction (EP_insv, -1) != MAX_MACHINE_MODE)
    {
      wanted_inner_reg_mode = mode_for_extraction (EP_insv, 0);
      pos_mode = mode_for_extraction (EP_insv, 2);
      extraction_mode = mode_for_extraction (EP_insv, 3);
    }

  if (! in_dest && unsignedp
      && mode_for_extraction (EP_extzv, -1) != MAX_MACHINE_MODE)
    {
      wanted_inner_reg_mode = mode_for_extraction (EP_extzv, 1);
      pos_mode = mode_for_extraction (EP_extzv, 3);
      extraction_mode = mode_for_extraction (EP_extzv, 0);
    }

  if (! in_dest && ! unsignedp
      && mode_for_extraction (EP_extv, -1) != MAX_MACHINE_MODE)
    {
      wanted_inner_reg_mode = mode_for_extraction (EP_extv, 1);
      pos_mode = mode_for_extraction (EP_extv, 3);
      extraction_mode = mode_for_extraction (EP_extv, 0);
    }

  /* Never narrow an object, since that might not be safe.  */

  if (mode != VOIDmode
      && GET_MODE_SIZE (extraction_mode) < GET_MODE_SIZE (mode))
    extraction_mode = mode;

  if (pos_rtx && GET_MODE (pos_rtx) != VOIDmode
      && GET_MODE_SIZE (pos_mode) < GET_MODE_SIZE (GET_MODE (pos_rtx)))
    pos_mode = GET_MODE (pos_rtx);

  /* If this is not from memory, the desired mode is the preferred mode
     for an extraction pattern's first input operand, or word_mode if there
     is none.  */
  if (!MEM_P (inner))
    wanted_inner_mode = wanted_inner_reg_mode;
  else
    {
      /* Be careful not to go beyond the extracted object and maintain the
	 natural alignment of the memory.  */
      wanted_inner_mode = smallest_mode_for_size (len, MODE_INT);
      while (pos % GET_MODE_BITSIZE (wanted_inner_mode) + len
	     > GET_MODE_BITSIZE (wanted_inner_mode))
	{
	  wanted_inner_mode = GET_MODE_WIDER_MODE (wanted_inner_mode);
	  gcc_assert (wanted_inner_mode != VOIDmode);
	}

      /* If we have to change the mode of memory and cannot, the desired mode
	 is EXTRACTION_MODE.  */
      if (inner_mode != wanted_inner_mode
	  && (mode_dependent_address_p (XEXP (inner, 0))
	      || MEM_VOLATILE_P (inner)
	      || pos_rtx))
	wanted_inner_mode = extraction_mode;
    }

  orig_pos = pos;

  if (BITS_BIG_ENDIAN)
    {
      /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to
	 BITS_BIG_ENDIAN style.  If position is constant, compute new
	 position.  Otherwise, build subtraction.
	 Note that POS is relative to the mode of the original argument.
	 If it's a MEM we need to recompute POS relative to that.
	 However, if we're extracting from (or inserting into) a register,
	 we want to recompute POS relative to wanted_inner_mode.  */
      int width = (MEM_P (inner)
		   ? GET_MODE_BITSIZE (is_mode)
		   : GET_MODE_BITSIZE (wanted_inner_mode));

      if (pos_rtx == 0)
	pos = width - len - pos;
      else
	pos_rtx
	  = gen_rtx_MINUS (GET_MODE (pos_rtx), GEN_INT (width - len), pos_rtx);
      /* POS may be less than 0 now, but we check for that below.
	 Note that it can only be less than 0 if !MEM_P (inner).  */
    }

  /* If INNER has a wider mode, and this is a constant extraction, try to
     make it smaller and adjust the byte to point to the byte containing
     the value.  */
  if (wanted_inner_mode != VOIDmode
      && inner_mode != wanted_inner_mode
      && ! pos_rtx
      && GET_MODE_SIZE (wanted_inner_mode) < GET_MODE_SIZE (is_mode)
      && MEM_P (inner)
      && ! mode_dependent_address_p (XEXP (inner, 0))
      && ! MEM_VOLATILE_P (inner))
    {
      int offset = 0;

      /* The computations below will be correct if the machine is big
	 endian in both bits and bytes or little endian in bits and bytes.
	 If it is mixed, we must adjust.  */

      /* If bytes are big endian and we had a paradoxical SUBREG, we must
	 adjust OFFSET to compensate.  */
      if (BYTES_BIG_ENDIAN
	  && GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (is_mode))
	offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);

      /* We can now move to the desired byte.  */
      offset += (pos / GET_MODE_BITSIZE (wanted_inner_mode))
		* GET_MODE_SIZE (wanted_inner_mode);
      pos %= GET_MODE_BITSIZE (wanted_inner_mode);

      if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
	  && is_mode != wanted_inner_mode)
	offset = (GET_MODE_SIZE (is_mode)
		  - GET_MODE_SIZE (wanted_inner_mode) - offset);

      inner = adjust_address_nv (inner, wanted_inner_mode, offset);
    }

  /* If INNER is not memory, we can always get it into the proper mode.  If we
     are changing its mode, POS must be a constant and smaller than the size
     of the new mode.  */
  else if (!MEM_P (inner))
    {
      if (GET_MODE (inner) != wanted_inner_mode
	  && (pos_rtx != 0
	      || orig_pos + len > GET_MODE_BITSIZE (wanted_inner_mode)))
	return 0;

      if (orig_pos < 0)
	return 0;

      inner = force_to_mode (inner, wanted_inner_mode,
			     pos_rtx
			     || len + orig_pos >= HOST_BITS_PER_WIDE_INT
			     ? ~(unsigned HOST_WIDE_INT) 0
			     : ((((unsigned HOST_WIDE_INT) 1 << len) - 1)
				<< orig_pos),
			     0);
    }

  /* Adjust mode of POS_RTX, if needed.  If we want a wider mode, we
     have to zero extend.  Otherwise, we can just use a SUBREG.  */
  if (pos_rtx != 0
      && GET_MODE_SIZE (pos_mode) > GET_MODE_SIZE (GET_MODE (pos_rtx)))
    {
      rtx temp = gen_rtx_ZERO_EXTEND (pos_mode, pos_rtx);

      /* If we know that no extraneous bits are set, and that the high
	 bit is not set, convert extraction to cheaper one - either
	 SIGN_EXTENSION or ZERO_EXTENSION, that are equivalent in these
	 cases.  */
      if (flag_expensive_optimizations
	  && (GET_MODE_BITSIZE (GET_MODE (pos_rtx)) <= HOST_BITS_PER_WIDE_INT
	      && ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx))
		   & ~(((unsigned HOST_WIDE_INT)
			GET_MODE_MASK (GET_MODE (pos_rtx)))
		       >> 1))
		  == 0)))
	{
	  rtx temp1 = gen_rtx_SIGN_EXTEND (pos_mode, pos_rtx);

	  /* Prefer ZERO_EXTENSION, since it gives more information to
	     backends.  */
	  if (rtx_cost (temp1, SET) < rtx_cost (temp, SET))
	    temp = temp1;
	}
      pos_rtx = temp;
    }
  else if (pos_rtx != 0
	   && GET_MODE_SIZE (pos_mode) < GET_MODE_SIZE (GET_MODE (pos_rtx)))
    pos_rtx = gen_lowpart (pos_mode, pos_rtx);

  /* Make POS_RTX unless we already have it and it is correct.  If we don't
     have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
     be a CONST_INT.  */
  if (pos_rtx == 0 && orig_pos_rtx != 0 && INTVAL (orig_pos_rtx) == pos)
    pos_rtx = orig_pos_rtx;

  else if (pos_rtx == 0)
    pos_rtx = GEN_INT (pos);

  /* Make the required operation.  See if we can use existing rtx.  */
  new = gen_rtx_fmt_eee (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT,
			 extraction_mode, inner, GEN_INT (len), pos_rtx);
  if (! in_dest)
    new = gen_lowpart (mode, new);

  return new;
}

/* See if X contains an ASHIFT of COUNT or more bits that can be commuted
   with any other operations in X.  Return X without that shift if so.  */

static rtx
extract_left_shift (rtx x, int count)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  rtx tem;

  switch (code)
    {
    case ASHIFT:
      /* This is the shift itself.  If it is wide enough, we will return
	 either the value being shifted if the shift count is equal to
	 COUNT or a shift for the difference.  */
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && INTVAL (XEXP (x, 1)) >= count)
	return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
				     INTVAL (XEXP (x, 1)) - count);
      break;

    case NEG:  case NOT:
      if ((tem = extract_left_shift (XEXP (x, 0), count)) != 0)
	return simplify_gen_unary (code, mode, tem, mode);

      break;

    case PLUS:  case IOR:  case XOR:  case AND:
      /* If we can safely shift this constant and we find the inner shift,
	 make a new operation.  */
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && (INTVAL (XEXP (x, 1)) & ((((HOST_WIDE_INT) 1 << count)) - 1)) == 0
	  && (tem = extract_left_shift (XEXP (x, 0), count)) != 0)
	return simplify_gen_binary (code, mode, tem,
				    GEN_INT (INTVAL (XEXP (x, 1)) >> count));

      break;

    default:
      break;
    }

  return 0;
}

/* Look at the expression rooted at X.  Look for expressions
   equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND.
   Form these expressions.

   Return the new rtx, usually just X.

   Also, for machines like the VAX that don't have logical shift insns,
   try to convert logical to arithmetic shift operations in cases where
   they are equivalent.  This undoes the canonicalizations to logical
   shifts done elsewhere.

   We try, as much as possible, to re-use rtl expressions to save memory.

   IN_CODE says what kind of expression we are processing.  Normally, it is
   SET.  In a memory address (inside a MEM, PLUS or minus, the latter two
   being kludges), it is MEM.  When processing the arguments of a comparison
   or a COMPARE against zero, it is COMPARE.  */

static rtx
make_compound_operation (rtx x, enum rtx_code in_code)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  int mode_width = GET_MODE_BITSIZE (mode);
  rtx rhs, lhs;
  enum rtx_code next_code;
  int i;
  rtx new = 0;
  rtx tem;
  const char *fmt;

  /* Select the code to be used in recursive calls.  Once we are inside an
     address, we stay there.  If we have a comparison, set to COMPARE,
     but once inside, go back to our default of SET.  */

  next_code = (code == MEM || code == PLUS || code == MINUS ? MEM
	       : ((code == COMPARE || COMPARISON_P (x))
		  && XEXP (x, 1) == const0_rtx) ? COMPARE
	       : in_code == COMPARE ? SET : in_code);

  /* Process depending on the code of this operation.  If NEW is set
     nonzero, it will be returned.  */

  switch (code)
    {
    case ASHIFT:
      /* Convert shifts by constants into multiplications if inside
	 an address.  */
      if (in_code == MEM && GET_CODE (XEXP (x, 1)) == CONST_INT
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
	  && INTVAL (XEXP (x, 1)) >= 0)
	{
	  new = make_compound_operation (XEXP (x, 0), next_code);
	  new = gen_rtx_MULT (mode, new,
			      GEN_INT ((HOST_WIDE_INT) 1
				       << INTVAL (XEXP (x, 1))));
	}
      break;

    case AND:
      /* If the second operand is not a constant, we can't do anything
	 with it.  */
      if (GET_CODE (XEXP (x, 1)) != CONST_INT)
	break;

      /* If the constant is a power of two minus one and the first operand
	 is a logical right shift, make an extraction.  */
      if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
	  && (i = exact_log2 (INTVAL (XEXP (x, 1)) + 1)) >= 0)
	{
	  new = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
	  new = make_extraction (mode, new, 0, XEXP (XEXP (x, 0), 1), i, 1,
				 0, in_code == COMPARE);
	}

      /* Same as previous, but for (subreg (lshiftrt ...)) in first op.  */
      else if (GET_CODE (XEXP (x, 0)) == SUBREG
	       && subreg_lowpart_p (XEXP (x, 0))
	       && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
	       && (i = exact_log2 (INTVAL (XEXP (x, 1)) + 1)) >= 0)
	{
	  new = make_compound_operation (XEXP (SUBREG_REG (XEXP (x, 0)), 0),
					 next_code);
	  new = make_extraction (GET_MODE (SUBREG_REG (XEXP (x, 0))), new, 0,
				 XEXP (SUBREG_REG (XEXP (x, 0)), 1), i, 1,
				 0, in_code == COMPARE);
	}
      /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)).  */
      else if ((GET_CODE (XEXP (x, 0)) == XOR
		|| GET_CODE (XEXP (x, 0)) == IOR)
	       && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT
	       && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT
	       && (i = exact_log2 (INTVAL (XEXP (x, 1)) + 1)) >= 0)
	{
	  /* Apply the distributive law, and then try to make extractions.  */
	  new = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode,
				gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
					     XEXP (x, 1)),
				gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
					     XEXP (x, 1)));
	  new = make_compound_operation (new, in_code);
	}

      /* If we are have (and (rotate X C) M) and C is larger than the number
	 of bits in M, this is an extraction.  */

      else if (GET_CODE (XEXP (x, 0)) == ROTATE
	       && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
	       && (i = exact_log2 (INTVAL (XEXP (x, 1)) + 1)) >= 0
	       && i <= INTVAL (XEXP (XEXP (x, 0), 1)))
	{
	  new = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
	  new = make_extraction (mode, new,
				 (GET_MODE_BITSIZE (mode)
				  - INTVAL (XEXP (XEXP (x, 0), 1))),
				 NULL_RTX, i, 1, 0, in_code == COMPARE);
	}

      /* On machines without logical shifts, if the operand of the AND is
	 a logical shift and our mask turns off all the propagated sign
	 bits, we can replace the logical shift with an arithmetic shift.  */
      else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
	       && !have_insn_for (LSHIFTRT, mode)
	       && have_insn_for (ASHIFTRT, mode)
	       && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
	       && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
	       && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
	       && mode_width <= HOST_BITS_PER_WIDE_INT)
	{
	  unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);

	  mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
	  if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
	    SUBST (XEXP (x, 0),
		   gen_rtx_ASHIFTRT (mode,
				     make_compound_operation
				     (XEXP (XEXP (x, 0), 0), next_code),
				     XEXP (XEXP (x, 0), 1)));
	}

      /* If the constant is one less than a power of two, this might be
	 representable by an extraction even if no shift is present.
	 If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
	 we are in a COMPARE.  */
      else if ((i = exact_log2 (INTVAL (XEXP (x, 1)) + 1)) >= 0)
	new = make_extraction (mode,
			       make_compound_operation (XEXP (x, 0),
							next_code),
			       0, NULL_RTX, i, 1, 0, in_code == COMPARE);

      /* If we are in a comparison and this is an AND with a power of two,
	 convert this into the appropriate bit extract.  */
      else if (in_code == COMPARE
	       && (i = exact_log2 (INTVAL (XEXP (x, 1)))) >= 0)
	new = make_extraction (mode,
			       make_compound_operation (XEXP (x, 0),
							next_code),
			       i, NULL_RTX, 1, 1, 0, 1);

      break;

    case LSHIFTRT:
      /* If the sign bit is known to be zero, replace this with an
	 arithmetic shift.  */
      if (have_insn_for (ASHIFTRT, mode)
	  && ! have_insn_for (LSHIFTRT, mode)
	  && mode_width <= HOST_BITS_PER_WIDE_INT
	  && (nonzero_bits (XEXP (x, 0), mode) & (1 << (mode_width - 1))) == 0)
	{
	  new = gen_rtx_ASHIFTRT (mode,
				  make_compound_operation (XEXP (x, 0),
							   next_code),
				  XEXP (x, 1));
	  break;
	}

      /* ... fall through ...  */

    case ASHIFTRT:
      lhs = XEXP (x, 0);
      rhs = XEXP (x, 1);

      /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
	 this is a SIGN_EXTRACT.  */
      if (GET_CODE (rhs) == CONST_INT
	  && GET_CODE (lhs) == ASHIFT
	  && GET_CODE (XEXP (lhs, 1)) == CONST_INT
	  && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1)))
	{
	  new = make_compound_operation (XEXP (lhs, 0), next_code);
	  new = make_extraction (mode, new,
				 INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
				 NULL_RTX, mode_width - INTVAL (rhs),
				 code == LSHIFTRT, 0, in_code == COMPARE);
	  break;
	}

      /* See if we have operations between an ASHIFTRT and an ASHIFT.
	 If so, try to merge the shifts into a SIGN_EXTEND.  We could
	 also do this for some cases of SIGN_EXTRACT, but it doesn't
	 seem worth the effort; the case checked for occurs on Alpha.  */

      if (!OBJECT_P (lhs)
	  && ! (GET_CODE (lhs) == SUBREG
		&& (OBJECT_P (SUBREG_REG (lhs))))
	  && GET_CODE (rhs) == CONST_INT
	  && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT
	  && (new = extract_left_shift (lhs, INTVAL (rhs))) != 0)
	new = make_extraction (mode, make_compound_operation (new, next_code),
			       0, NULL_RTX, mode_width - INTVAL (rhs),
			       code == LSHIFTRT, 0, in_code == COMPARE);

      break;

    case SUBREG:
      /* Call ourselves recursively on the inner expression.  If we are
	 narrowing the object and it has a different RTL code from
	 what it originally did, do this SUBREG as a force_to_mode.  */

      tem = make_compound_operation (SUBREG_REG (x), in_code);

      {
	rtx simplified;
	simplified = simplify_subreg (GET_MODE (x), tem, GET_MODE (tem),
				      SUBREG_BYTE (x));

	if (simplified)
	  tem = simplified;

	if (GET_CODE (tem) != GET_CODE (SUBREG_REG (x))
	    && GET_MODE_SIZE (mode) < GET_MODE_SIZE (GET_MODE (tem))
	    && subreg_lowpart_p (x))
	  {
	    rtx newer = force_to_mode (tem, mode, ~(HOST_WIDE_INT) 0,
				       0);

	    /* If we have something other than a SUBREG, we might have
	       done an expansion, so rerun ourselves.  */
	    if (GET_CODE (newer) != SUBREG)
	      newer = make_compound_operation (newer, in_code);

	    return newer;
	  }

	if (simplified)
	  return tem;
      }
      break;

    default:
      break;
    }

  if (new)
    {
      x = gen_lowpart (mode, new);
      code = GET_CODE (x);
    }

  /* Now recursively process each operand of this operation.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    if (fmt[i] == 'e')
      {
	new = make_compound_operation (XEXP (x, i), next_code);
	SUBST (XEXP (x, i), new);
      }

  /* If this is a commutative operation, the changes to the operands
     may have made it noncanonical.  */
  if (COMMUTATIVE_ARITH_P (x)
      && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
    {
      tem = XEXP (x, 0);
      SUBST (XEXP (x, 0), XEXP (x, 1));
      SUBST (XEXP (x, 1), tem);
    }

  return x;
}

/* Given M see if it is a value that would select a field of bits
   within an item, but not the entire word.  Return -1 if not.
   Otherwise, return the starting position of the field, where 0 is the
   low-order bit.

   *PLEN is set to the length of the field.  */

static int
get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen)
{
  /* Get the bit number of the first 1 bit from the right, -1 if none.  */
  int pos = exact_log2 (m & -m);
  int len = 0;

  if (pos >= 0)
    /* Now shift off the low-order zero bits and see if we have a
       power of two minus 1.  */
    len = exact_log2 ((m >> pos) + 1);

  if (len <= 0)
    pos = -1;

  *plen = len;
  return pos;
}

/* If X refers to a register that equals REG in value, replace these
   references with REG.  */
static rtx
canon_reg_for_combine (rtx x, rtx reg)
{
  rtx op0, op1, op2;
  const char *fmt;
  int i;
  bool copied;

  enum rtx_code code = GET_CODE (x);
  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      if (op0 != XEXP (x, 0))
	return simplify_gen_unary (GET_CODE (x), GET_MODE (x), op0,
				   GET_MODE (reg));
      break;

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      op1 = canon_reg_for_combine (XEXP (x, 1), reg);
      if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
	return simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
      break;

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      op1 = canon_reg_for_combine (XEXP (x, 1), reg);
      if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
	return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
					GET_MODE (op0), op0, op1);
      break;

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = canon_reg_for_combine (XEXP (x, 0), reg);
      op1 = canon_reg_for_combine (XEXP (x, 1), reg);
      op2 = canon_reg_for_combine (XEXP (x, 2), reg);
      if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1) || op2 != XEXP (x, 2))
	return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
				     GET_MODE (op0), op0, op1, op2);

    case RTX_OBJ:
      if (REG_P (x))
	{
	  if (rtx_equal_p (get_last_value (reg), x)
	      || rtx_equal_p (reg, get_last_value (x)))
	    return reg;
	  else
	    break;
	}

      /* fall through */

    default:
      fmt = GET_RTX_FORMAT (code);
      copied = false;
      for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
	if (fmt[i] == 'e')
	  {
	    rtx op = canon_reg_for_combine (XEXP (x, i), reg);
	    if (op != XEXP (x, i))
	      {
		if (!copied)
		  {
		    copied = true;
		    x = copy_rtx (x);
		  }
		XEXP (x, i) = op;
	      }
	  }
	else if (fmt[i] == 'E')
	  {
	    int j;
	    for (j = 0; j < XVECLEN (x, i); j++)
	      {
		rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
		if (op != XVECEXP (x, i, j))
		  {
		    if (!copied)
		      {
			copied = true;
			x = copy_rtx (x);
		      }
		    XVECEXP (x, i, j) = op;
		  }
	      }
	  }

      break;
    }

  return x;
}

/* Return X converted to MODE.  If the value is already truncated to
   MODE we can just return a subreg even though in the general case we
   would need an explicit truncation.  */

static rtx
gen_lowpart_or_truncate (enum machine_mode mode, rtx x)
{
  if (GET_MODE_SIZE (GET_MODE (x)) <= GET_MODE_SIZE (mode)
      || TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
				GET_MODE_BITSIZE (GET_MODE (x)))
      || (REG_P (x) && reg_truncated_to_mode (mode, x)))
    return gen_lowpart (mode, x);
  else
    return simplify_gen_unary (TRUNCATE, mode, x, GET_MODE (x));
}

/* See if X can be simplified knowing that we will only refer to it in
   MODE and will only refer to those bits that are nonzero in MASK.
   If other bits are being computed or if masking operations are done
   that select a superset of the bits in MASK, they can sometimes be
   ignored.

   Return a possibly simplified expression, but always convert X to
   MODE.  If X is a CONST_INT, AND the CONST_INT with MASK.

   If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
   are all off in X.  This is used when X will be complemented, by either
   NOT, NEG, or XOR.  */

static rtx
force_to_mode (rtx x, enum machine_mode mode, unsigned HOST_WIDE_INT mask,
	       int just_select)
{
  enum rtx_code code = GET_CODE (x);
  int next_select = just_select || code == XOR || code == NOT || code == NEG;
  enum machine_mode op_mode;
  unsigned HOST_WIDE_INT fuller_mask, nonzero;
  rtx op0, op1, temp;

  /* If this is a CALL or ASM_OPERANDS, don't do anything.  Some of the
     code below will do the wrong thing since the mode of such an
     expression is VOIDmode.

     Also do nothing if X is a CLOBBER; this can happen if X was
     the return value from a call to gen_lowpart.  */
  if (code == CALL || code == ASM_OPERANDS || code == CLOBBER)
    return x;

  /* We want to perform the operation is its present mode unless we know
     that the operation is valid in MODE, in which case we do the operation
     in MODE.  */
  op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
	      && have_insn_for (code, mode))
	     ? mode : GET_MODE (x));

  /* It is not valid to do a right-shift in a narrower mode
     than the one it came in with.  */
  if ((code == LSHIFTRT || code == ASHIFTRT)
      && GET_MODE_BITSIZE (mode) < GET_MODE_BITSIZE (GET_MODE (x)))
    op_mode = GET_MODE (x);

  /* Truncate MASK to fit OP_MODE.  */
  if (op_mode)
    mask &= GET_MODE_MASK (op_mode);

  /* When we have an arithmetic operation, or a shift whose count we
     do not know, we need to assume that all bits up to the highest-order
     bit in MASK will be needed.  This is how we form such a mask.  */
  if (mask & ((unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)))
    fuller_mask = ~(unsigned HOST_WIDE_INT) 0;
  else
    fuller_mask = (((unsigned HOST_WIDE_INT) 1 << (floor_log2 (mask) + 1))
		   - 1);

  /* Determine what bits of X are guaranteed to be (non)zero.  */
  nonzero = nonzero_bits (x, mode);

  /* If none of the bits in X are needed, return a zero.  */
  if (!just_select && (nonzero & mask) == 0 && !side_effects_p (x))
    x = const0_rtx;

  /* If X is a CONST_INT, return a new one.  Do this here since the
     test below will fail.  */
  if (GET_CODE (x) == CONST_INT)
    {
      if (SCALAR_INT_MODE_P (mode))
	return gen_int_mode (INTVAL (x) & mask, mode);
      else
	{
	  x = GEN_INT (INTVAL (x) & mask);
	  return gen_lowpart_common (mode, x);
	}
    }

  /* If X is narrower than MODE and we want all the bits in X's mode, just
     get X in the proper mode.  */
  if (GET_MODE_SIZE (GET_MODE (x)) < GET_MODE_SIZE (mode)
      && (GET_MODE_MASK (GET_MODE (x)) & ~mask) == 0)
    return gen_lowpart (mode, x);

  switch (code)
    {
    case CLOBBER:
      /* If X is a (clobber (const_int)), return it since we know we are
	 generating something that won't match.  */
      return x;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      x = expand_compound_operation (x);
      if (GET_CODE (x) != code)
	return force_to_mode (x, mode, mask, next_select);
      break;

    case SUBREG:
      if (subreg_lowpart_p (x)
	  /* We can ignore the effect of this SUBREG if it narrows the mode or
	     if the constant masks to zero all the bits the mode doesn't
	     have.  */
	  && ((GET_MODE_SIZE (GET_MODE (x))
	       < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
	      || (0 == (mask
			& GET_MODE_MASK (GET_MODE (x))
			& ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))))))
	return force_to_mode (SUBREG_REG (x), mode, mask, next_select);
      break;

    case AND:
      /* If this is an AND with a constant, convert it into an AND
	 whose constant is the AND of that constant with MASK.  If it
	 remains an AND of MASK, delete it since it is redundant.  */

      if (GET_CODE (XEXP (x, 1)) == CONST_INT)
	{
	  x = simplify_and_const_int (x, op_mode, XEXP (x, 0),
				      mask & INTVAL (XEXP (x, 1)));

	  /* If X is still an AND, see if it is an AND with a mask that
	     is just some low-order bits.  If so, and it is MASK, we don't
	     need it.  */

	  if (GET_CODE (x) == AND && GET_CODE (XEXP (x, 1)) == CONST_INT
	      && ((INTVAL (XEXP (x, 1)) & GET_MODE_MASK (GET_MODE (x)))
		  == mask))
	    x = XEXP (x, 0);

	  /* If it remains an AND, try making another AND with the bits
	     in the mode mask that aren't in MASK turned on.  If the
	     constant in the AND is wide enough, this might make a
	     cheaper constant.  */

	  if (GET_CODE (x) == AND && GET_CODE (XEXP (x, 1)) == CONST_INT
	      && GET_MODE_MASK (GET_MODE (x)) != mask
	      && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT)
	    {
	      HOST_WIDE_INT cval = (INTVAL (XEXP (x, 1))
				    | (GET_MODE_MASK (GET_MODE (x)) & ~mask));
	      int width = GET_MODE_BITSIZE (GET_MODE (x));
	      rtx y;

	      /* If MODE is narrower than HOST_WIDE_INT and CVAL is a negative
		 number, sign extend it.  */
	      if (width > 0 && width < HOST_BITS_PER_WIDE_INT
		  && (cval & ((HOST_WIDE_INT) 1 << (width - 1))) != 0)
		cval |= (HOST_WIDE_INT) -1 << width;

	      y = simplify_gen_binary (AND, GET_MODE (x),
				       XEXP (x, 0), GEN_INT (cval));
	      if (rtx_cost (y, SET) < rtx_cost (x, SET))
		x = y;
	    }

	  break;
	}

      goto binop;

    case PLUS:
      /* In (and (plus FOO C1) M), if M is a mask that just turns off
	 low-order bits (as in an alignment operation) and FOO is already
	 aligned to that boundary, mask C1 to that boundary as well.
	 This may eliminate that PLUS and, later, the AND.  */

      {
	unsigned int width = GET_MODE_BITSIZE (mode);
	unsigned HOST_WIDE_INT smask = mask;

	/* If MODE is narrower than HOST_WIDE_INT and mask is a negative
	   number, sign extend it.  */

	if (width < HOST_BITS_PER_WIDE_INT
	    && (smask & ((HOST_WIDE_INT) 1 << (width - 1))) != 0)
	  smask |= (HOST_WIDE_INT) -1 << width;

	if (GET_CODE (XEXP (x, 1)) == CONST_INT
	    && exact_log2 (- smask) >= 0
	    && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
	    && (INTVAL (XEXP (x, 1)) & ~smask) != 0)
	  return force_to_mode (plus_constant (XEXP (x, 0),
					       (INTVAL (XEXP (x, 1)) & smask)),
				mode, smask, next_select);
      }

      /* ... fall through ...  */

    case MULT:
      /* For PLUS, MINUS and MULT, we need any bits less significant than the
	 most significant bit in MASK since carries from those bits will
	 affect the bits we are interested in.  */
      mask = fuller_mask;
      goto binop;

    case MINUS:
      /* If X is (minus C Y) where C's least set bit is larger than any bit
	 in the mask, then we may replace with (neg Y).  */
      if (GET_CODE (XEXP (x, 0)) == CONST_INT
	  && (((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 0))
					& -INTVAL (XEXP (x, 0))))
	      > mask))
	{
	  x = simplify_gen_unary (NEG, GET_MODE (x), XEXP (x, 1),
				  GET_MODE (x));
	  return force_to_mode (x, mode, mask, next_select);
	}

      /* Similarly, if C contains every bit in the fuller_mask, then we may
	 replace with (not Y).  */
      if (GET_CODE (XEXP (x, 0)) == CONST_INT
	  && ((INTVAL (XEXP (x, 0)) | (HOST_WIDE_INT) fuller_mask)
	      == INTVAL (XEXP (x, 0))))
	{
	  x = simplify_gen_unary (NOT, GET_MODE (x),
				  XEXP (x, 1), GET_MODE (x));
	  return force_to_mode (x, mode, mask, next_select);
	}

      mask = fuller_mask;
      goto binop;

    case IOR:
    case XOR:
      /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
	 LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
	 operation which may be a bitfield extraction.  Ensure that the
	 constant we form is not wider than the mode of X.  */

      if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
	  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
	  && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
	  && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
	  && GET_CODE (XEXP (x, 1)) == CONST_INT
	  && ((INTVAL (XEXP (XEXP (x, 0), 1))
	       + floor_log2 (INTVAL (XEXP (x, 1))))
	      < GET_MODE_BITSIZE (GET_MODE (x)))
	  && (INTVAL (XEXP (x, 1))
	      & ~nonzero_bits (XEXP (x, 0), GET_MODE (x))) == 0)
	{
	  temp = GEN_INT ((INTVAL (XEXP (x, 1)) & mask)
			  << INTVAL (XEXP (XEXP (x, 0), 1)));
	  temp = simplify_gen_binary (GET_CODE (x), GET_MODE (x),
				      XEXP (XEXP (x, 0), 0), temp);
	  x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), temp,
				   XEXP (XEXP (x, 0), 1));
	  return force_to_mode (x, mode, mask, next_select);
	}

    binop:
      /* For most binary operations, just propagate into the operation and
	 change the mode if we have an operation of that mode.  */

      op0 = gen_lowpart_or_truncate (op_mode,
				     force_to_mode (XEXP (x, 0), mode, mask,
						    next_select));
      op1 = gen_lowpart_or_truncate (op_mode,
				     force_to_mode (XEXP (x, 1), mode, mask,
					next_select));

      if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
	x = simplify_gen_binary (code, op_mode, op0, op1);
      break;

    case ASHIFT:
      /* For left shifts, do the same, but just for the first operand.
	 However, we cannot do anything with shifts where we cannot
	 guarantee that the counts are smaller than the size of the mode
	 because such a count will have a different meaning in a
	 wider mode.  */

      if (! (GET_CODE (XEXP (x, 1)) == CONST_INT
	     && INTVAL (XEXP (x, 1)) >= 0
	     && INTVAL (XEXP (x, 1)) < GET_MODE_BITSIZE (mode))
	  && ! (GET_MODE (XEXP (x, 1)) != VOIDmode
		&& (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
		    < (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (mode))))
	break;

      /* If the shift count is a constant and we can do arithmetic in
	 the mode of the shift, refine which bits we need.  Otherwise, use the
	 conservative form of the mask.  */
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < GET_MODE_BITSIZE (op_mode)
	  && GET_MODE_BITSIZE (op_mode) <= HOST_BITS_PER_WIDE_INT)
	mask >>= INTVAL (XEXP (x, 1));
      else
	mask = fuller_mask;

      op0 = gen_lowpart_or_truncate (op_mode,
				     force_to_mode (XEXP (x, 0), op_mode,
						    mask, next_select));

      if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0))
	x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1));
      break;

    case LSHIFTRT:
      /* Here we can only do something if the shift count is a constant,
	 this shift constant is valid for the host, and we can do arithmetic
	 in OP_MODE.  */

      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
	  && GET_MODE_BITSIZE (op_mode) <= HOST_BITS_PER_WIDE_INT)
	{
	  rtx inner = XEXP (x, 0);
	  unsigned HOST_WIDE_INT inner_mask;

	  /* Select the mask of the bits we need for the shift operand.  */
	  inner_mask = mask << INTVAL (XEXP (x, 1));

	  /* We can only change the mode of the shift if we can do arithmetic
	     in the mode of the shift and INNER_MASK is no wider than the
	     width of X's mode.  */
	  if ((inner_mask & ~GET_MODE_MASK (GET_MODE (x))) != 0)
	    op_mode = GET_MODE (x);

	  inner = force_to_mode (inner, op_mode, inner_mask, next_select);

	  if (GET_MODE (x) != op_mode || inner != XEXP (x, 0))
	    x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
	}

      /* If we have (and (lshiftrt FOO C1) C2) where the combination of the
	 shift and AND produces only copies of the sign bit (C2 is one less
	 than a power of two), we can do this with just a shift.  */

      if (GET_CODE (x) == LSHIFTRT
	  && GET_CODE (XEXP (x, 1)) == CONST_INT
	  /* The shift puts one of the sign bit copies in the least significant
	     bit.  */
	  && ((INTVAL (XEXP (x, 1))
	       + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
	      >= GET_MODE_BITSIZE (GET_MODE (x)))
	  && exact_log2 (mask + 1) >= 0
	  /* Number of bits left after the shift must be more than the mask
	     needs.  */
	  && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
	      <= GET_MODE_BITSIZE (GET_MODE (x)))
	  /* Must be more sign bit copies than the mask needs.  */
	  && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
	      >= exact_log2 (mask + 1)))
	x = simplify_gen_binary (LSHIFTRT, GET_MODE (x), XEXP (x, 0),
				 GEN_INT (GET_MODE_BITSIZE (GET_MODE (x))
					  - exact_log2 (mask + 1)));

      goto shiftrt;

    case ASHIFTRT:
      /* If we are just looking for the sign bit, we don't need this shift at
	 all, even if it has a variable count.  */
      if (GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
	  && (mask == ((unsigned HOST_WIDE_INT) 1
		       << (GET_MODE_BITSIZE (GET_MODE (x)) - 1))))
	return force_to_mode (XEXP (x, 0), mode, mask, next_select);

      /* If this is a shift by a constant, get a mask that contains those bits
	 that are not copies of the sign bit.  We then have two cases:  If
	 MASK only includes those bits, this can be a logical shift, which may
	 allow simplifications.  If MASK is a single-bit field not within
	 those bits, we are requesting a copy of the sign bit and hence can
	 shift the sign bit to the appropriate location.  */

      if (GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
	{
	  int i;

	  /* If the considered data is wider than HOST_WIDE_INT, we can't
	     represent a mask for all its bits in a single scalar.
	     But we only care about the lower bits, so calculate these.  */

	  if (GET_MODE_BITSIZE (GET_MODE (x)) > HOST_BITS_PER_WIDE_INT)
	    {
	      nonzero = ~(HOST_WIDE_INT) 0;

	      /* GET_MODE_BITSIZE (GET_MODE (x)) - INTVAL (XEXP (x, 1))
		 is the number of bits a full-width mask would have set.
		 We need only shift if these are fewer than nonzero can
		 hold.  If not, we must keep all bits set in nonzero.  */

	      if (GET_MODE_BITSIZE (GET_MODE (x)) - INTVAL (XEXP (x, 1))
		  < HOST_BITS_PER_WIDE_INT)
		nonzero >>= INTVAL (XEXP (x, 1))
			    + HOST_BITS_PER_WIDE_INT
			    - GET_MODE_BITSIZE (GET_MODE (x)) ;
	    }
	  else
	    {
	      nonzero = GET_MODE_MASK (GET_MODE (x));
	      nonzero >>= INTVAL (XEXP (x, 1));
	    }

	  if ((mask & ~nonzero) == 0)
	    {
	      x = simplify_shift_const (NULL_RTX, LSHIFTRT, GET_MODE (x),
					XEXP (x, 0), INTVAL (XEXP (x, 1)));
	      if (GET_CODE (x) != ASHIFTRT)
		return force_to_mode (x, mode, mask, next_select);
	    }

	  else if ((i = exact_log2 (mask)) >= 0)
	    {
	      x = simplify_shift_const
		  (NULL_RTX, LSHIFTRT, GET_MODE (x), XEXP (x, 0),
		   GET_MODE_BITSIZE (GET_MODE (x)) - 1 - i);

	      if (GET_CODE (x) != ASHIFTRT)
		return force_to_mode (x, mode, mask, next_select);
	    }
	}

      /* If MASK is 1, convert this to an LSHIFTRT.  This can be done
	 even if the shift count isn't a constant.  */
      if (mask == 1)
	x = simplify_gen_binary (LSHIFTRT, GET_MODE (x),
				 XEXP (x, 0), XEXP (x, 1));

    shiftrt:

      /* If this is a zero- or sign-extension operation that just affects bits
	 we don't care about, remove it.  Be sure the call above returned
	 something that is still a shift.  */

      if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT)
	  && GET_CODE (XEXP (x, 1)) == CONST_INT
	  && INTVAL (XEXP (x, 1)) >= 0
	  && (INTVAL (XEXP (x, 1))
	      <= GET_MODE_BITSIZE (GET_MODE (x)) - (floor_log2 (mask) + 1))
	  && GET_CODE (XEXP (x, 0)) == ASHIFT
	  && XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
	return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
			      next_select);

      break;

    case ROTATE:
    case ROTATERT:
      /* If the shift count is constant and we can do computations
	 in the mode of X, compute where the bits we care about are.
	 Otherwise, we can't do anything.  Don't change the mode of
	 the shift or propagate MODE into the shift, though.  */
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && INTVAL (XEXP (x, 1)) >= 0)
	{
	  temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE,
					    GET_MODE (x), GEN_INT (mask),
					    XEXP (x, 1));
	  if (temp && GET_CODE (temp) == CONST_INT)
	    SUBST (XEXP (x, 0),
		   force_to_mode (XEXP (x, 0), GET_MODE (x),
				  INTVAL (temp), next_select));
	}
      break;

    case NEG:
      /* If we just want the low-order bit, the NEG isn't needed since it
	 won't change the low-order bit.  */
      if (mask == 1)
	return force_to_mode (XEXP (x, 0), mode, mask, just_select);

      /* We need any bits less significant than the most significant bit in
	 MASK since carries from those bits will affect the bits we are
	 interested in.  */
      mask = fuller_mask;
      goto unop;

    case NOT:
      /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the
	 same as the XOR case above.  Ensure that the constant we form is not
	 wider than the mode of X.  */

      if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
	  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
	  && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
	  && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
	      < GET_MODE_BITSIZE (GET_MODE (x)))
	  && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
	{
	  temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)),
			       GET_MODE (x));
	  temp = simplify_gen_binary (XOR, GET_MODE (x),
				      XEXP (XEXP (x, 0), 0), temp);
	  x = simplify_gen_binary (LSHIFTRT, GET_MODE (x),
				   temp, XEXP (XEXP (x, 0), 1));

	  return force_to_mode (x, mode, mask, next_select);
	}

      /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
	 use the full mask inside the NOT.  */
      mask = fuller_mask;

    unop:
      op0 = gen_lowpart_or_truncate (op_mode,
				     force_to_mode (XEXP (x, 0), mode, mask,
						    next_select));
      if (op_mode != GET_MODE (x) || op0 != XEXP (x, 0))
	x = simplify_gen_unary (code, op_mode, op0, op_mode);
      break;

    case NE:
      /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
	 in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero,
	 which is equal to STORE_FLAG_VALUE.  */
      if ((mask & ~STORE_FLAG_VALUE) == 0 && XEXP (x, 1) == const0_rtx
	  && GET_MODE (XEXP (x, 0)) == mode
	  && exact_log2 (nonzero_bits (XEXP (x, 0), mode)) >= 0
	  && (nonzero_bits (XEXP (x, 0), mode)
	      == (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE))
	return force_to_mode (XEXP (x, 0), mode, mask, next_select);

      break;

    case IF_THEN_ELSE:
      /* We have no way of knowing if the IF_THEN_ELSE can itself be
	 written in a narrower mode.  We play it safe and do not do so.  */

      SUBST (XEXP (x, 1),
	     gen_lowpart_or_truncate (GET_MODE (x),
				      force_to_mode (XEXP (x, 1), mode,
						     mask, next_select)));
      SUBST (XEXP (x, 2),
	     gen_lowpart_or_truncate (GET_MODE (x),
				      force_to_mode (XEXP (x, 2), mode,
						     mask, next_select)));
      break;

    default:
      break;
    }

  /* Ensure we return a value of the proper mode.  */
  return gen_lowpart_or_truncate (mode, x);
}

/* Return nonzero if X is an expression that has one of two values depending on
   whether some other value is zero or nonzero.  In that case, we return the
   value that is being tested, *PTRUE is set to the value if the rtx being
   returned has a nonzero value, and *PFALSE is set to the other alternative.

   If we return zero, we set *PTRUE and *PFALSE to X.  */

static rtx
if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse)
{
  enum machine_mode mode = GET_MODE (x);
  enum rtx_code code = GET_CODE (x);
  rtx cond0, cond1, true0, true1, false0, false1;
  unsigned HOST_WIDE_INT nz;

  /* If we are comparing a value against zero, we are done.  */
  if ((code == NE || code == EQ)
      && XEXP (x, 1) == const0_rtx)
    {
      *ptrue = (code == NE) ? const_true_rtx : const0_rtx;
      *pfalse = (code == NE) ? const0_rtx : const_true_rtx;
      return XEXP (x, 0);
    }

  /* If this is a unary operation whose operand has one of two values, apply
     our opcode to compute those values.  */
  else if (UNARY_P (x)
	   && (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0)
    {
      *ptrue = simplify_gen_unary (code, mode, true0, GET_MODE (XEXP (x, 0)));
      *pfalse = simplify_gen_unary (code, mode, false0,
				    GET_MODE (XEXP (x, 0)));
      return cond0;
    }

  /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
     make can't possibly match and would suppress other optimizations.  */
  else if (code == COMPARE)
    ;

  /* If this is a binary operation, see if either side has only one of two
     values.  If either one does or if both do and they are conditional on
     the same value, compute the new true and false values.  */
  else if (BINARY_P (x))
    {
      cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0);
      cond1 = if_then_else_cond (XEXP (x, 1), &true1, &false1);

      if ((cond0 != 0 || cond1 != 0)
	  && ! (cond0 != 0 && cond1 != 0 && ! rtx_equal_p (cond0, cond1)))
	{
	  /* If if_then_else_cond returned zero, then true/false are the
	     same rtl.  We must copy one of them to prevent invalid rtl
	     sharing.  */
	  if (cond0 == 0)
	    true0 = copy_rtx (true0);
	  else if (cond1 == 0)
	    true1 = copy_rtx (true1);

	  if (COMPARISON_P (x))
	    {
	      *ptrue = simplify_gen_relational (code, mode, VOIDmode,
						true0, true1);
	      *pfalse = simplify_gen_relational (code, mode, VOIDmode,
						 false0, false1);
	     }
	  else
	    {
	      *ptrue = simplify_gen_binary (code, mode, true0, true1);
	      *pfalse = simplify_gen_binary (code, mode, false0, false1);
	    }

	  return cond0 ? cond0 : cond1;
	}

      /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
	 operands is zero when the other is nonzero, and vice-versa,
	 and STORE_FLAG_VALUE is 1 or -1.  */

      if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
	  && (code == PLUS || code == IOR || code == XOR || code == MINUS
	      || code == UMAX)
	  && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
	{
	  rtx op0 = XEXP (XEXP (x, 0), 1);
	  rtx op1 = XEXP (XEXP (x, 1), 1);

	  cond0 = XEXP (XEXP (x, 0), 0);
	  cond1 = XEXP (XEXP (x, 1), 0);

	  if (COMPARISON_P (cond0)
	      && COMPARISON_P (cond1)
	      && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
		   && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
		   && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
		  || ((swap_condition (GET_CODE (cond0))
		       == reversed_comparison_code (cond1, NULL))
		      && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
		      && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
	      && ! side_effects_p (x))
	    {
	      *ptrue = simplify_gen_binary (MULT, mode, op0, const_true_rtx);
	      *pfalse = simplify_gen_binary (MULT, mode,
					     (code == MINUS
					      ? simplify_gen_unary (NEG, mode,
								    op1, mode)
					      : op1),
					      const_true_rtx);
	      return cond0;
	    }
	}

      /* Similarly for MULT, AND and UMIN, except that for these the result
	 is always zero.  */
      if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
	  && (code == MULT || code == AND || code == UMIN)
	  && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
	{
	  cond0 = XEXP (XEXP (x, 0), 0);
	  cond1 = XEXP (XEXP (x, 1), 0);

	  if (COMPARISON_P (cond0)
	      && COMPARISON_P (cond1)
	      && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
		   && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
		   && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
		  || ((swap_condition (GET_CODE (cond0))
		       == reversed_comparison_code (cond1, NULL))
		      && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
		      && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
	      && ! side_effects_p (x))
	    {
	      *ptrue = *pfalse = const0_rtx;
	      return cond0;
	    }
	}
    }

  else if (code == IF_THEN_ELSE)
    {
      /* If we have IF_THEN_ELSE already, extract the condition and
	 canonicalize it if it is NE or EQ.  */
      cond0 = XEXP (x, 0);
      *ptrue = XEXP (x, 1), *pfalse = XEXP (x, 2);
      if (GET_CODE (cond0) == NE && XEXP (cond0, 1) == const0_rtx)
	return XEXP (cond0, 0);
      else if (GET_CODE (cond0) == EQ && XEXP (cond0, 1) == const0_rtx)
	{
	  *ptrue = XEXP (x, 2), *pfalse = XEXP (x, 1);
	  return XEXP (cond0, 0);
	}
      else
	return cond0;
    }

  /* If X is a SUBREG, we can narrow both the true and false values
     if the inner expression, if there is a condition.  */
  else if (code == SUBREG
	   && 0 != (cond0 = if_then_else_cond (SUBREG_REG (x),
					       &true0, &false0)))
    {
      true0 = simplify_gen_subreg (mode, true0,
				   GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
      false0 = simplify_gen_subreg (mode, false0,
				    GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
      if (true0 && false0)
	{
	  *ptrue = true0;
	  *pfalse = false0;
	  return cond0;
	}
    }

  /* If X is a constant, this isn't special and will cause confusions
     if we treat it as such.  Likewise if it is equivalent to a constant.  */
  else if (CONSTANT_P (x)
	   || ((cond0 = get_last_value (x)) != 0 && CONSTANT_P (cond0)))
    ;

  /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that
     will be least confusing to the rest of the compiler.  */
  else if (mode == BImode)
    {
      *ptrue = GEN_INT (STORE_FLAG_VALUE), *pfalse = const0_rtx;
      return x;
    }

  /* If X is known to be either 0 or -1, those are the true and
     false values when testing X.  */
  else if (x == constm1_rtx || x == const0_rtx
	   || (mode != VOIDmode
	       && num_sign_bit_copies (x, mode) == GET_MODE_BITSIZE (mode)))
    {
      *ptrue = constm1_rtx, *pfalse = const0_rtx;
      return x;
    }

  /* Likewise for 0 or a single bit.  */
  else if (SCALAR_INT_MODE_P (mode)
	   && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	   && exact_log2 (nz = nonzero_bits (x, mode)) >= 0)
    {
      *ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx;
      return x;
    }

  /* Otherwise fail; show no condition with true and false values the same.  */
  *ptrue = *pfalse = x;
  return 0;
}

/* Return the value of expression X given the fact that condition COND
   is known to be true when applied to REG as its first operand and VAL
   as its second.  X is known to not be shared and so can be modified in
   place.

   We only handle the simplest cases, and specifically those cases that
   arise with IF_THEN_ELSE expressions.  */

static rtx
known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val)
{
  enum rtx_code code = GET_CODE (x);
  rtx temp;
  const char *fmt;
  int i, j;

  if (side_effects_p (x))
    return x;

  /* If either operand of the condition is a floating point value,
     then we have to avoid collapsing an EQ comparison.  */
  if (cond == EQ
      && rtx_equal_p (x, reg)
      && ! FLOAT_MODE_P (GET_MODE (x))
      && ! FLOAT_MODE_P (GET_MODE (val)))
    return val;

  if (cond == UNEQ && rtx_equal_p (x, reg))
    return val;

  /* If X is (abs REG) and we know something about REG's relationship
     with zero, we may be able to simplify this.  */

  if (code == ABS && rtx_equal_p (XEXP (x, 0), reg) && val == const0_rtx)
    switch (cond)
      {
      case GE:  case GT:  case EQ:
	return XEXP (x, 0);
      case LT:  case LE:
	return simplify_gen_unary (NEG, GET_MODE (XEXP (x, 0)),
				   XEXP (x, 0),
				   GET_MODE (XEXP (x, 0)));
      default:
	break;
      }

  /* The only other cases we handle are MIN, MAX, and comparisons if the
     operands are the same as REG and VAL.  */

  else if (COMPARISON_P (x) || COMMUTATIVE_ARITH_P (x))
    {
      if (rtx_equal_p (XEXP (x, 0), val))
	cond = swap_condition (cond), temp = val, val = reg, reg = temp;

      if (rtx_equal_p (XEXP (x, 0), reg) && rtx_equal_p (XEXP (x, 1), val))
	{
	  if (COMPARISON_P (x))
	    {
	      if (comparison_dominates_p (cond, code))
		return const_true_rtx;

	      code = reversed_comparison_code (x, NULL);
	      if (code != UNKNOWN
		  && comparison_dominates_p (cond, code))
		return const0_rtx;
	      else
		return x;
	    }
	  else if (code == SMAX || code == SMIN
		   || code == UMIN || code == UMAX)
	    {
	      int unsignedp = (code == UMIN || code == UMAX);

	      /* Do not reverse the condition when it is NE or EQ.
		 This is because we cannot conclude anything about
		 the value of 'SMAX (x, y)' when x is not equal to y,
		 but we can when x equals y.  */
	      if ((code == SMAX || code == UMAX)
		  && ! (cond == EQ || cond == NE))
		cond = reverse_condition (cond);

	      switch (cond)
		{
		case GE:   case GT:
		  return unsignedp ? x : XEXP (x, 1);
		case LE:   case LT:
		  return unsignedp ? x : XEXP (x, 0);
		case GEU:  case GTU:
		  return unsignedp ? XEXP (x, 1) : x;
		case LEU:  case LTU:
		  return unsignedp ? XEXP (x, 0) : x;
		default:
		  break;
		}
	    }
	}
    }
  else if (code == SUBREG)
    {
      enum machine_mode inner_mode = GET_MODE (SUBREG_REG (x));
      rtx new, r = known_cond (SUBREG_REG (x), cond, reg, val);

      if (SUBREG_REG (x) != r)
	{
	  /* We must simplify subreg here, before we lose track of the
	     original inner_mode.  */
	  new = simplify_subreg (GET_MODE (x), r,
				 inner_mode, SUBREG_BYTE (x));
	  if (new)
	    return new;
	  else
	    SUBST (SUBREG_REG (x), r);
	}

      return x;
    }
  /* We don't have to handle SIGN_EXTEND here, because even in the
     case of replacing something with a modeless CONST_INT, a
     CONST_INT is already (supposed to be) a valid sign extension for
     its narrower mode, which implies it's already properly
     sign-extended for the wider mode.  Now, for ZERO_EXTEND, the
     story is different.  */
  else if (code == ZERO_EXTEND)
    {
      enum machine_mode inner_mode = GET_MODE (XEXP (x, 0));
      rtx new, r = known_cond (XEXP (x, 0), cond, reg, val);

      if (XEXP (x, 0) != r)
	{
	  /* We must simplify the zero_extend here, before we lose
	     track of the original inner_mode.  */
	  new = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
					  r, inner_mode);
	  if (new)
	    return new;
	  else
	    SUBST (XEXP (x, 0), r);
	}

      return x;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	SUBST (XEXP (x, i), known_cond (XEXP (x, i), cond, reg, val));
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  SUBST (XVECEXP (x, i, j), known_cond (XVECEXP (x, i, j),
						cond, reg, val));
    }

  return x;
}

/* See if X and Y are equal for the purposes of seeing if we can rewrite an
   assignment as a field assignment.  */

static int
rtx_equal_for_field_assignment_p (rtx x, rtx y)
{
  if (x == y || rtx_equal_p (x, y))
    return 1;

  if (x == 0 || y == 0 || GET_MODE (x) != GET_MODE (y))
    return 0;

  /* Check for a paradoxical SUBREG of a MEM compared with the MEM.
     Note that all SUBREGs of MEM are paradoxical; otherwise they
     would have been rewritten.  */
  if (MEM_P (x) && GET_CODE (y) == SUBREG
      && MEM_P (SUBREG_REG (y))
      && rtx_equal_p (SUBREG_REG (y),
		      gen_lowpart (GET_MODE (SUBREG_REG (y)), x)))
    return 1;

  if (MEM_P (y) && GET_CODE (x) == SUBREG
      && MEM_P (SUBREG_REG (x))
      && rtx_equal_p (SUBREG_REG (x),
		      gen_lowpart (GET_MODE (SUBREG_REG (x)), y)))
    return 1;

  /* We used to see if get_last_value of X and Y were the same but that's
     not correct.  In one direction, we'll cause the assignment to have
     the wrong destination and in the case, we'll import a register into this
     insn that might have already have been dead.   So fail if none of the
     above cases are true.  */
  return 0;
}

/* See if X, a SET operation, can be rewritten as a bit-field assignment.
   Return that assignment if so.

   We only handle the most common cases.  */

static rtx
make_field_assignment (rtx x)
{
  rtx dest = SET_DEST (x);
  rtx src = SET_SRC (x);
  rtx assign;
  rtx rhs, lhs;
  HOST_WIDE_INT c1;
  HOST_WIDE_INT pos;
  unsigned HOST_WIDE_INT len;
  rtx other;
  enum machine_mode mode;

  /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
     a clear of a one-bit field.  We will have changed it to
     (and (rotate (const_int -2) POS) DEST), so check for that.  Also check
     for a SUBREG.  */

  if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE
      && GET_CODE (XEXP (XEXP (src, 0), 0)) == CONST_INT
      && INTVAL (XEXP (XEXP (src, 0), 0)) == -2
      && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
    {
      assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
				1, 1, 1, 0);
      if (assign != 0)
	return gen_rtx_SET (VOIDmode, assign, const0_rtx);
      return x;
    }

  if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
      && subreg_lowpart_p (XEXP (src, 0))
      && (GET_MODE_SIZE (GET_MODE (XEXP (src, 0)))
	  < GET_MODE_SIZE (GET_MODE (SUBREG_REG (XEXP (src, 0)))))
      && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
      && GET_CODE (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == CONST_INT
      && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
      && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
    {
      assign = make_extraction (VOIDmode, dest, 0,
				XEXP (SUBREG_REG (XEXP (src, 0)), 1),
				1, 1, 1, 0);
      if (assign != 0)
	return gen_rtx_SET (VOIDmode, assign, const0_rtx);
      return x;
    }

  /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
     one-bit field.  */
  if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT
      && XEXP (XEXP (src, 0), 0) == const1_rtx
      && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
    {
      assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
				1, 1, 1, 0);
      if (assign != 0)
	return gen_rtx_SET (VOIDmode, assign, const1_rtx);
      return x;
    }

  /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the
     SRC is an AND with all bits of that field set, then we can discard
     the AND.  */
  if (GET_CODE (dest) == ZERO_EXTRACT
      && GET_CODE (XEXP (dest, 1)) == CONST_INT
      && GET_CODE (src) == AND
      && GET_CODE (XEXP (src, 1)) == CONST_INT)
    {
      HOST_WIDE_INT width = INTVAL (XEXP (dest, 1));
      unsigned HOST_WIDE_INT and_mask = INTVAL (XEXP (src, 1));
      unsigned HOST_WIDE_INT ze_mask;

      if (width >= HOST_BITS_PER_WIDE_INT)
	ze_mask = -1;
      else
	ze_mask = ((unsigned HOST_WIDE_INT)1 << width) - 1;

      /* Complete overlap.  We can remove the source AND.  */
      if ((and_mask & ze_mask) == ze_mask)
	return gen_rtx_SET (VOIDmode, dest, XEXP (src, 0));

      /* Partial overlap.  We can reduce the source AND.  */
      if ((and_mask & ze_mask) != and_mask)
	{
	  mode = GET_MODE (src);
	  src = gen_rtx_AND (mode, XEXP (src, 0),
			     gen_int_mode (and_mask & ze_mask, mode));
	  return gen_rtx_SET (VOIDmode, dest, src);
	}
    }

  /* The other case we handle is assignments into a constant-position
     field.  They look like (ior/xor (and DEST C1) OTHER).  If C1 represents
     a mask that has all one bits except for a group of zero bits and
     OTHER is known to have zeros where C1 has ones, this is such an
     assignment.  Compute the position and length from C1.  Shift OTHER
     to the appropriate position, force it to the required mode, and
     make the extraction.  Check for the AND in both operands.  */

  if (GET_CODE (src) != IOR && GET_CODE (src) != XOR)
    return x;

  rhs = expand_compound_operation (XEXP (src, 0));
  lhs = expand_compound_operation (XEXP (src, 1));

  if (GET_CODE (rhs) == AND
      && GET_CODE (XEXP (rhs, 1)) == CONST_INT
      && rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest))
    c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
  else if (GET_CODE (lhs) == AND
	   && GET_CODE (XEXP (lhs, 1)) == CONST_INT
	   && rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest))
    c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
  else
    return x;

  pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (GET_MODE (dest)), &len);
  if (pos < 0 || pos + len > GET_MODE_BITSIZE (GET_MODE (dest))
      || GET_MODE_BITSIZE (GET_MODE (dest)) > HOST_BITS_PER_WIDE_INT
      || (c1 & nonzero_bits (other, GET_MODE (dest))) != 0)
    return x;

  assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0);
  if (assign == 0)
    return x;

  /* The mode to use for the source is the mode of the assignment, or of
     what is inside a possible STRICT_LOW_PART.  */
  mode = (GET_CODE (assign) == STRICT_LOW_PART
	  ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));

  /* Shift OTHER right POS places and make it the source, restricting it
     to the proper length and mode.  */

  src = canon_reg_for_combine (simplify_shift_const (NULL_RTX, LSHIFTRT,
						     GET_MODE (src),
						     other, pos),
			       dest);
  src = force_to_mode (src, mode,
		       GET_MODE_BITSIZE (mode) >= HOST_BITS_PER_WIDE_INT
		       ? ~(unsigned HOST_WIDE_INT) 0
		       : ((unsigned HOST_WIDE_INT) 1 << len) - 1,
		       0);

  /* If SRC is masked by an AND that does not make a difference in
     the value being stored, strip it.  */
  if (GET_CODE (assign) == ZERO_EXTRACT
      && GET_CODE (XEXP (assign, 1)) == CONST_INT
      && INTVAL (XEXP (assign, 1)) < HOST_BITS_PER_WIDE_INT
      && GET_CODE (src) == AND
      && GET_CODE (XEXP (src, 1)) == CONST_INT
      && ((unsigned HOST_WIDE_INT) INTVAL (XEXP (src, 1))
	  == ((unsigned HOST_WIDE_INT) 1 << INTVAL (XEXP (assign, 1))) - 1))
    src = XEXP (src, 0);

  return gen_rtx_SET (VOIDmode, assign, src);
}

/* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c)
   if so.  */

static rtx
apply_distributive_law (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum rtx_code inner_code;
  rtx lhs, rhs, other;
  rtx tem;

  /* Distributivity is not true for floating point as it can change the
     value.  So we don't do it unless -funsafe-math-optimizations.  */
  if (FLOAT_MODE_P (GET_MODE (x))
      && ! flag_unsafe_math_optimizations)
    return x;

  /* The outer operation can only be one of the following:  */
  if (code != IOR && code != AND && code != XOR
      && code != PLUS && code != MINUS)
    return x;

  lhs = XEXP (x, 0);
  rhs = XEXP (x, 1);

  /* If either operand is a primitive we can't do anything, so get out
     fast.  */
  if (OBJECT_P (lhs) || OBJECT_P (rhs))
    return x;

  lhs = expand_compound_operation (lhs);
  rhs = expand_compound_operation (rhs);
  inner_code = GET_CODE (lhs);
  if (inner_code != GET_CODE (rhs))
    return x;

  /* See if the inner and outer operations distribute.  */
  switch (inner_code)
    {
    case LSHIFTRT:
    case ASHIFTRT:
    case AND:
    case IOR:
      /* These all distribute except over PLUS.  */
      if (code == PLUS || code == MINUS)
	return x;
      break;

    case MULT:
      if (code != PLUS && code != MINUS)
	return x;
      break;

    case ASHIFT:
      /* This is also a multiply, so it distributes over everything.  */
      break;

    case SUBREG:
      /* Non-paradoxical SUBREGs distributes over all operations,
	 provided the inner modes and byte offsets are the same, this
	 is an extraction of a low-order part, we don't convert an fp
	 operation to int or vice versa, this is not a vector mode,
	 and we would not be converting a single-word operation into a
	 multi-word operation.  The latter test is not required, but
	 it prevents generating unneeded multi-word operations.  Some
	 of the previous tests are redundant given the latter test,
	 but are retained because they are required for correctness.

	 We produce the result slightly differently in this case.  */

      if (GET_MODE (SUBREG_REG (lhs)) != GET_MODE (SUBREG_REG (rhs))
	  || SUBREG_BYTE (lhs) != SUBREG_BYTE (rhs)
	  || ! subreg_lowpart_p (lhs)
	  || (GET_MODE_CLASS (GET_MODE (lhs))
	      != GET_MODE_CLASS (GET_MODE (SUBREG_REG (lhs))))
	  || (GET_MODE_SIZE (GET_MODE (lhs))
	      > GET_MODE_SIZE (GET_MODE (SUBREG_REG (lhs))))
	  || VECTOR_MODE_P (GET_MODE (lhs))
	  || GET_MODE_SIZE (GET_MODE (SUBREG_REG (lhs))) > UNITS_PER_WORD
	  /* Result might need to be truncated.  Don't change mode if
	     explicit truncation is needed.  */
	  || !TRULY_NOOP_TRUNCATION
	       (GET_MODE_BITSIZE (GET_MODE (x)),
		GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (lhs)))))
	return x;

      tem = simplify_gen_binary (code, GET_MODE (SUBREG_REG (lhs)),
				 SUBREG_REG (lhs), SUBREG_REG (rhs));
      return gen_lowpart (GET_MODE (x), tem);

    default:
      return x;
    }

  /* Set LHS and RHS to the inner operands (A and B in the example
     above) and set OTHER to the common operand (C in the example).
     There is only one way to do this unless the inner operation is
     commutative.  */
  if (COMMUTATIVE_ARITH_P (lhs)
      && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 0)))
    other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 1);
  else if (COMMUTATIVE_ARITH_P (lhs)
	   && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 1)))
    other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 0);
  else if (COMMUTATIVE_ARITH_P (lhs)
	   && rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 0)))
    other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 1);
  else if (rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 1)))
    other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 0);
  else
    return x;

  /* Form the new inner operation, seeing if it simplifies first.  */
  tem = simplify_gen_binary (code, GET_MODE (x), lhs, rhs);

  /* There is one exception to the general way of distributing:
     (a | c) ^ (b | c) -> (a ^ b) & ~c  */
  if (code == XOR && inner_code == IOR)
    {
      inner_code = AND;
      other = simplify_gen_unary (NOT, GET_MODE (x), other, GET_MODE (x));
    }

  /* We may be able to continuing distributing the result, so call
     ourselves recursively on the inner operation before forming the
     outer operation, which we return.  */
  return simplify_gen_binary (inner_code, GET_MODE (x),
			      apply_distributive_law (tem), other);
}

/* See if X is of the form (* (+ A B) C), and if so convert to
   (+ (* A C) (* B C)) and try to simplify.

   Most of the time, this results in no change.  However, if some of
   the operands are the same or inverses of each other, simplifications
   will result.

   For example, (and (ior A B) (not B)) can occur as the result of
   expanding a bit field assignment.  When we apply the distributive
   law to this, we get (ior (and (A (not B))) (and (B (not B)))),
   which then simplifies to (and (A (not B))).

   Note that no checks happen on the validity of applying the inverse
   distributive law.  This is pointless since we can do it in the
   few places where this routine is called.

   N is the index of the term that is decomposed (the arithmetic operation,
   i.e. (+ A B) in the first example above).  !N is the index of the term that
   is distributed, i.e. of C in the first example above.  */
static rtx
distribute_and_simplify_rtx (rtx x, int n)
{
  enum machine_mode mode;
  enum rtx_code outer_code, inner_code;
  rtx decomposed, distributed, inner_op0, inner_op1, new_op0, new_op1, tmp;

  decomposed = XEXP (x, n);
  if (!ARITHMETIC_P (decomposed))
    return NULL_RTX;

  mode = GET_MODE (x);
  outer_code = GET_CODE (x);
  distributed = XEXP (x, !n);

  inner_code = GET_CODE (decomposed);
  inner_op0 = XEXP (decomposed, 0);
  inner_op1 = XEXP (decomposed, 1);

  /* Special case (and (xor B C) (not A)), which is equivalent to
     (xor (ior A B) (ior A C))  */
  if (outer_code == AND && inner_code == XOR && GET_CODE (distributed) == NOT)
    {
      distributed = XEXP (distributed, 0);
      outer_code = IOR;
    }

  if (n == 0)
    {
      /* Distribute the second term.  */
      new_op0 = simplify_gen_binary (outer_code, mode, inner_op0, distributed);
      new_op1 = simplify_gen_binary (outer_code, mode, inner_op1, distributed);
    }
  else
    {
      /* Distribute the first term.  */
      new_op0 = simplify_gen_binary (outer_code, mode, distributed, inner_op0);
      new_op1 = simplify_gen_binary (outer_code, mode, distributed, inner_op1);
    }

  tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
						     new_op0, new_op1));
  if (GET_CODE (tmp) != outer_code
      && rtx_cost (tmp, SET) < rtx_cost (x, SET))
    return tmp;

  return NULL_RTX;
}

/* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done
   in MODE.  Return an equivalent form, if different from (and VAROP
   (const_int CONSTOP)).  Otherwise, return NULL_RTX.  */

static rtx
simplify_and_const_int_1 (enum machine_mode mode, rtx varop,
			  unsigned HOST_WIDE_INT constop)
{
  unsigned HOST_WIDE_INT nonzero;
  unsigned HOST_WIDE_INT orig_constop;
  rtx orig_varop;
  int i;

  orig_varop = varop;
  orig_constop = constop;
  if (GET_CODE (varop) == CLOBBER)
    return NULL_RTX;

  /* Simplify VAROP knowing that we will be only looking at some of the
     bits in it.

     Note by passing in CONSTOP, we guarantee that the bits not set in
     CONSTOP are not significant and will never be examined.  We must
     ensure that is the case by explicitly masking out those bits
     before returning.  */
  varop = force_to_mode (varop, mode, constop, 0);

  /* If VAROP is a CLOBBER, we will fail so return it.  */
  if (GET_CODE (varop) == CLOBBER)
    return varop;

  /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
     to VAROP and return the new constant.  */
  if (GET_CODE (varop) == CONST_INT)
    return gen_int_mode (INTVAL (varop) & constop, mode);

  /* See what bits may be nonzero in VAROP.  Unlike the general case of
     a call to nonzero_bits, here we don't care about bits outside
     MODE.  */

  nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);

  /* Turn off all bits in the constant that are known to already be zero.
     Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
     which is tested below.  */

  constop &= nonzero;

  /* If we don't have any bits left, return zero.  */
  if (constop == 0)
    return const0_rtx;

  /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
     a power of two, we can replace this with an ASHIFT.  */
  if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
      && (i = exact_log2 (constop)) >= 0)
    return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);

  /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
     or XOR, then try to apply the distributive law.  This may eliminate
     operations if either branch can be simplified because of the AND.
     It may also make some cases more complex, but those cases probably
     won't match a pattern either with or without this.  */

  if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR)
    return
      gen_lowpart
	(mode,
	 apply_distributive_law
	 (simplify_gen_binary (GET_CODE (varop), GET_MODE (varop),
			       simplify_and_const_int (NULL_RTX,
						       GET_MODE (varop),
						       XEXP (varop, 0),
						       constop),
			       simplify_and_const_int (NULL_RTX,
						       GET_MODE (varop),
						       XEXP (varop, 1),
						       constop))));

  /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
     the AND and see if one of the operands simplifies to zero.  If so, we
     may eliminate it.  */

  if (GET_CODE (varop) == PLUS
      && exact_log2 (constop + 1) >= 0)
    {
      rtx o0, o1;

      o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop);
      o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop);
      if (o0 == const0_rtx)
	return o1;
      if (o1 == const0_rtx)
	return o0;
    }

  /* Make a SUBREG if necessary.  If we can't make it, fail.  */
  varop = gen_lowpart (mode, varop);
  if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
    return NULL_RTX;

  /* If we are only masking insignificant bits, return VAROP.  */
  if (constop == nonzero)
    return varop;

  if (varop == orig_varop && constop == orig_constop)
    return NULL_RTX;

  /* Otherwise, return an AND.  */
  return simplify_gen_binary (AND, mode, varop, gen_int_mode (constop, mode));
}


/* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
   in MODE.

   Return an equivalent form, if different from X.  Otherwise, return X.  If
   X is zero, we are to always construct the equivalent form.  */

static rtx
simplify_and_const_int (rtx x, enum machine_mode mode, rtx varop,
			unsigned HOST_WIDE_INT constop)
{
  rtx tem = simplify_and_const_int_1 (mode, varop, constop);
  if (tem)
    return tem;

  if (!x)
    x = simplify_gen_binary (AND, GET_MODE (varop), varop,
			     gen_int_mode (constop, mode));
  if (GET_MODE (x) != mode)
    x = gen_lowpart (mode, x);
  return x;
}

/* Given a REG, X, compute which bits in X can be nonzero.
   We don't care about bits outside of those defined in MODE.

   For most X this is simply GET_MODE_MASK (GET_MODE (MODE)), but if X is
   a shift, AND, or zero_extract, we can do better.  */

static rtx
reg_nonzero_bits_for_combine (const_rtx x, enum machine_mode mode,
			      const_rtx known_x ATTRIBUTE_UNUSED,
			      enum machine_mode known_mode ATTRIBUTE_UNUSED,
			      unsigned HOST_WIDE_INT known_ret ATTRIBUTE_UNUSED,
			      unsigned HOST_WIDE_INT *nonzero)
{
  rtx tem;
  reg_stat_type *rsp;

  /* If X is a register whose nonzero bits value is current, use it.
     Otherwise, if X is a register whose value we can find, use that
     value.  Otherwise, use the previously-computed global nonzero bits
     for this register.  */

  rsp = VEC_index (reg_stat_type, reg_stat, REGNO (x));
  if (rsp->last_set_value != 0
      && (rsp->last_set_mode == mode
	  || (GET_MODE_CLASS (rsp->last_set_mode) == MODE_INT
	      && GET_MODE_CLASS (mode) == MODE_INT))
      && ((rsp->last_set_label >= label_tick_ebb_start
	   && rsp->last_set_label < label_tick)
	  || (rsp->last_set_label == label_tick
              && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
	  || (REGNO (x) >= FIRST_PSEUDO_REGISTER
	      && REG_N_SETS (REGNO (x)) == 1
	      && !REGNO_REG_SET_P
	          (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), REGNO (x)))))
    {
      *nonzero &= rsp->last_set_nonzero_bits;
      return NULL;
    }

  tem = get_last_value (x);

  if (tem)
    {
#ifdef SHORT_IMMEDIATES_SIGN_EXTEND
      /* If X is narrower than MODE and TEM is a non-negative
	 constant that would appear negative in the mode of X,
	 sign-extend it for use in reg_nonzero_bits because some
	 machines (maybe most) will actually do the sign-extension
	 and this is the conservative approach.

	 ??? For 2.5, try to tighten up the MD files in this regard
	 instead of this kludge.  */

      if (GET_MODE_BITSIZE (GET_MODE (x)) < GET_MODE_BITSIZE (mode)
	  && GET_CODE (tem) == CONST_INT
	  && INTVAL (tem) > 0
	  && 0 != (INTVAL (tem)
		   & ((HOST_WIDE_INT) 1
		      << (GET_MODE_BITSIZE (GET_MODE (x)) - 1))))
	tem = GEN_INT (INTVAL (tem)
		       | ((HOST_WIDE_INT) (-1)
			  << GET_MODE_BITSIZE (GET_MODE (x))));
#endif
      return tem;
    }
  else if (nonzero_sign_valid && rsp->nonzero_bits)
    {
      unsigned HOST_WIDE_INT mask = rsp->nonzero_bits;

      if (GET_MODE_BITSIZE (GET_MODE (x)) < GET_MODE_BITSIZE (mode))
	/* We don't know anything about the upper bits.  */
	mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (GET_MODE (x));
      *nonzero &= mask;
    }

  return NULL;
}

/* Return the number of bits at the high-order end of X that are known to
   be equal to the sign bit.  X will be used in mode MODE; if MODE is
   VOIDmode, X will be used in its own mode.  The returned value  will always
   be between 1 and the number of bits in MODE.  */

static rtx
reg_num_sign_bit_copies_for_combine (const_rtx x, enum machine_mode mode,
				     const_rtx known_x ATTRIBUTE_UNUSED,
				     enum machine_mode known_mode
				     ATTRIBUTE_UNUSED,
				     unsigned int known_ret ATTRIBUTE_UNUSED,
				     unsigned int *result)
{
  rtx tem;
  reg_stat_type *rsp;

  rsp = VEC_index (reg_stat_type, reg_stat, REGNO (x));
  if (rsp->last_set_value != 0
      && rsp->last_set_mode == mode
      && ((rsp->last_set_label >= label_tick_ebb_start
	   && rsp->last_set_label < label_tick)
	  || (rsp->last_set_label == label_tick
              && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
	  || (REGNO (x) >= FIRST_PSEUDO_REGISTER
	      && REG_N_SETS (REGNO (x)) == 1
	      && !REGNO_REG_SET_P
	          (DF_LR_IN (ENTRY_BLOCK_PTR->next_bb), REGNO (x)))))
    {
      *result = rsp->last_set_sign_bit_copies;
      return NULL;
    }

  tem = get_last_value (x);
  if (tem != 0)
    return tem;

  if (nonzero_sign_valid && rsp->sign_bit_copies != 0
      && GET_MODE_BITSIZE (GET_MODE (x)) == GET_MODE_BITSIZE (mode))
    *result = rsp->sign_bit_copies;

  return NULL;
}

/* Return the number of "extended" bits there are in X, when interpreted
   as a quantity in MODE whose signedness is indicated by UNSIGNEDP.  For
   unsigned quantities, this is the number of high-order zero bits.
   For signed quantities, this is the number of copies of the sign bit
   minus 1.  In both case, this function returns the number of "spare"
   bits.  For example, if two quantities for which this function returns
   at least 1 are added, the addition is known not to overflow.

   This function will always return 0 unless called during combine, which
   implies that it must be called from a define_split.  */

unsigned int
extended_count (const_rtx x, enum machine_mode mode, int unsignedp)
{
  if (nonzero_sign_valid == 0)
    return 0;

  return (unsignedp
	  ? (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	     ? (unsigned int) (GET_MODE_BITSIZE (mode) - 1
			       - floor_log2 (nonzero_bits (x, mode)))
	     : 0)
	  : num_sign_bit_copies (x, mode) - 1);
}

/* This function is called from `simplify_shift_const' to merge two
   outer operations.  Specifically, we have already found that we need
   to perform operation *POP0 with constant *PCONST0 at the outermost
   position.  We would now like to also perform OP1 with constant CONST1
   (with *POP0 being done last).

   Return 1 if we can do the operation and update *POP0 and *PCONST0 with
   the resulting operation.  *PCOMP_P is set to 1 if we would need to
   complement the innermost operand, otherwise it is unchanged.

   MODE is the mode in which the operation will be done.  No bits outside
   the width of this mode matter.  It is assumed that the width of this mode
   is smaller than or equal to HOST_BITS_PER_WIDE_INT.

   If *POP0 or OP1 are UNKNOWN, it means no operation is required.  Only NEG, PLUS,
   IOR, XOR, and AND are supported.  We may set *POP0 to SET if the proper
   result is simply *PCONST0.

   If the resulting operation cannot be expressed as one operation, we
   return 0 and do not change *POP0, *PCONST0, and *PCOMP_P.  */

static int
merge_outer_ops (enum rtx_code *pop0, HOST_WIDE_INT *pconst0, enum rtx_code op1, HOST_WIDE_INT const1, enum machine_mode mode, int *pcomp_p)
{
  enum rtx_code op0 = *pop0;
  HOST_WIDE_INT const0 = *pconst0;

  const0 &= GET_MODE_MASK (mode);
  const1 &= GET_MODE_MASK (mode);

  /* If OP0 is an AND, clear unimportant bits in CONST1.  */
  if (op0 == AND)
    const1 &= const0;

  /* If OP0 or OP1 is UNKNOWN, this is easy.  Similarly if they are the same or
     if OP0 is SET.  */

  if (op1 == UNKNOWN || op0 == SET)
    return 1;

  else if (op0 == UNKNOWN)
    op0 = op1, const0 = const1;

  else if (op0 == op1)
    {
      switch (op0)
	{
	case AND:
	  const0 &= const1;
	  break;
	case IOR:
	  const0 |= const1;
	  break;
	case XOR:
	  const0 ^= const1;
	  break;
	case PLUS:
	  const0 += const1;
	  break;
	case NEG:
	  op0 = UNKNOWN;
	  break;
	default:
	  break;
	}
    }

  /* Otherwise, if either is a PLUS or NEG, we can't do anything.  */
  else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG)
    return 0;

  /* If the two constants aren't the same, we can't do anything.  The
     remaining six cases can all be done.  */
  else if (const0 != const1)
    return 0;

  else
    switch (op0)
      {
      case IOR:
	if (op1 == AND)
	  /* (a & b) | b == b */
	  op0 = SET;
	else /* op1 == XOR */
	  /* (a ^ b) | b == a | b */
	  {;}
	break;

      case XOR:
	if (op1 == AND)
	  /* (a & b) ^ b == (~a) & b */
	  op0 = AND, *pcomp_p = 1;
	else /* op1 == IOR */
	  /* (a | b) ^ b == a & ~b */
	  op0 = AND, const0 = ~const0;
	break;

      case AND:
	if (op1 == IOR)
	  /* (a | b) & b == b */
	op0 = SET;
	else /* op1 == XOR */
	  /* (a ^ b) & b) == (~a) & b */
	  *pcomp_p = 1;
	break;
      default:
	break;
      }

  /* Check for NO-OP cases.  */
  const0 &= GET_MODE_MASK (mode);
  if (const0 == 0
      && (op0 == IOR || op0 == XOR || op0 == PLUS))
    op0 = UNKNOWN;
  else if (const0 == 0 && op0 == AND)
    op0 = SET;
  else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode)
	   && op0 == AND)
    op0 = UNKNOWN;

  /* ??? Slightly redundant with the above mask, but not entirely.
     Moving this above means we'd have to sign-extend the mode mask
     for the final test.  */
  const0 = trunc_int_for_mode (const0, mode);

  *pop0 = op0;
  *pconst0 = const0;

  return 1;
}

/* Simplify a shift of VAROP by COUNT bits.  CODE says what kind of shift.
   The result of the shift is RESULT_MODE.  Return NULL_RTX if we cannot
   simplify it.  Otherwise, return a simplified value.

   The shift is normally computed in the widest mode we find in VAROP, as
   long as it isn't a different number of words than RESULT_MODE.  Exceptions
   are ASHIFTRT and ROTATE, which are always done in their original mode.  */

static rtx
simplify_shift_const_1 (enum rtx_code code, enum machine_mode result_mode,
			rtx varop, int orig_count)
{
  enum rtx_code orig_code = code;
  rtx orig_varop = varop;
  int count;
  enum machine_mode mode = result_mode;
  enum machine_mode shift_mode, tmode;
  unsigned int mode_words
    = (GET_MODE_SIZE (mode) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD;
  /* We form (outer_op (code varop count) (outer_const)).  */
  enum rtx_code outer_op = UNKNOWN;
  HOST_WIDE_INT outer_const = 0;
  int complement_p = 0;
  rtx new, x;

  /* Make sure and truncate the "natural" shift on the way in.  We don't
     want to do this inside the loop as it makes it more difficult to
     combine shifts.  */
  if (SHIFT_COUNT_TRUNCATED)
    orig_count &= GET_MODE_BITSIZE (mode) - 1;

  /* If we were given an invalid count, don't do anything except exactly
     what was requested.  */

  if (orig_count < 0 || orig_count >= (int) GET_MODE_BITSIZE (mode))
    return NULL_RTX;

  count = orig_count;

  /* Unless one of the branches of the `if' in this loop does a `continue',
     we will `break' the loop after the `if'.  */

  while (count != 0)
    {
      /* If we have an operand of (clobber (const_int 0)), fail.  */
      if (GET_CODE (varop) == CLOBBER)
	return NULL_RTX;

      /* If we discovered we had to complement VAROP, leave.  Making a NOT
	 here would cause an infinite loop.  */
      if (complement_p)
	break;

      /* Convert ROTATERT to ROTATE.  */
      if (code == ROTATERT)
	{
	  unsigned int bitsize = GET_MODE_BITSIZE (result_mode);;
	  code = ROTATE;
	  if (VECTOR_MODE_P (result_mode))
	    count = bitsize / GET_MODE_NUNITS (result_mode) - count;
	  else
	    count = bitsize - count;
	}

      /* We need to determine what mode we will do the shift in.  If the
	 shift is a right shift or a ROTATE, we must always do it in the mode
	 it was originally done in.  Otherwise, we can do it in MODE, the
	 widest mode encountered.  */
      shift_mode
	= (code == ASHIFTRT || code == LSHIFTRT || code == ROTATE
	   ? result_mode : mode);

      /* Handle cases where the count is greater than the size of the mode
	 minus 1.  For ASHIFT, use the size minus one as the count (this can
	 occur when simplifying (lshiftrt (ashiftrt ..))).  For rotates,
	 take the count modulo the size.  For other shifts, the result is
	 zero.

	 Since these shifts are being produced by the compiler by combining
	 multiple operations, each of which are defined, we know what the
	 result is supposed to be.  */

      if (count > (GET_MODE_BITSIZE (shift_mode) - 1))
	{
	  if (code == ASHIFTRT)
	    count = GET_MODE_BITSIZE (shift_mode) - 1;
	  else if (code == ROTATE || code == ROTATERT)
	    count %= GET_MODE_BITSIZE (shift_mode);
	  else
	    {
	      /* We can't simply return zero because there may be an
		 outer op.  */
	      varop = const0_rtx;
	      count = 0;
	      break;
	    }
	}

      /* An arithmetic right shift of a quantity known to be -1 or 0
	 is a no-op.  */
      if (code == ASHIFTRT
	  && (num_sign_bit_copies (varop, shift_mode)
	      == GET_MODE_BITSIZE (shift_mode)))
	{
	  count = 0;
	  break;
	}

      /* If we are doing an arithmetic right shift and discarding all but
	 the sign bit copies, this is equivalent to doing a shift by the
	 bitsize minus one.  Convert it into that shift because it will often
	 allow other simplifications.  */

      if (code == ASHIFTRT
	  && (count + num_sign_bit_copies (varop, shift_mode)
	      >= GET_MODE_BITSIZE (shift_mode)))
	count = GET_MODE_BITSIZE (shift_mode) - 1;

      /* We simplify the tests below and elsewhere by converting
	 ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
	 `make_compound_operation' will convert it to an ASHIFTRT for
	 those machines (such as VAX) that don't have an LSHIFTRT.  */
      if (GET_MODE_BITSIZE (shift_mode) <= HOST_BITS_PER_WIDE_INT
	  && code == ASHIFTRT
	  && ((nonzero_bits (varop, shift_mode)
	       & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (shift_mode) - 1)))
	      == 0))
	code = LSHIFTRT;

      if (((code == LSHIFTRT
	    && GET_MODE_BITSIZE (shift_mode) <= HOST_BITS_PER_WIDE_INT
	    && !(nonzero_bits (varop, shift_mode) >> count))
	   || (code == ASHIFT
	       && GET_MODE_BITSIZE (shift_mode) <= HOST_BITS_PER_WIDE_INT
	       && !((nonzero_bits (varop, shift_mode) << count)
		    & GET_MODE_MASK (shift_mode))))
	  && !side_effects_p (varop))
	varop = const0_rtx;

      switch (GET_CODE (varop))
	{
	case SIGN_EXTEND:
	case ZERO_EXTEND:
	case SIGN_EXTRACT:
	case ZERO_EXTRACT:
	  new = expand_compound_operation (varop);
	  if (new != varop)
	    {
	      varop = new;
	      continue;
	    }
	  break;

	case MEM:
	  /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
	     minus the width of a smaller mode, we can do this with a
	     SIGN_EXTEND or ZERO_EXTEND from the narrower memory location.  */
	  if ((code == ASHIFTRT || code == LSHIFTRT)
	      && ! mode_dependent_address_p (XEXP (varop, 0))
	      && ! MEM_VOLATILE_P (varop)
	      && (tmode = mode_for_size (GET_MODE_BITSIZE (mode) - count,
					 MODE_INT, 1)) != BLKmode)
	    {
	      new = adjust_address_nv (varop, tmode,
				       BYTES_BIG_ENDIAN ? 0
				       : count / BITS_PER_UNIT);

	      varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND
				     : ZERO_EXTEND, mode, new);
	      count = 0;
	      continue;
	    }
	  break;

	case SUBREG:
	  /* If VAROP is a SUBREG, strip it as long as the inner operand has
	     the same number of words as what we've seen so far.  Then store
	     the widest mode in MODE.  */
	  if (subreg_lowpart_p (varop)
	      && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop)))
		  > GET_MODE_SIZE (GET_MODE (varop)))
	      && (unsigned int) ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (varop)))
				  + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
		 == mode_words)
	    {
	      varop = SUBREG_REG (varop);
	      if (GET_MODE_SIZE (GET_MODE (varop)) > GET_MODE_SIZE (mode))
		mode = GET_MODE (varop);
	      continue;
	    }
	  break;

	case MULT:
	  /* Some machines use MULT instead of ASHIFT because MULT
	     is cheaper.  But it is still better on those machines to
	     merge two shifts into one.  */
	  if (GET_CODE (XEXP (varop, 1)) == CONST_INT
	      && exact_log2 (INTVAL (XEXP (varop, 1))) >= 0)
	    {
	      varop
		= simplify_gen_binary (ASHIFT, GET_MODE (varop),
				       XEXP (varop, 0),
				       GEN_INT (exact_log2 (
						INTVAL (XEXP (varop, 1)))));
	      continue;
	    }
	  break;

	case UDIV:
	  /* Similar, for when divides are cheaper.  */
	  if (GET_CODE (XEXP (varop, 1)) == CONST_INT
	      && exact_log2 (INTVAL (XEXP (varop, 1))) >= 0)
	    {
	      varop
		= simplify_gen_binary (LSHIFTRT, GET_MODE (varop),
				       XEXP (varop, 0),
				       GEN_INT (exact_log2 (
						INTVAL (XEXP (varop, 1)))));
	      continue;
	    }
	  break;

	case ASHIFTRT:
	  /* If we are extracting just the sign bit of an arithmetic
	     right shift, that shift is not needed.  However, the sign
	     bit of a wider mode may be different from what would be
	     interpreted as the sign bit in a narrower mode, so, if
	     the result is narrower, don't discard the shift.  */
	  if (code == LSHIFTRT
	      && count == (GET_MODE_BITSIZE (result_mode) - 1)
	      && (GET_MODE_BITSIZE (result_mode)
		  >= GET_MODE_BITSIZE (GET_MODE (varop))))
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }

	  /* ... fall through ...  */

	case LSHIFTRT:
	case ASHIFT:
	case ROTATE:
	  /* Here we have two nested shifts.  The result is usually the
	     AND of a new shift with a mask.  We compute the result below.  */
	  if (GET_CODE (XEXP (varop, 1)) == CONST_INT
	      && INTVAL (XEXP (varop, 1)) >= 0
	      && INTVAL (XEXP (varop, 1)) < GET_MODE_BITSIZE (GET_MODE (varop))
	      && GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT
	      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	      && !VECTOR_MODE_P (result_mode))
	    {
	      enum rtx_code first_code = GET_CODE (varop);
	      unsigned int first_count = INTVAL (XEXP (varop, 1));
	      unsigned HOST_WIDE_INT mask;
	      rtx mask_rtx;

	      /* We have one common special case.  We can't do any merging if
		 the inner code is an ASHIFTRT of a smaller mode.  However, if
		 we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
		 with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
		 we can convert it to
		 (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0 C2) C3) C1).
		 This simplifies certain SIGN_EXTEND operations.  */
	      if (code == ASHIFT && first_code == ASHIFTRT
		  && count == (GET_MODE_BITSIZE (result_mode)
			       - GET_MODE_BITSIZE (GET_MODE (varop))))
		{
		  /* C3 has the low-order C1 bits zero.  */

		  mask = (GET_MODE_MASK (mode)
			  & ~(((HOST_WIDE_INT) 1 << first_count) - 1));

		  varop = simplify_and_const_int (NULL_RTX, result_mode,
						  XEXP (varop, 0), mask);
		  varop = simplify_shift_const (NULL_RTX, ASHIFT, result_mode,
						varop, count);
		  count = first_count;
		  code = ASHIFTRT;
		  continue;
		}

	      /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
		 than C1 high-order bits equal to the sign bit, we can convert
		 this to either an ASHIFT or an ASHIFTRT depending on the
		 two counts.

		 We cannot do this if VAROP's mode is not SHIFT_MODE.  */

	      if (code == ASHIFTRT && first_code == ASHIFT
		  && GET_MODE (varop) == shift_mode
		  && (num_sign_bit_copies (XEXP (varop, 0), shift_mode)
		      > first_count))
		{
		  varop = XEXP (varop, 0);
		  count -= first_count;
		  if (count < 0)
		    {
		      count = -count;
		      code = ASHIFT;
		    }

		  continue;
		}

	      /* There are some cases we can't do.  If CODE is ASHIFTRT,
		 we can only do this if FIRST_CODE is also ASHIFTRT.

		 We can't do the case when CODE is ROTATE and FIRST_CODE is
		 ASHIFTRT.

		 If the mode of this shift is not the mode of the outer shift,
		 we can't do this if either shift is a right shift or ROTATE.

		 Finally, we can't do any of these if the mode is too wide
		 unless the codes are the same.

		 Handle the case where the shift codes are the same
		 first.  */

	      if (code == first_code)
		{
		  if (GET_MODE (varop) != result_mode
		      && (code == ASHIFTRT || code == LSHIFTRT
			  || code == ROTATE))
		    break;

		  count += first_count;
		  varop = XEXP (varop, 0);
		  continue;
		}

	      if (code == ASHIFTRT
		  || (code == ROTATE && first_code == ASHIFTRT)
		  || GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT
		  || (GET_MODE (varop) != result_mode
		      && (first_code == ASHIFTRT || first_code == LSHIFTRT
			  || first_code == ROTATE
			  || code == ROTATE)))
		break;

	      /* To compute the mask to apply after the shift, shift the
		 nonzero bits of the inner shift the same way the
		 outer shift will.  */

	      mask_rtx = GEN_INT (nonzero_bits (varop, GET_MODE (varop)));

	      mask_rtx
		= simplify_const_binary_operation (code, result_mode, mask_rtx,
						   GEN_INT (count));

	      /* Give up if we can't compute an outer operation to use.  */
	      if (mask_rtx == 0
		  || GET_CODE (mask_rtx) != CONST_INT
		  || ! merge_outer_ops (&outer_op, &outer_const, AND,
					INTVAL (mask_rtx),
					result_mode, &complement_p))
		break;

	      /* If the shifts are in the same direction, we add the
		 counts.  Otherwise, we subtract them.  */
	      if ((code == ASHIFTRT || code == LSHIFTRT)
		  == (first_code == ASHIFTRT || first_code == LSHIFTRT))
		count += first_count;
	      else
		count -= first_count;

	      /* If COUNT is positive, the new shift is usually CODE,
		 except for the two exceptions below, in which case it is
		 FIRST_CODE.  If the count is negative, FIRST_CODE should
		 always be used  */
	      if (count > 0
		  && ((first_code == ROTATE && code == ASHIFT)
		      || (first_code == ASHIFTRT && code == LSHIFTRT)))
		code = first_code;
	      else if (count < 0)
		code = first_code, count = -count;

	      varop = XEXP (varop, 0);
	      continue;
	    }

	  /* If we have (A << B << C) for any shift, we can convert this to
	     (A << C << B).  This wins if A is a constant.  Only try this if
	     B is not a constant.  */

	  else if (GET_CODE (varop) == code
		   && GET_CODE (XEXP (varop, 0)) == CONST_INT
		   && GET_CODE (XEXP (varop, 1)) != CONST_INT)
	    {
	      rtx new = simplify_const_binary_operation (code, mode,
							 XEXP (varop, 0),
							 GEN_INT (count));
	      varop = gen_rtx_fmt_ee (code, mode, new, XEXP (varop, 1));
	      count = 0;
	      continue;
	    }
	  break;

	case NOT:
	  if (VECTOR_MODE_P (mode))
	    break;

	  /* Make this fit the case below.  */
	  varop = gen_rtx_XOR (mode, XEXP (varop, 0),
			       GEN_INT (GET_MODE_MASK (mode)));
	  continue;

	case IOR:
	case AND:
	case XOR:
	  /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
	     with C the size of VAROP - 1 and the shift is logical if
	     STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
	     we have an (le X 0) operation.   If we have an arithmetic shift
	     and STORE_FLAG_VALUE is 1 or we have a logical shift with
	     STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation.  */

	  if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS
	      && XEXP (XEXP (varop, 0), 1) == constm1_rtx
	      && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
	      && (code == LSHIFTRT || code == ASHIFTRT)
	      && count == (GET_MODE_BITSIZE (GET_MODE (varop)) - 1)
	      && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
	    {
	      count = 0;
	      varop = gen_rtx_LE (GET_MODE (varop), XEXP (varop, 1),
				  const0_rtx);

	      if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
		varop = gen_rtx_NEG (GET_MODE (varop), varop);

	      continue;
	    }

	  /* If we have (shift (logical)), move the logical to the outside
	     to allow it to possibly combine with another logical and the
	     shift to combine with another shift.  This also canonicalizes to
	     what a ZERO_EXTRACT looks like.  Also, some machines have
	     (and (shift)) insns.  */

	  if (GET_CODE (XEXP (varop, 1)) == CONST_INT
	      /* We can't do this if we have (ashiftrt (xor))  and the
		 constant has its sign bit set in shift_mode.  */
	      && !(code == ASHIFTRT && GET_CODE (varop) == XOR
		   && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
					      shift_mode))
	      && (new = simplify_const_binary_operation (code, result_mode,
							 XEXP (varop, 1),
							 GEN_INT (count))) != 0
	      && GET_CODE (new) == CONST_INT
	      && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
				  INTVAL (new), result_mode, &complement_p))
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }

	  /* If we can't do that, try to simplify the shift in each arm of the
	     logical expression, make a new logical expression, and apply
	     the inverse distributive law.  This also can't be done
	     for some (ashiftrt (xor)).  */
	  if (GET_CODE (XEXP (varop, 1)) == CONST_INT
	     && !(code == ASHIFTRT && GET_CODE (varop) == XOR
		  && 0 > trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
					     shift_mode)))
	    {
	      rtx lhs = simplify_shift_const (NULL_RTX, code, shift_mode,
					      XEXP (varop, 0), count);
	      rtx rhs = simplify_shift_const (NULL_RTX, code, shift_mode,
					      XEXP (varop, 1), count);

	      varop = simplify_gen_binary (GET_CODE (varop), shift_mode,
					   lhs, rhs);
	      varop = apply_distributive_law (varop);

	      count = 0;
	      continue;
	    }
	  break;

	case EQ:
	  /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
	     says that the sign bit can be tested, FOO has mode MODE, C is
	     GET_MODE_BITSIZE (MODE) - 1, and FOO has only its low-order bit
	     that may be nonzero.  */
	  if (code == LSHIFTRT
	      && XEXP (varop, 1) == const0_rtx
	      && GET_MODE (XEXP (varop, 0)) == result_mode
	      && count == (GET_MODE_BITSIZE (result_mode) - 1)
	      && GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT
	      && STORE_FLAG_VALUE == -1
	      && nonzero_bits (XEXP (varop, 0), result_mode) == 1
	      && merge_outer_ops (&outer_op, &outer_const, XOR,
				  (HOST_WIDE_INT) 1, result_mode,
				  &complement_p))
	    {
	      varop = XEXP (varop, 0);
	      count = 0;
	      continue;
	    }
	  break;

	case NEG:
	  /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
	     than the number of bits in the mode is equivalent to A.  */
	  if (code == LSHIFTRT
	      && count == (GET_MODE_BITSIZE (result_mode) - 1)
	      && nonzero_bits (XEXP (varop, 0), result_mode) == 1)
	    {
	      varop = XEXP (varop, 0);
	      count = 0;
	      continue;
	    }

	  /* NEG commutes with ASHIFT since it is multiplication.  Move the
	     NEG outside to allow shifts to combine.  */
	  if (code == ASHIFT
	      && merge_outer_ops (&outer_op, &outer_const, NEG,
				  (HOST_WIDE_INT) 0, result_mode,
				  &complement_p))
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }
	  break;

	case PLUS:
	  /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
	     is one less than the number of bits in the mode is
	     equivalent to (xor A 1).  */
	  if (code == LSHIFTRT
	      && count == (GET_MODE_BITSIZE (result_mode) - 1)
	      && XEXP (varop, 1) == constm1_rtx
	      && nonzero_bits (XEXP (varop, 0), result_mode) == 1
	      && merge_outer_ops (&outer_op, &outer_const, XOR,
				  (HOST_WIDE_INT) 1, result_mode,
				  &complement_p))
	    {
	      count = 0;
	      varop = XEXP (varop, 0);
	      continue;
	    }

	  /* If we have (xshiftrt (plus FOO BAR) C), and the only bits
	     that might be nonzero in BAR are those being shifted out and those
	     bits are known zero in FOO, we can replace the PLUS with FOO.
	     Similarly in the other operand order.  This code occurs when
	     we are computing the size of a variable-size array.  */

	  if ((code == ASHIFTRT || code == LSHIFTRT)
	      && count < HOST_BITS_PER_WIDE_INT
	      && nonzero_bits (XEXP (varop, 1), result_mode) >> count == 0
	      && (nonzero_bits (XEXP (varop, 1), result_mode)
		  & nonzero_bits (XEXP (varop, 0), result_mode)) == 0)
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }
	  else if ((code == ASHIFTRT || code == LSHIFTRT)
		   && count < HOST_BITS_PER_WIDE_INT
		   && GET_MODE_BITSIZE (result_mode) <= HOST_BITS_PER_WIDE_INT
		   && 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
			    >> count)
		   && 0 == (nonzero_bits (XEXP (varop, 0), result_mode)
			    & nonzero_bits (XEXP (varop, 1),
						 result_mode)))
	    {
	      varop = XEXP (varop, 1);
	      continue;
	    }

	  /* (ashift (plus foo C) N) is (plus (ashift foo N) C').  */
	  if (code == ASHIFT
	      && GET_CODE (XEXP (varop, 1)) == CONST_INT
	      && (new = simplify_const_binary_operation (ASHIFT, result_mode,
							 XEXP (varop, 1),
							 GEN_INT (count))) != 0
	      && GET_CODE (new) == CONST_INT
	      && merge_outer_ops (&outer_op, &outer_const, PLUS,
				  INTVAL (new), result_mode, &complement_p))
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }

	  /* Check for 'PLUS signbit', which is the canonical form of 'XOR
	     signbit', and attempt to change the PLUS to an XOR and move it to
	     the outer operation as is done above in the AND/IOR/XOR case
	     leg for shift(logical). See details in logical handling above
	     for reasoning in doing so.  */
	  if (code == LSHIFTRT
	      && GET_CODE (XEXP (varop, 1)) == CONST_INT
	      && mode_signbit_p (result_mode, XEXP (varop, 1))
	      && (new = simplify_const_binary_operation (code, result_mode,
							 XEXP (varop, 1),
							 GEN_INT (count))) != 0
	      && GET_CODE (new) == CONST_INT
	      && merge_outer_ops (&outer_op, &outer_const, XOR,
				  INTVAL (new), result_mode, &complement_p))
	    {
	      varop = XEXP (varop, 0);
	      continue;
	    }

	  break;

	case MINUS:
	  /* If we have (xshiftrt (minus (ashiftrt X C)) X) C)
	     with C the size of VAROP - 1 and the shift is logical if
	     STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
	     we have a (gt X 0) operation.  If the shift is arithmetic with
	     STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1,
	     we have a (neg (gt X 0)) operation.  */

	  if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
	      && GET_CODE (XEXP (varop, 0)) == ASHIFTRT
	      && count == (GET_MODE_BITSIZE (GET_MODE (varop)) - 1)
	      && (code == LSHIFTRT || code == ASHIFTRT)
	      && GET_CODE (XEXP (XEXP (varop, 0), 1)) == CONST_INT
	      && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
	      && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
	    {
	      count = 0;
	      varop = gen_rtx_GT (GET_MODE (varop), XEXP (varop, 1),
				  const0_rtx);

	      if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
		varop = gen_rtx_NEG (GET_MODE (varop), varop);

	      continue;
	    }
	  break;

	case TRUNCATE:
	  /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt))
	     if the truncate does not affect the value.  */
	  if (code == LSHIFTRT
	      && GET_CODE (XEXP (varop, 0)) == LSHIFTRT
	      && GET_CODE (XEXP (XEXP (varop, 0), 1)) == CONST_INT
	      && (INTVAL (XEXP (XEXP (varop, 0), 1))
		  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (varop, 0)))
		      - GET_MODE_BITSIZE (GET_MODE (varop)))))
	    {
	      rtx varop_inner = XEXP (varop, 0);

	      varop_inner
		= gen_rtx_LSHIFTRT (GET_MODE (varop_inner),
				    XEXP (varop_inner, 0),
				    GEN_INT
				    (count + INTVAL (XEXP (varop_inner, 1))));
	      varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner);
	      count = 0;
	      continue;
	    }
	  break;

	default:
	  break;
	}

      break;
    }

  /* We need to determine what mode to do the shift in.  If the shift is
     a right shift or ROTATE, we must always do it in the mode it was
     originally done in.  Otherwise, we can do it in MODE, the widest mode
     encountered.  The code we care about is that of the shift that will
     actually be done, not the shift that was originally requested.  */
  shift_mode
    = (code == ASHIFTRT || code == LSHIFTRT || code == ROTATE
       ? result_mode : mode);

  /* We have now finished analyzing the shift.  The result should be
     a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places.  If
     OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied
     to the result of the shift.  OUTER_CONST is the relevant constant,
     but we must turn off all bits turned off in the shift.  */

  if (outer_op == UNKNOWN
      && orig_code == code && orig_count == count
      && varop == orig_varop
      && shift_mode == GET_MODE (varop))
    return NULL_RTX;

  /* Make a SUBREG if necessary.  If we can't make it, fail.  */
  varop = gen_lowpart (shift_mode, varop);
  if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
    return NULL_RTX;

  /* If we have an outer operation and we just made a shift, it is
     possible that we could have simplified the shift were it not
     for the outer operation.  So try to do the simplification
     recursively.  */

  if (outer_op != UNKNOWN)
    x = simplify_shift_const_1 (code, shift_mode, varop, count);
  else
    x = NULL_RTX;

  if (x == NULL_RTX)
    x = simplify_gen_binary (code, shift_mode, varop, GEN_INT (count));