path: root/gcc/tree-ssa-loop-im.c
/* Loop invariant motion.
   Copyright (C) 2003, 2004 Free Software Foundation, Inc.
   
This file is part of GCC.
   
GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.
   
GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.
   
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "output.h"
#include "diagnostic.h"
#include "tree-flow.h"
#include "tree-dump.h"
#include "timevar.h"
#include "cfgloop.h"
#include "domwalk.h"
#include "params.h"
#include "tree-pass.h"
#include "flags.h"

/* A type for the list of statements that have to be moved in order to be able
   to hoist an invariant computation.  */

struct depend
{
  tree stmt;
  struct depend *next;
};

/* The auxiliary data kept for each statement.  */

struct lim_aux_data
{
  struct loop *max_loop;	/* The outermost loop in which the statement
				   is invariant.  */

  struct loop *tgt_loop;	/* The loop out of which we want to move the
				   invariant.  */

  struct loop *always_executed_in;
				/* The outermost loop for which we are sure
				   the statement is executed if the loop
				   is entered.  */

  bool sm_done;			/* True iff the store motion for a memory
				   reference in the statement has already
				   been executed.  */

  unsigned cost;		/* Cost of the computation performed by the
				   statement.  */

  struct depend *depends;	/* List of statements that must also be hoisted
				   out of the loop when this statement is
				   hoisted; i.e. those that define the operands
				   of the statement and are inside of the
				   MAX_LOOP loop.  */
};

#define LIM_DATA(STMT) (TREE_CODE (STMT) == PHI_NODE \
			? NULL \
			: (struct lim_aux_data *) (stmt_ann (STMT)->common.aux))

/* Description of a memory reference for store motion.  */

struct mem_ref
{
  tree *ref;			/* The reference itself.  */
  tree stmt;			/* The statement in which it occurs.  */
  struct mem_ref *next;		/* Next use in the chain.  */
};

/* Minimum cost of an expensive expression.  */
#define LIM_EXPENSIVE ((unsigned) PARAM_VALUE (PARAM_LIM_EXPENSIVE))

/* The outermost loop for which execution of the header guarantees that the
   block will be executed.  */
#define ALWAYS_EXECUTED_IN(BB) ((struct loop *) (BB)->aux)

static unsigned max_stmt_uid;	/* Maximal uid of a statement.  Uids of phi
				   nodes are assigned using the versions of
				   the ssa names they define.  */

/* Returns uid of statement STMT.  */

static unsigned
get_stmt_uid (tree stmt)
{
  if (TREE_CODE (stmt) == PHI_NODE)
    return SSA_NAME_VERSION (PHI_RESULT (stmt)) + max_stmt_uid;

  return stmt_ann (stmt)->uid;
}

/* Calls CBCK for each index in memory reference ADDR_P.  There are two
   kinds of situations handled; in each of these cases, the memory reference
   and DATA are passed to the callback:
   
   Access to an array: ARRAY_{RANGE_}REF (base, index).  In this case we also
   pass the pointer to the index to the callback.

   Pointer dereference: INDIRECT_REF (addr).  In this case we also pass the
   pointer to addr to the callback.
   
   If the callback returns false, the whole search stops and false is returned.
   Otherwise the function returns true after traversing through the whole
   reference *ADDR_P.  */
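
/* For illustration: for a hypothetical reference a[i].f[j], the callback is
   invoked on the index j, then (if the component f has a varying offset) on
   that offset, and finally on the index i.  */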

bool
for_each_index (tree *addr_p, bool (*cbck) (tree, tree *, void *), void *data)
{
  tree *nxt, *idx;

  for (; ; addr_p = nxt)
    {
      switch (TREE_CODE (*addr_p))
	{
	case SSA_NAME:
	  return cbck (*addr_p, addr_p, data);

	case MISALIGNED_INDIRECT_REF:
	case ALIGN_INDIRECT_REF:
	case INDIRECT_REF:
	  nxt = &TREE_OPERAND (*addr_p, 0);
	  return cbck (*addr_p, nxt, data);

	case BIT_FIELD_REF:
	case VIEW_CONVERT_EXPR:
	case ARRAY_RANGE_REF:
	case REALPART_EXPR:
	case IMAGPART_EXPR:
	  nxt = &TREE_OPERAND (*addr_p, 0);
	  break;

	case COMPONENT_REF:
	  /* If the component has varying offset, it behaves like index
	     as well.  */
	  idx = &TREE_OPERAND (*addr_p, 2);
	  if (*idx
	      && !cbck (*addr_p, idx, data))
	    return false;

	  nxt = &TREE_OPERAND (*addr_p, 0);
	  break;

	case ARRAY_REF:
	  nxt = &TREE_OPERAND (*addr_p, 0);
	  if (!cbck (*addr_p, &TREE_OPERAND (*addr_p, 1), data))
	    return false;
	  break;

	case VAR_DECL:
	case PARM_DECL:
	case STRING_CST:
	case RESULT_DECL:
	  return true;

	default:
    	  gcc_unreachable ();
	}
    }
}

/* If it is possible to hoist the statement STMT unconditionally,
   returns MOVE_POSSIBLE.
   If it is possible to hoist the statement STMT, but only if we avoid
   executing it when it would not be executed in the original program (e.g.
   because it may trap), return MOVE_PRESERVE_EXECUTION.
   Otherwise return MOVE_IMPOSSIBLE.  */
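
/* For illustration (a hypothetical example, not compiler output): in

     for (i = 0; i < n; i++)
       if (p)
	 x = *p + 1;

   the load of *p may trap when P is NULL, so the assignment to X is
   classified MOVE_PRESERVE_EXECUTION and may only be hoisted out of loops
   in which it is always executed.  */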

enum move_pos
movement_possibility (tree stmt)
{
  tree lhs, rhs;

  if (flag_unswitch_loops
      && TREE_CODE (stmt) == COND_EXPR)
    {
      /* If we perform unswitching, force the operands of the invariant
	 condition to be moved out of the loop.  */
      get_stmt_operands (stmt);

      return MOVE_POSSIBLE;
    }

  if (TREE_CODE (stmt) != MODIFY_EXPR)
    return MOVE_IMPOSSIBLE;

  if (stmt_ends_bb_p (stmt))
    return MOVE_IMPOSSIBLE;

  get_stmt_operands (stmt);

  if (stmt_ann (stmt)->has_volatile_ops)
    return MOVE_IMPOSSIBLE;

  lhs = TREE_OPERAND (stmt, 0);
  if (TREE_CODE (lhs) == SSA_NAME
      && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
    return MOVE_IMPOSSIBLE;

  rhs = TREE_OPERAND (stmt, 1);

  if (TREE_SIDE_EFFECTS (rhs))
    return MOVE_IMPOSSIBLE;

  if (TREE_CODE (lhs) != SSA_NAME
      || tree_could_trap_p (rhs))
    return MOVE_PRESERVE_EXECUTION;

  return MOVE_POSSIBLE;
}

/* Suppose that operand DEF is used inside the LOOP.  Returns the outermost
   loop to which we could move the expression using DEF if it did not have
   other operands, i.e. the outermost loop enclosing LOOP in which the value
   of DEF is invariant.  */

static struct loop *
outermost_invariant_loop (tree def, struct loop *loop)
{
  tree def_stmt;
  basic_block def_bb;
  struct loop *max_loop;

  if (TREE_CODE (def) != SSA_NAME)
    return superloop_at_depth (loop, 1);

  def_stmt = SSA_NAME_DEF_STMT (def);
  def_bb = bb_for_stmt (def_stmt);
  if (!def_bb)
    return superloop_at_depth (loop, 1);

  max_loop = find_common_loop (loop, def_bb->loop_father);

  if (LIM_DATA (def_stmt) && LIM_DATA (def_stmt)->max_loop)
    max_loop = find_common_loop (max_loop,
				 LIM_DATA (def_stmt)->max_loop->outer);
  if (max_loop == loop)
    return NULL;
  max_loop = superloop_at_depth (loop, max_loop->depth + 1);

  return max_loop;
}

/* Returns the outermost superloop of LOOP in which the expression EXPR is
   invariant.  */

static struct loop *
outermost_invariant_loop_expr (tree expr, struct loop *loop)
{
  enum tree_code_class class = TREE_CODE_CLASS (TREE_CODE (expr));
  unsigned i, nops;
  struct loop *max_loop = superloop_at_depth (loop, 1), *aloop;

  if (TREE_CODE (expr) == SSA_NAME
      || TREE_CODE (expr) == INTEGER_CST
      || is_gimple_min_invariant (expr))
    return outermost_invariant_loop (expr, loop);

  if (class != tcc_unary
      && class != tcc_binary
      && class != tcc_expression
      && class != tcc_comparison)
    return NULL;

  nops = first_rtl_op (TREE_CODE (expr));
  for (i = 0; i < nops; i++)
    {
      aloop = outermost_invariant_loop_expr (TREE_OPERAND (expr, i), loop);
      if (!aloop)
	return NULL;

      if (flow_loop_nested_p (max_loop, aloop))
	max_loop = aloop;
    }

  return max_loop;
}

/* DATA is a structure containing information associated with a statement
   inside LOOP.  DEF is one of the operands of this statement.

   Find the outermost loop enclosing LOOP in which the value of DEF is
   invariant and record this in the DATA->max_loop field.  If DEF itself is
   defined inside this loop as well (i.e. we need to hoist it out of the loop
   if we want to hoist the statement represented by DATA), record the
   statement in which DEF is defined in the DATA->depends list.  Additionally
   if ADD_COST is true, add the cost of the computation of DEF to DATA->cost.

   If DEF is not invariant in LOOP, return false.  Otherwise return true.  */

static bool
add_dependency (tree def, struct lim_aux_data *data, struct loop *loop,
		bool add_cost)
{
  tree def_stmt = SSA_NAME_DEF_STMT (def);
  basic_block def_bb = bb_for_stmt (def_stmt);
  struct loop *max_loop;
  struct depend *dep;

  if (!def_bb)
    return true;

  max_loop = outermost_invariant_loop (def, loop);
  if (!max_loop)
    return false;

  if (flow_loop_nested_p (data->max_loop, max_loop))
    data->max_loop = max_loop;

  if (!LIM_DATA (def_stmt))
    return true;

  if (add_cost
      /* Only add the cost if the statement defining DEF is inside LOOP,
	 i.e. if it is likely that by moving the invariants dependent
	 on it, we will be able to avoid creating a new register for
	 it (since it will be only used in these dependent invariants).  */
      && def_bb->loop_father == loop)
    data->cost += LIM_DATA (def_stmt)->cost;

  dep = xmalloc (sizeof (struct depend));
  dep->stmt = def_stmt;
  dep->next = data->depends;
  data->depends = dep;

  return true;
}

/* Returns an estimate for a cost of statement STMT.  TODO -- the values here
   are just ad-hoc constants.  The estimates should be based on target-specific
   values.  */

static unsigned
stmt_cost (tree stmt)
{
  tree lhs, rhs;
  unsigned cost = 1;

  /* Always try to create possibilities for unswitching.  */
  if (TREE_CODE (stmt) == COND_EXPR)
    return LIM_EXPENSIVE;

  lhs = TREE_OPERAND (stmt, 0);
  rhs = TREE_OPERAND (stmt, 1);

  /* Hoisting memory references out should almost surely be a win.  */
  if (!is_gimple_variable (lhs))
    cost += 20;
  if (is_gimple_addressable (rhs) && !is_gimple_variable (rhs))
    cost += 20;

  switch (TREE_CODE (rhs))
    {
    case CALL_EXPR:
      /* We should be hoisting calls if possible.  */

      /* Unless the call is a builtin_constant_p; this always folds to a
	 constant, so moving it is useless.  */
      rhs = get_callee_fndecl (rhs);
      if (DECL_BUILT_IN (rhs)
	  && DECL_FUNCTION_CODE (rhs) == BUILT_IN_CONSTANT_P)
	return 0;

      cost += 20;
      break;

    case MULT_EXPR:
    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case EXACT_DIV_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
    case TRUNC_MOD_EXPR:
      /* Division and multiplication are usually expensive.  */
      cost += 20;
      break;

    default:
      break;
    }

  return cost;
}

/* Determine the outermost loop to which it is possible to hoist a statement
   STMT and store it to LIM_DATA (STMT)->max_loop.  To do this we determine
   the outermost loop in which the value computed by STMT is invariant.
   If MUST_PRESERVE_EXEC is true, additionally choose the loop so that we
   preserve whether STMT is executed.  The function also fills in other
   related information in LIM_DATA (STMT).

   The function returns false if STMT cannot be hoisted outside of the loop it
   is defined in, and true otherwise.  */

static bool
determine_max_movement (tree stmt, bool must_preserve_exec)
{
  basic_block bb = bb_for_stmt (stmt);
  struct loop *loop = bb->loop_father;
  struct loop *level;
  struct lim_aux_data *lim_data = LIM_DATA (stmt);
  tree val;
  ssa_op_iter iter;
  
  if (must_preserve_exec)
    level = ALWAYS_EXECUTED_IN (bb);
  else
    level = superloop_at_depth (loop, 1);
  lim_data->max_loop = level;

  FOR_EACH_SSA_TREE_OPERAND (val, stmt, iter, SSA_OP_USE)
    if (!add_dependency (val, lim_data, loop, true))
      return false;

  FOR_EACH_SSA_TREE_OPERAND (val, stmt, iter, SSA_OP_VIRTUAL_USES | SSA_OP_VIRTUAL_KILLS)
    if (!add_dependency (val, lim_data, loop, false))
      return false;

  lim_data->cost += stmt_cost (stmt);

  return true;
}

/* Suppose that some statement in ORIG_LOOP is hoisted to the loop LEVEL,
   and that one of the operands of this statement is computed by STMT.
   Ensure that STMT (together with all the statements that define its
   operands) is hoisted at least out of the loop LEVEL.  */

static void
set_level (tree stmt, struct loop *orig_loop, struct loop *level)
{
  struct loop *stmt_loop = bb_for_stmt (stmt)->loop_father;
  struct depend *dep;

  stmt_loop = find_common_loop (orig_loop, stmt_loop);
  if (LIM_DATA (stmt) && LIM_DATA (stmt)->tgt_loop)
    stmt_loop = find_common_loop (stmt_loop,
				  LIM_DATA (stmt)->tgt_loop->outer);
  if (flow_loop_nested_p (stmt_loop, level))
    return;

  gcc_assert (LIM_DATA (stmt));
  gcc_assert (level == LIM_DATA (stmt)->max_loop
	      || flow_loop_nested_p (LIM_DATA (stmt)->max_loop, level));

  LIM_DATA (stmt)->tgt_loop = level;
  for (dep = LIM_DATA (stmt)->depends; dep; dep = dep->next)
    set_level (dep->stmt, orig_loop, level);
}

/* Determines the outermost loop from which we want to hoist the statement
   STMT.  For now we choose the outermost possible loop.  TODO -- use
   profiling information to set it more sanely.  */

static void
set_profitable_level (tree stmt)
{
  set_level (stmt, bb_for_stmt (stmt)->loop_father, LIM_DATA (stmt)->max_loop);
}

/* Returns true if STMT is a call that is not pure.  */

static bool
nonpure_call_p (tree stmt)
{
  tree call = get_call_expr_in (stmt);

  if (!call)
    return false;

  return TREE_SIDE_EFFECTS (call) != 0;
}

/* Releases the memory occupied by DATA.  */

static void
free_lim_aux_data (struct lim_aux_data *data)
{
  struct depend *dep, *next;

  for (dep = data->depends; dep; dep = next)
    {
      next = dep->next;
      free (dep);
    }
  free (data);
}

/* Determine the outermost loops in which statements in basic block BB are
   invariant, and record them in the LIM_DATA associated with the statements.
   Callback for walk_dominator_tree.  */

static void
determine_invariantness_stmt (struct dom_walk_data *dw_data ATTRIBUTE_UNUSED,
			      basic_block bb)
{
  enum move_pos pos;
  block_stmt_iterator bsi;
  tree stmt;
  bool maybe_never = ALWAYS_EXECUTED_IN (bb) == NULL;
  struct loop *outermost = ALWAYS_EXECUTED_IN (bb);

  if (!bb->loop_father->outer)
    return;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "Basic block %d (loop %d -- depth %d):\n\n",
	     bb->index, bb->loop_father->num, bb->loop_father->depth);

  for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
    {
      stmt = bsi_stmt (bsi);

      pos = movement_possibility (stmt);
      if (pos == MOVE_IMPOSSIBLE)
	{
	  if (nonpure_call_p (stmt))
	    {
	      maybe_never = true;
	      outermost = NULL;
	    }
	  continue;
	}

      stmt_ann (stmt)->common.aux = xcalloc (1, sizeof (struct lim_aux_data));
      LIM_DATA (stmt)->always_executed_in = outermost;

      if (maybe_never && pos == MOVE_PRESERVE_EXECUTION)
	continue;

      if (!determine_max_movement (stmt, pos == MOVE_PRESERVE_EXECUTION))
	{
	  LIM_DATA (stmt)->max_loop = NULL;
	  continue;
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  print_generic_stmt_indented (dump_file, stmt, 0, 2);
	  fprintf (dump_file, "  invariant up to level %d, cost %d.\n\n",
		   LIM_DATA (stmt)->max_loop->depth,
		   LIM_DATA (stmt)->cost);
	}

      if (LIM_DATA (stmt)->cost >= LIM_EXPENSIVE)
	set_profitable_level (stmt);
    }
}

/* For each statement, determine the outermost loop in which it is invariant,
   the statements on whose motion it depends, and the cost of the computation.
   This information is stored in the LIM_DATA structure associated with
   each statement.  */

static void
determine_invariantness (void)
{
  struct dom_walk_data walk_data;

  memset (&walk_data, 0, sizeof (struct dom_walk_data));
  walk_data.before_dom_children_before_stmts = determine_invariantness_stmt;

  init_walk_dominator_tree (&walk_data);
  walk_dominator_tree (&walk_data, ENTRY_BLOCK_PTR);
  fini_walk_dominator_tree (&walk_data);
}

/* Commits edge insertions and updates loop structures.  */

void
loop_commit_inserts (void)
{
  unsigned old_last_basic_block, i;
  basic_block bb;

  old_last_basic_block = last_basic_block;
  bsi_commit_edge_inserts (NULL);
  for (i = old_last_basic_block; i < (unsigned) last_basic_block; i++)
    {
      bb = BASIC_BLOCK (i);
      add_bb_to_loop (bb,
		      find_common_loop (EDGE_SUCC (bb, 0)->dest->loop_father,
					EDGE_PRED (bb, 0)->src->loop_father));
    }
}

/* Hoist the statements in basic block BB out of the loops prescribed by
   data stored in LIM_DATA structures associated with each statement.  Callback
   for walk_dominator_tree.  */

static void
move_computations_stmt (struct dom_walk_data *dw_data ATTRIBUTE_UNUSED,
			basic_block bb)
{
  struct loop *level;
  block_stmt_iterator bsi;
  tree stmt;
  unsigned cost = 0;

  if (!bb->loop_father->outer)
    return;

  for (bsi = bsi_start (bb); !bsi_end_p (bsi); )
    {
      stmt = bsi_stmt (bsi);

      if (!LIM_DATA (stmt))
	{
	  bsi_next (&bsi);
	  continue;
	}

      cost = LIM_DATA (stmt)->cost;
      level = LIM_DATA (stmt)->tgt_loop;
      free_lim_aux_data (LIM_DATA (stmt));
      stmt_ann (stmt)->common.aux = NULL;

      if (!level)
	{
	  bsi_next (&bsi);
	  continue;
	}

      /* We do not really want to move conditionals out of the loop; we just
	 placed them here to force their operands to be moved if necessary.
	 LIM_DATA was already cleared above, so the next iteration of the
	 scan advances BSI past this statement.  */
      if (TREE_CODE (stmt) == COND_EXPR)
	continue;

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Moving statement\n");
	  print_generic_stmt (dump_file, stmt, 0);
	  fprintf (dump_file, "(cost %u) out of loop %d.\n\n",
		   cost, level->num);
	}
      bsi_insert_on_edge (loop_preheader_edge (level), stmt);
      bsi_remove (&bsi);
    }
}

/* Hoist the statements out of the loops prescribed by data stored in
   LIM_DATA structures associated with each statement.  */

static void
move_computations (void)
{
  struct dom_walk_data walk_data;

  memset (&walk_data, 0, sizeof (struct dom_walk_data));
  walk_data.before_dom_children_before_stmts = move_computations_stmt;

  init_walk_dominator_tree (&walk_data);
  walk_dominator_tree (&walk_data, ENTRY_BLOCK_PTR);
  fini_walk_dominator_tree (&walk_data);

  loop_commit_inserts ();
  rewrite_into_ssa (false);
  if (bitmap_first_set_bit (vars_to_rename) >= 0)
    {
      /* The rewrite of ssa names may cause violation of loop closed ssa
	 form invariants.  TODO -- avoid these rewrites completely.
	 Information in virtual phi nodes is sufficient for it.  */
      rewrite_into_loop_closed_ssa ();
    }
  bitmap_clear (vars_to_rename);
}

/* Checks whether the statement defining variable *INDEX can be hoisted
   out of the loop passed in DATA.  Callback for for_each_index.  */

static bool
may_move_till (tree ref, tree *index, void *data)
{
  struct loop *loop = data, *max_loop;

  /* If REF is an array reference, check also that the step and the lower
     bound are invariant in LOOP.  */
  if (TREE_CODE (ref) == ARRAY_REF)
    {
      tree step = array_ref_element_size (ref);
      tree lbound = array_ref_low_bound (ref);

      max_loop = outermost_invariant_loop_expr (step, loop);
      if (!max_loop)
	return false;

      max_loop = outermost_invariant_loop_expr (lbound, loop);
      if (!max_loop)
	return false;
    }

  max_loop = outermost_invariant_loop (*index, loop);
  if (!max_loop)
    return false;

  return true;
}

/* Forces statements defining (invariant) SSA names in expression EXPR to be
   moved out of the LOOP.  ORIG_LOOP is the loop in which EXPR is used.  */

static void
force_move_till_expr (tree expr, struct loop *orig_loop, struct loop *loop)
{
  enum tree_code_class class = TREE_CODE_CLASS (TREE_CODE (expr));
  unsigned i, nops;

  if (TREE_CODE (expr) == SSA_NAME)
    {
      tree stmt = SSA_NAME_DEF_STMT (expr);
      if (IS_EMPTY_STMT (stmt))
	return;

      set_level (stmt, orig_loop, loop);
      return;
    }

  if (class != tcc_unary
      && class != tcc_binary
      && class != tcc_expression
      && class != tcc_comparison)
    return;

  nops = first_rtl_op (TREE_CODE (expr));
  for (i = 0; i < nops; i++)
    force_move_till_expr (TREE_OPERAND (expr, i), orig_loop, loop);
}

/* Forces statements defining invariants in REF (and *INDEX) to be moved out of
   the LOOP.  The reference REF is used in the loop ORIG_LOOP.  Callback for
   for_each_index.  */

struct fmt_data
{
  struct loop *loop;
  struct loop *orig_loop;
};

static bool
force_move_till (tree ref, tree *index, void *data)
{
  tree stmt;
  struct fmt_data *fmt_data = data;

  if (TREE_CODE (ref) == ARRAY_REF)
    {
      tree step = array_ref_element_size (ref);
      tree lbound = array_ref_low_bound (ref);

      force_move_till_expr (step, fmt_data->orig_loop, fmt_data->loop);
      force_move_till_expr (lbound, fmt_data->orig_loop, fmt_data->loop);
    }

  if (TREE_CODE (*index) != SSA_NAME)
    return true;

  stmt = SSA_NAME_DEF_STMT (*index);
  if (IS_EMPTY_STMT (stmt))
    return true;

  set_level (stmt, fmt_data->orig_loop, fmt_data->loop);

  return true;
}

/* Records memory reference *REF (that occurs in statement STMT)
   to the list MEM_REFS.  */

static void
record_mem_ref (struct mem_ref **mem_refs, tree stmt, tree *ref)
{
  struct mem_ref *aref = xmalloc (sizeof (struct mem_ref));

  aref->stmt = stmt;
  aref->ref = ref;

  aref->next = *mem_refs;
  *mem_refs = aref;
}

/* Releases list of memory references MEM_REFS.  */

static void
free_mem_refs (struct mem_ref *mem_refs)
{
  struct mem_ref *act;

  while (mem_refs)
    {
      act = mem_refs;
      mem_refs = mem_refs->next;
      free (act);
    }
}

/* If VAR is defined in LOOP and the statement defining it does not belong
   to the set SEEN, add that statement to the QUEUE (whose current length is
   *IN_QUEUE) and to the set SEEN.  */

static void
maybe_queue_var (tree var, struct loop *loop,
		 sbitmap seen, tree *queue, unsigned *in_queue)
{
  tree stmt = SSA_NAME_DEF_STMT (var);
  basic_block def_bb = bb_for_stmt (stmt);
	      
  if (!def_bb
      || !flow_bb_inside_loop_p (loop, def_bb)
      || TEST_BIT (seen, get_stmt_uid (stmt)))
    return;
	  
  SET_BIT (seen, get_stmt_uid (stmt));
  queue[(*in_queue)++] = stmt;
}

/* If COMMON_REF is NULL, set COMMON_REF to *OP and return true.
   Otherwise return true if the memory reference *OP is equal to COMMON_REF,
   and false if it is not.  Record the reference OP in the list MEM_REFS.
   STMT is the statement in which the reference occurs.  */

struct sra_data
{
  struct mem_ref **mem_refs;
  tree common_ref;
  tree stmt;
};

static bool
fem_single_reachable_address (tree *op, void *data)
{
  struct sra_data *sra_data = data;

  if (sra_data->common_ref
      && !operand_equal_p (*op, sra_data->common_ref, 0))
    return false;
  sra_data->common_ref = *op;

  record_mem_ref (sra_data->mem_refs, sra_data->stmt, op);
  return true;
}

/* Runs CALLBACK for each operand of STMT that is a memory reference.  DATA
   is passed to the CALLBACK as well.  If CALLBACK returns false, the
   traversal stops and for_each_memref returns false as well.  Otherwise
   for_each_memref returns true.  */

static bool
for_each_memref (tree stmt, bool (*callback)(tree *, void *), void *data)
{
  tree *op;

  if (TREE_CODE (stmt) == RETURN_EXPR)
    stmt = TREE_OPERAND (stmt, 1);

  if (TREE_CODE (stmt) == MODIFY_EXPR)
    {
      op = &TREE_OPERAND (stmt, 0);
      if (TREE_CODE (*op) != SSA_NAME
	  && !callback (op, data))
	return false;

      op = &TREE_OPERAND (stmt, 1);
      if (TREE_CODE (*op) != SSA_NAME
	  && is_gimple_lvalue (*op)
	  && !callback (op, data))
	return false;

      stmt = TREE_OPERAND (stmt, 1);
    }

  if (TREE_CODE (stmt) == WITH_SIZE_EXPR)
    stmt = TREE_OPERAND (stmt, 0);

  if (TREE_CODE (stmt) == CALL_EXPR)
    {
      tree args;

      for (args = TREE_OPERAND (stmt, 1); args; args = TREE_CHAIN (args))
	{
	  op = &TREE_VALUE (args);

	  if (TREE_CODE (*op) != SSA_NAME
	      && is_gimple_lvalue (*op)
	      && !callback (op, data))
	    return false;
	}
    }

  return true;
}

/* Determine whether all memory references inside the LOOP that correspond
   to virtual ssa names defined in statement STMT are equal.
   If so, store the list of the references to MEM_REFS, and return one
   of them.  Otherwise store NULL to MEM_REFS and return NULL_TREE.
   *SEEN_CALL_STMT is set to true if the virtual operands suggest
   that the reference might be clobbered by a call inside the LOOP.  */

static tree
single_reachable_address (struct loop *loop, tree stmt,
			  struct mem_ref **mem_refs,
			  bool *seen_call_stmt)
{
  unsigned max_uid = max_stmt_uid + num_ssa_names;
  tree *queue = xmalloc (sizeof (tree) * max_uid);
  sbitmap seen = sbitmap_alloc (max_uid);
  unsigned in_queue = 1;
  dataflow_t df;
  unsigned i, n;
  struct sra_data sra_data;
  tree call;
  tree val;
  ssa_op_iter iter;

  sbitmap_zero (seen);

  *mem_refs = NULL;
  sra_data.mem_refs = mem_refs;
  sra_data.common_ref = NULL_TREE;

  queue[0] = stmt;
  SET_BIT (seen, get_stmt_uid (stmt));
  *seen_call_stmt = false;

  while (in_queue)
    {
      stmt = queue[--in_queue];
      sra_data.stmt = stmt;

      if (LIM_DATA (stmt)
	  && LIM_DATA (stmt)->sm_done)
	goto fail;

      switch (TREE_CODE (stmt))
	{
	case MODIFY_EXPR:
	case CALL_EXPR:
	case RETURN_EXPR:
	  if (!for_each_memref (stmt, fem_single_reachable_address,
				&sra_data))
	    goto fail;

	  /* If this is a function that may depend on the memory location,
	     record the fact.  We cannot directly refuse call clobbered
	     operands here, since sra_data.common_ref may not have been
	     set yet.  */
	  call = get_call_expr_in (stmt);
	  if (call
	      && !(call_expr_flags (call) & ECF_CONST))
	    *seen_call_stmt = true;

	  /* Traverse also definitions of the VUSES (there may be others
	     distinct from the one we used to get to this statement).  */
	  FOR_EACH_SSA_TREE_OPERAND (val, stmt, iter, SSA_OP_VIRTUAL_USES)
	    maybe_queue_var (val, loop, seen, queue, &in_queue);

	  break;

	case PHI_NODE:
	  for (i = 0; i < (unsigned) PHI_NUM_ARGS (stmt); i++)
	    if (TREE_CODE (PHI_ARG_DEF (stmt, i)) == SSA_NAME)
	      maybe_queue_var (PHI_ARG_DEF (stmt, i), loop,
		               seen, queue, &in_queue);
	  break;

	default:
	  goto fail;
	}

      /* Find uses of virtual names.  */
      df = get_immediate_uses (stmt);
      n = num_immediate_uses (df);

      for (i = 0; i < n; i++)
	{
	  stmt = immediate_use (df, i);

	  if (!flow_bb_inside_loop_p (loop, bb_for_stmt (stmt)))
	    continue;

	  if (TEST_BIT (seen, get_stmt_uid (stmt)))
	    continue;
	  SET_BIT (seen, get_stmt_uid (stmt));

	  queue[in_queue++] = stmt;
	}
    }

  free (queue);
  sbitmap_free (seen);

  return sra_data.common_ref;

fail:
  free_mem_refs (*mem_refs);
  *mem_refs = NULL;
  free (queue);
  sbitmap_free (seen);

  return NULL;
}

/* Rewrites memory references in list MEM_REFS by variable TMP_VAR.  */

static void
rewrite_mem_refs (tree tmp_var, struct mem_ref *mem_refs)
{
  tree var;
  ssa_op_iter iter;

  for (; mem_refs; mem_refs = mem_refs->next)
    {
      FOR_EACH_SSA_TREE_OPERAND (var, mem_refs->stmt, iter, SSA_OP_ALL_VIRTUALS)
	{
	  var = SSA_NAME_VAR (var);
	  bitmap_set_bit (vars_to_rename, var_ann (var)->uid);
	}

      *mem_refs->ref = tmp_var;
      modify_stmt (mem_refs->stmt);
    }
}

/* Records a request for store motion of memory reference REF from LOOP.
   MEM_REFS is the list of occurrences of the reference REF inside LOOP;
   these references are rewritten by a new temporary variable.
   Exits from the LOOP are stored in EXITS; there are N_EXITS of them.
   The initialization of the temporary variable is put in the preheader
   of the loop, and assignments to the reference from the temporary variable
   are emitted on exits.  */
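
/* For illustration (a hypothetical example, not compiler output), the
   transformation turns roughly

     while (cond)
       *p = f (*p);

   into

     lsm_tmp = *p;
     while (cond)
       lsm_tmp = f (lsm_tmp);
     *p = lsm_tmp;

   with the final store emitted on every exit edge of the loop.  */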

static void
schedule_sm (struct loop *loop, edge *exits, unsigned n_exits, tree ref,
	     struct mem_ref *mem_refs)
{
  struct mem_ref *aref;
  tree tmp_var;
  unsigned i;
  tree load, store;
  struct fmt_data fmt_data;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Executing store motion of ");
      print_generic_expr (dump_file, ref, 0);
      fprintf (dump_file, " from loop %d\n", loop->num);
    }

  tmp_var = make_rename_temp (TREE_TYPE (ref), "lsm_tmp");

  fmt_data.loop = loop;
  fmt_data.orig_loop = loop;
  for_each_index (&ref, force_move_till, &fmt_data);

  rewrite_mem_refs (tmp_var, mem_refs);
  for (aref = mem_refs; aref; aref = aref->next)
    if (LIM_DATA (aref->stmt))
      LIM_DATA (aref->stmt)->sm_done = true;

  /* Emit the load & stores.  */
  load = build (MODIFY_EXPR, void_type_node, tmp_var, ref);
  get_stmt_ann (load)->common.aux = xcalloc (1, sizeof (struct lim_aux_data));
  LIM_DATA (load)->max_loop = loop;
  LIM_DATA (load)->tgt_loop = loop;

  /* Put this into the latch, so that we are sure it will be processed after
     all dependencies.  The LIM_DATA set above makes move_computations hoist
     the load out to the preheader of LOOP afterwards.  */
  bsi_insert_on_edge (loop_latch_edge (loop), load);

  for (i = 0; i < n_exits; i++)
    {
      store = build (MODIFY_EXPR, void_type_node,
		     unshare_expr (ref), tmp_var);
      bsi_insert_on_edge (exits[i], store);
    }
}

/* Returns true if REF may be clobbered by calls.  */

static bool
is_call_clobbered_ref (tree ref)
{
  tree base;

  base = get_base_address (ref);
  if (!base)
    return true;

  if (DECL_P (base))
    return is_call_clobbered (base);

  if (INDIRECT_REF_P (base))
    {
      /* Check whether the alias tags associated with the pointer
	 are call clobbered.  */
      tree ptr = TREE_OPERAND (base, 0);
      struct ptr_info_def *pi = SSA_NAME_PTR_INFO (ptr);
      tree nmt = (pi) ? pi->name_mem_tag : NULL_TREE;
      tree tmt = var_ann (SSA_NAME_VAR (ptr))->type_mem_tag;

      if ((nmt && is_call_clobbered (nmt))
	  || (tmt && is_call_clobbered (tmt)))
	return true;

      return false;
    }

  gcc_unreachable ();
}

/* Determine whether all memory references inside LOOP corresponding to the
   virtual ssa name REG are equal to each other, and whether the address of
   this common reference can be hoisted outside of the loop.  If this is true,
   prepare the statements that load the value of the memory reference to a
   temporary variable in the loop preheader, store it back on the loop exits,
   and replace all the references inside LOOP by this temporary variable.
   The N_EXITS exit edges of LOOP are stored in EXITS.  */

static void
determine_lsm_reg (struct loop *loop, edge *exits, unsigned n_exits, tree reg)
{
  tree ref;
  struct mem_ref *mem_refs, *aref;
  struct loop *must_exec;
  bool sees_call;
  
  if (is_gimple_reg (reg))
    return;
  
  ref = single_reachable_address (loop, SSA_NAME_DEF_STMT (reg), &mem_refs,
				  &sees_call);
  if (!ref)
    return;

  /* If we cannot create an ssa name for the result, give up.  */
  if (!is_gimple_reg_type (TREE_TYPE (ref))
      || TREE_THIS_VOLATILE (ref))
    goto fail;

  /* If there is a call that may use the location, give up as well.  */
  if (sees_call
      && is_call_clobbered_ref (ref))
    goto fail;

  if (!for_each_index (&ref, may_move_till, loop))
    goto fail;

  if (tree_could_trap_p (ref))
    {
      /* If the memory access is unsafe (i.e. it might trap), ensure that one
	 of the statements in which it occurs is always executed when the loop
	 is entered.  This way we know that by moving the load from the
	 reference out of the loop we will not cause the error that would not
	 occur otherwise.

	 TODO -- in fact we would like to check for anticipability of the
	 reference, i.e. that on each path from loop entry to loop exit at
	 least one of the statements containing the memory reference is
	 executed.  */

      for (aref = mem_refs; aref; aref = aref->next)
	{
	  if (!LIM_DATA (aref->stmt))
	    continue;

	  must_exec = LIM_DATA (aref->stmt)->always_executed_in;
	  if (!must_exec)
	    continue;

	  if (must_exec == loop
	      || flow_loop_nested_p (must_exec, loop))
	    break;
	}

      if (!aref)
	goto fail;
    }

  schedule_sm (loop, exits, n_exits, ref, mem_refs);

fail: ;
  free_mem_refs (mem_refs);
}

/* Checks whether LOOP (with N_EXITS exits stored in the EXITS array) is
   suitable for a store motion optimization (i.e. whether we can insert
   statements on its exits).  */

static bool
loop_suitable_for_sm (struct loop *loop ATTRIBUTE_UNUSED, edge *exits,
		      unsigned n_exits)
{
  unsigned i;

  for (i = 0; i < n_exits; i++)
    if (exits[i]->flags & EDGE_ABNORMAL)
      return false;

  return true;
}

/* Try to perform store motion for all memory references modified inside
   LOOP.  */

static void
determine_lsm_loop (struct loop *loop)
{
  tree phi;
  unsigned n_exits;
  edge *exits = get_loop_exit_edges (loop, &n_exits);

  if (!loop_suitable_for_sm (loop, exits, n_exits))
    {
      free (exits);
      return;
    }

  for (phi = phi_nodes (loop->header); phi; phi = TREE_CHAIN (phi))
    determine_lsm_reg (loop, exits, n_exits, PHI_RESULT (phi));

  free (exits);
}

/* Try to perform store motion for all memory references modified inside
   any of LOOPS.  */

static void
determine_lsm (struct loops *loops)
{
  struct loop *loop;
  basic_block bb;

  /* Create a UID for each statement in the function.  Ordering of the
     UIDs is not important for this pass.  */
  max_stmt_uid = 0;
  FOR_EACH_BB (bb)
    {
      block_stmt_iterator bsi;

      for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
	stmt_ann (bsi_stmt (bsi))->uid = max_stmt_uid++;
    }

  compute_immediate_uses (TDFA_USE_VOPS, NULL);

  /* Process the loops from the outermost inward.  For each virtual operand
     phi node in a loop header, check whether all the references inside the
     loop correspond to a single address, and if so, move them.  */

  loop = loops->tree_root->inner;
  while (1)
    {
      determine_lsm_loop (loop);

      if (loop->inner)
	{
	  loop = loop->inner;
	  continue;
	}
      while (!loop->next)
	{
	  loop = loop->outer;
	  if (loop == loops->tree_root)
	    {
	      free_df ();
	      loop_commit_inserts ();
	      return;
	    }
	}
      loop = loop->next;
    }
}

/* Fills the ALWAYS_EXECUTED_IN information for basic blocks of LOOP, i.e.
   for each such basic block bb records the outermost loop for which execution
   of its header implies execution of bb.  CONTAINS_CALL is the bitmap of
   blocks that contain a nonpure call.  */

static void
fill_always_executed_in (struct loop *loop, sbitmap contains_call)
{
  basic_block bb = NULL, *bbs, last = NULL;
  unsigned i;
  edge e;
  struct loop *inn_loop = loop;

  if (!loop->header->aux)
    {
      bbs = get_loop_body_in_dom_order (loop);

      for (i = 0; i < loop->num_nodes; i++)
	{
	  edge_iterator ei;
	  bb = bbs[i];

	  if (dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
	    last = bb;

	  if (TEST_BIT (contains_call, bb->index))
	    break;

	  FOR_EACH_EDGE (e, ei, bb->succs)
	    if (!flow_bb_inside_loop_p (loop, e->dest))
	      break;
	  if (e)
	    break;

	  /* A loop might be infinite (TODO use simple loop analysis
	     to disprove this if possible).  */
	  if (bb->flags & BB_IRREDUCIBLE_LOOP)
	    break;

	  if (!flow_bb_inside_loop_p (inn_loop, bb))
	    break;

	  if (bb->loop_father->header == bb)
	    {
	      if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
		break;

	      /* In a loop that is always entered we may proceed anyway.
		 But record that we entered it and stop once we leave it.  */
	      inn_loop = bb->loop_father;
	    }
	}

      while (1)
	{
	  last->aux = loop;
	  if (last == loop->header)
	    break;
	  last = get_immediate_dominator (CDI_DOMINATORS, last);
	}

      free (bbs);
    }

  for (loop = loop->inner; loop; loop = loop->next)
    fill_always_executed_in (loop, contains_call);
}

/* Compute the global information needed by the loop invariant motion pass.
   LOOPS is the loop tree.  */

static void
tree_ssa_lim_initialize (struct loops *loops)
{
  sbitmap contains_call = sbitmap_alloc (last_basic_block);
  block_stmt_iterator bsi;
  struct loop *loop;
  basic_block bb;

  sbitmap_zero (contains_call);
  FOR_EACH_BB (bb)
    {
      for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
	{
	  if (nonpure_call_p (bsi_stmt (bsi)))
	    break;
	}

      if (!bsi_end_p (bsi))
	SET_BIT (contains_call, bb->index);
    }

  for (loop = loops->tree_root->inner; loop; loop = loop->next)
    fill_always_executed_in (loop, contains_call);

  sbitmap_free (contains_call);
}

/* Cleans up after the invariant motion pass.  */

static void
tree_ssa_lim_finalize (void)
{
  basic_block bb;

  FOR_EACH_BB (bb)
    {
      bb->aux = NULL;
    }
}

/* Moves invariants from LOOPS.  Only "expensive" invariants are moved out --
   i.e. those that are likely to be a win regardless of the register
   pressure.  */

void
tree_ssa_lim (struct loops *loops)
{
  tree_ssa_lim_initialize (loops);

  /* For each statement determine the outermost loop in which it is
     invariant and the cost of computing the invariant.  */
  determine_invariantness ();

  /* For each memory reference determine whether it is possible to hoist it
     out of the loop.  Force the necessary invariants to be moved out of the
     loops as well.  */
  determine_lsm (loops);

  /* Move the expressions that are expensive enough.  */
  move_computations ();

  tree_ssa_lim_finalize ();
}
06' href='#n5006'>5006 5007 5008 5009 5010 5011 5012 5013 5014 5015 5016 5017 5018 5019 5020 5021 5022 5023 5024 5025 5026 5027 5028 5029 5030 5031 5032 5033 5034 5035 5036 5037 5038 5039 5040 5041 5042 5043 5044 5045 5046 5047 5048 5049 5050 5051 5052 5053 5054 5055 5056 5057 5058 5059 5060 5061 5062 5063 5064 5065 5066 5067 5068 5069 5070 5071 5072 5073 5074 5075 5076 5077 5078 5079 5080 5081 5082 5083 5084 5085 5086 5087 5088 5089 5090 5091 5092 5093 5094 5095 5096 5097 5098 5099 5100 5101 5102 5103 5104 5105 5106 5107 5108 5109 5110 5111 5112 5113 5114 5115 5116 5117 5118 5119 5120 5121 5122 5123 5124 5125 5126 5127 5128 5129 5130 5131 5132 5133 5134 5135 5136 5137 5138 5139 5140 5141 5142 5143 5144 5145 5146 5147 5148 5149 5150 5151 5152 5153 5154 5155 5156 5157 5158 5159 5160 5161 5162 5163 5164 5165 5166 5167 5168 5169 5170 5171 5172 5173 5174 5175 5176 5177 5178 5179 5180 5181 5182 5183 5184 5185 5186 5187 5188 5189 5190 5191 5192 5193 5194 5195 5196 5197 5198 5199 5200 5201 5202 5203 5204 5205 5206 5207 5208 5209 5210 5211 5212 5213 5214 5215 5216 5217 5218 5219 5220 5221 5222 5223 5224 5225 5226 5227 5228 5229 5230 5231 5232 5233 5234 5235 5236 5237 5238 5239 5240 5241 5242 5243 5244 5245 5246 5247 5248 5249 5250 5251 5252 5253 5254 5255 5256 5257 5258 5259 5260 5261 5262 5263 5264 5265 5266 5267 5268 5269 5270 5271 5272 5273 5274 5275 5276 5277 5278 5279 5280 5281 5282 5283 5284 5285 5286 5287 5288 5289 5290 5291 5292 5293 5294 5295 5296 5297 5298 5299 5300 5301 5302 5303 5304 5305 5306 5307 5308 5309 5310 5311 5312 5313 5314 5315 5316 5317 5318 5319 5320 5321 5322 5323 5324 5325 5326 5327 5328 5329 5330 5331 5332 5333 5334 5335 5336 5337 5338 5339 5340 5341 5342 5343 5344 5345 5346 5347 5348 5349 5350 5351 5352 5353 5354 5355 5356 5357 5358 5359 5360 5361 5362 5363 5364 5365 5366 5367 5368 5369 5370 5371 5372 5373 5374 5375 5376 5377 5378 5379 5380 5381 5382 5383 5384 5385 5386 5387 5388 5389 5390 5391 5392 5393 5394 5395 5396 5397 5398 5399 5400 5401 5402 5403 5404 5405 5406 5407 5408 5409 5410 5411 5412 5413 5414 5415 5416 5417 5418 5419 5420 5421 5422 5423 5424 5425 5426 5427 5428 5429 5430 5431 5432 5433 5434 5435 5436 5437 5438 5439 5440 5441 5442 5443 5444 5445 5446 5447 5448 5449 5450 5451 5452 5453 5454 5455 5456 5457 5458 5459 5460 5461 5462 5463 5464 5465 5466 5467 5468 5469 5470 5471 5472 5473 5474 5475 5476 5477 5478 5479 5480 5481 5482 5483 5484 5485 5486 5487 5488 5489 5490 5491 5492 5493 5494 5495 5496 5497 5498 5499 5500 5501 5502 5503 5504 5505 5506 5507 5508 5509 5510 5511 5512 5513 5514 5515 5516 5517 5518 5519 5520 5521 5522 5523 5524 5525 5526 5527 5528 5529 5530 5531 5532 5533 5534 5535 5536 5537 5538 5539 5540 5541 5542 5543 5544 5545 5546 5547 5548 5549 5550 5551 5552 5553 5554 5555 5556 5557 5558 5559 5560 5561 5562 5563 5564 5565 5566 5567 5568 5569 5570 5571 5572 5573 5574 5575 5576 5577 5578 5579 5580 5581 5582 5583 5584 5585 5586 5587 5588 5589 5590 5591 5592 5593 5594 5595 5596 5597 5598 5599 5600 5601 5602 5603 5604 5605 5606 5607 5608 5609 5610 5611 5612 5613 5614 5615 5616 5617 5618 5619 5620 5621 5622 5623 5624 5625 5626 5627 5628 5629 5630 5631 5632 5633 5634 5635 5636 5637 5638 5639 5640 5641 5642 5643 5644 5645 5646 5647 5648 5649 5650 5651 5652 5653 5654 5655 5656 5657 5658 5659 5660 5661 5662 5663 5664 5665 5666 5667 5668 5669 5670 5671 5672 5673 5674 5675 5676 5677 5678 5679 5680 5681 5682 5683 5684 5685 5686 5687 5688 5689 5690 5691 5692 5693 5694 5695 5696 5697 5698 5699 5700 5701 5702 5703 5704 5705 5706 5707 5708 5709 5710 5711 5712 
5713 5714 5715 5716 5717 5718 5719 5720 5721 5722 5723 5724 5725 5726 5727 5728 5729 5730 5731 5732 5733 5734 5735 5736 5737 5738 5739 5740 5741 5742 5743 5744 5745 5746 5747 5748 5749 5750 5751 5752 5753 5754 5755 5756 5757 5758 5759 5760 5761 5762 5763 5764 5765 5766 5767 5768 5769 5770 5771 5772 5773 5774 5775 5776 5777 5778 5779 5780 5781 5782 5783 5784 5785 5786 5787 5788 5789 5790 5791 5792 5793 5794 5795 5796 5797 5798 5799 5800 5801 5802 5803 5804 5805 5806 5807 5808 5809 5810 5811 5812 5813 5814 5815 5816 5817 5818 5819 5820 5821 5822 5823 5824 5825 5826 5827 5828 5829 5830 5831 5832 5833 5834 5835 5836 5837 5838 5839 5840 5841 5842 5843 5844 5845 5846 5847 5848 5849 5850 5851 5852 5853 5854 5855 5856 5857 5858 5859 5860 5861 5862 5863 5864 5865 5866 5867 5868 5869 5870 5871 5872 5873 5874 5875 5876 5877 5878 5879 5880 5881 5882 5883 5884 5885 5886 5887 5888 5889 5890 5891 5892 5893 5894 5895 5896 5897 5898 5899 5900 5901 5902 5903 5904 5905 5906 5907 5908 5909 5910 5911 5912 5913 5914 5915 5916 5917 5918 5919 5920 5921 5922 5923 5924 5925 5926 5927 5928 5929 5930 5931 5932 5933 5934 5935 5936 5937 5938 5939 5940 5941 5942 5943 5944 5945 5946 5947 5948 5949 5950 5951 5952 5953 5954 5955 5956 5957 5958 5959 5960 5961 5962 5963 5964 5965 5966 5967 5968 5969 5970 5971 5972 5973 5974 5975 5976 5977 5978 5979 5980 5981 5982 5983 5984 5985 5986 5987 5988 5989 5990 5991 5992 5993 5994 5995 5996 5997 5998 5999 6000 6001 6002 6003 6004 6005 6006 6007 6008 6009 6010 6011 6012 6013 6014 6015 6016 6017 6018 6019 6020 6021 6022 6023 6024 6025 6026 6027 6028 6029 6030 6031 6032 6033 6034 6035 6036 6037 6038 6039 6040 6041 6042 6043 6044 6045 6046 6047 6048 6049 6050 6051 6052 6053 6054 6055 6056 6057 6058 6059 6060 6061 6062 6063 6064 6065 6066 6067 6068 6069 6070 6071 6072 6073 6074 6075 6076 6077 6078 6079 6080 6081 6082 6083 6084 6085 6086 6087 6088 6089 6090 6091 6092 6093 6094 6095 6096 6097 6098 6099 6100 6101 6102 6103 6104 6105 6106 6107 6108 6109 6110 6111 6112 6113 6114 6115 6116 6117 6118 6119 6120 6121 6122 6123 6124 6125 6126 6127 6128 6129 6130 6131 6132 6133 6134 6135 6136 6137 6138 6139 6140 6141 6142 6143 6144 6145 6146 6147 6148 6149 6150 6151 6152 6153 6154 6155 6156 6157 6158 6159 6160 6161 6162 6163 6164 6165 6166 6167 6168 6169 6170 6171 6172 6173 6174 6175 6176 6177 6178 6179 6180 6181 6182 6183 6184 6185 6186 6187 6188 6189 6190 6191 6192 6193 6194 6195 6196 6197 6198 6199 6200 6201 6202 6203 6204 6205 6206 6207 6208 6209 6210 6211 6212 6213 6214 6215 6216 6217 6218 6219 6220 6221 6222 6223 6224 6225 6226 6227 6228 6229 6230 6231 6232 6233 6234 6235 6236 6237 6238 6239 6240 6241 6242 6243 6244 6245 6246 6247 6248 6249 6250 6251 6252 6253 6254 6255 6256 6257 6258 6259 6260 6261 6262 6263 6264 6265 6266 6267 6268 6269 6270 6271 6272 6273 6274 6275 6276 6277 6278 6279 6280 6281 6282 6283 6284 6285 6286 6287 6288 6289 6290 6291 6292 6293 6294 6295 6296 6297 6298 6299 6300 6301 6302 6303 6304 6305 6306 6307 6308 6309 6310 6311 6312 6313 6314 6315 6316 6317 6318 6319 6320 6321 6322 6323 6324 6325 6326 6327 6328 6329 6330 6331 6332 6333 6334 6335 6336 6337 6338 6339 6340 6341 6342 6343 6344 6345 6346 6347 6348 6349 6350 6351 6352 6353 6354 6355 6356 6357 6358 6359 6360 6361 6362 6363 6364 6365 6366 6367 6368 6369 6370 6371 6372 6373 6374 6375 6376 6377 6378 6379 6380 6381 6382 6383 6384 6385 6386 6387 6388 6389 6390 6391 6392 6393 6394 6395 6396 6397 6398 6399 6400 6401 6402 6403 6404 6405 6406 6407 6408 6409 6410 6411 6412 6413 6414 6415 6416 6417 6418 6419 6420 6421 6422 6423 
6424 6425 6426 6427 6428 6429 6430 6431 6432 6433 6434 6435 6436 6437 6438 6439 6440 6441 6442 6443 6444 6445 6446 6447 6448 6449 6450 6451 6452 6453 6454 6455 6456 6457 6458 6459 6460 6461 6462 6463 6464 6465 6466 6467 6468 6469 6470 6471 6472 6473 6474 6475 6476 6477 6478 6479 6480 6481 6482 6483 6484 6485 6486 6487 6488 6489 6490 6491 6492 6493 6494 6495 6496 6497 6498 6499 6500 6501 6502 6503 6504 6505 6506 6507 6508 6509 6510 6511 6512 6513 6514 6515 6516 6517 6518 6519 6520 6521 6522 6523 6524 6525 6526 6527 6528 6529 6530 6531 6532 6533 6534 6535 6536 6537 6538 6539 6540 6541 6542 6543 6544 6545 6546 6547 6548 6549 6550 6551 6552 6553 6554 6555 6556 6557 6558 6559 6560 6561 6562 6563 6564 6565 6566 6567 6568 6569 6570 6571 6572 6573 6574 6575 6576 6577 6578 6579 6580 6581 6582 6583 6584 6585 6586 6587 6588 6589 6590 6591 6592 6593 6594 6595 6596 6597 6598 6599 6600 6601 6602 6603 6604 6605 6606 6607 6608 6609 6610 6611 6612 6613 6614 6615 6616 6617 6618 6619 6620 6621 6622 6623 6624 6625 6626 6627 6628 6629 6630 6631 6632 6633 6634 6635 6636 6637 6638 6639 6640 6641 6642 6643 6644 6645 6646 6647 6648 6649 6650 6651 6652 6653 6654 6655 6656 6657 6658 6659 6660 6661 6662 6663 6664 6665 6666 6667 6668 6669 6670 6671 6672 6673 6674 6675 6676 6677 6678 6679 6680 6681 6682 6683 6684 6685 6686 6687 6688 6689 6690 6691 6692 6693 6694 6695 6696 6697 6698 6699 6700 6701 6702 6703 6704 6705 6706 6707 6708 6709 6710 6711 6712 6713 6714 6715 6716 6717 6718 6719 6720 6721 6722 6723 6724 6725 6726 6727 6728 6729 6730 6731 6732 6733 6734 6735 6736 6737 6738 6739 6740 6741 6742 6743 6744 6745 6746 6747 6748 6749 6750 6751 6752 6753 6754 6755 6756 6757 6758 6759 6760 6761 6762 6763 6764 6765 6766 6767 6768 6769 6770 6771 6772 6773 6774 6775 6776 6777 6778 6779 6780 6781 6782 6783 6784 6785 6786 6787 6788 6789 6790 6791 6792 6793 6794 6795 6796 6797 6798 6799 6800 6801 6802 6803 6804 6805 6806 6807 6808 6809 6810 6811 6812 6813 6814 6815 6816 6817 6818 6819 6820 6821 6822 6823 6824 6825 6826 6827 6828 6829 6830 6831 6832 6833 6834 6835 6836 6837 6838 6839 6840 6841 6842 6843 6844 6845 6846 6847 6848 6849 6850 6851 6852 6853 6854 6855 6856 6857 6858 6859 6860 6861 6862 6863 6864 6865 6866 6867 6868 6869 6870 6871 6872 6873 6874 6875 6876 6877 6878 6879 6880 6881 6882 6883 6884 6885 6886 6887 6888 6889 6890 6891 6892 6893 6894 6895 6896 6897 6898 6899 6900 6901 6902 6903 6904 6905 6906 6907 6908 6909 6910 6911 6912 6913 6914 6915 6916 6917 6918 6919 6920 6921 6922 6923 6924 6925 6926 6927 6928 6929 6930 6931 6932 6933 6934 6935 6936 6937 6938 6939 6940 6941 6942 6943 6944 6945 6946 6947 6948 6949 6950 6951 6952 6953 6954 6955 6956 6957 6958 6959 6960 6961 6962 6963 6964 6965 6966 6967 6968 6969 6970 6971 6972 6973 6974 6975 6976 6977 6978 6979 6980 6981 6982 6983 6984 6985 6986 6987 6988 6989 6990 6991 6992 6993 6994 6995 6996 6997 6998 6999 7000 7001 7002 7003 7004 7005 7006 7007 7008 7009 7010 7011 7012 7013 7014 7015 7016 7017 7018 7019 7020 7021 7022 7023 7024 7025 7026 7027 7028 7029 7030 7031 7032 7033 7034 7035 7036 7037 7038 7039 7040 7041 7042 7043 7044 7045 7046 7047 7048 7049 7050 7051 7052 7053 7054 7055 7056 7057 7058 7059 7060 7061 7062 7063 7064 7065 7066 7067 7068 7069 7070 7071 7072 7073 7074 7075 7076 7077 7078 7079 7080 7081 7082 7083 7084 7085 7086 7087 7088 7089 7090 7091 7092 7093 7094 7095 7096 7097 7098 7099 7100 7101 7102 7103 7104 7105 7106 7107 7108 7109 7110 7111 7112 7113 7114 7115 7116 7117 7118 7119 7120 7121 7122 7123 7124 7125 7126 7127 7128 7129 7130 7131 7132 7133 7134 
7135 7136 7137 7138 7139 7140 7141 7142 7143 7144 7145 7146 7147 7148 7149 7150 7151 7152 7153 7154 7155 7156 7157 7158 7159 7160 7161 7162 7163 7164 7165 7166 7167 7168 7169 7170 7171 7172 7173 7174 7175 7176 7177 7178 7179 7180 7181 7182 7183 7184 7185 7186 7187 7188 7189 7190 7191 7192 7193 7194 7195 7196 7197 7198 7199 7200 7201 7202 7203 7204 7205 7206 7207 7208 7209 7210 7211 7212 7213 7214 7215 7216 7217 7218 7219 7220 7221 7222 7223 7224 7225 7226 7227 7228 7229 7230 7231 7232 7233 7234 7235 7236 7237 7238 7239 7240 7241 7242 7243 7244 7245 7246 7247 7248 7249 7250 7251 7252 7253 7254 7255 7256 7257 7258 7259 7260 7261 7262 7263 7264 7265 7266 7267 7268 7269 7270 7271 7272 7273 7274 7275 7276 7277 7278 7279 7280 7281 7282 7283 7284 7285 7286 7287 7288 7289 7290 7291 7292 7293 7294 7295 7296 7297 7298 7299 7300 7301 7302 7303 7304 7305 7306 7307 7308 7309 7310 7311 7312 7313 7314 7315 7316 7317 7318 7319 7320 7321 7322 7323 7324 7325 7326 7327 7328 7329 7330 7331 7332 7333 7334 7335 7336 7337 7338 7339 7340 7341 7342 7343 7344 7345 7346 7347 7348 7349 7350 7351 7352 7353 7354 7355 7356 7357 7358 7359 7360 7361 7362 7363 7364 7365 7366 7367 7368 7369 7370 7371 7372 7373 7374 7375 7376 7377 7378 7379 7380 7381 7382 7383 7384 7385 7386 7387 7388 7389 7390 7391 7392 7393 7394 7395 7396 7397 7398 7399 7400 7401 7402 7403 7404 7405 7406 7407 7408 7409 7410 7411 7412 7413 7414 7415 7416 7417 7418 7419 7420 7421 7422 7423 7424 7425 7426 7427 7428 7429 7430 7431 7432 7433 7434 7435 7436 7437 7438 7439 7440 7441 7442 7443 7444 7445 7446 7447 7448 7449 7450 7451 7452 7453 7454 7455 7456 7457 7458 7459 7460 7461 7462 7463 7464 7465 7466 7467 7468 7469 7470 7471 7472 7473 7474 7475 7476 7477 7478 7479 7480 7481 7482 7483 7484 7485 7486 7487 7488 7489 7490 7491 7492 7493 7494 7495 7496 7497 7498 7499 7500 7501 7502 7503 7504 7505 7506 7507 7508 7509 7510 7511 7512 7513 7514 7515 7516 7517 7518 7519 7520 7521 7522 7523 7524 7525 7526 7527 7528 7529 7530 7531 7532 7533 7534 7535 7536 7537 7538 7539 7540 7541 7542 7543 7544 7545 7546 7547 7548 7549 7550 7551 7552 7553 7554 7555 7556 7557 7558 7559 7560 7561 7562 7563 7564 7565 7566 7567 7568 7569 7570 7571 7572 7573 7574 7575 7576 7577 7578 7579 7580 7581 7582 7583 7584 7585 7586 7587 7588 7589 7590 7591 7592 7593 7594 7595 7596 7597 7598 7599 7600 7601 7602 7603 7604 7605 7606 7607 7608 7609 7610 7611 7612 7613 7614 7615 7616 7617 7618 7619 7620 7621 7622 7623 7624 7625 7626 7627 7628 7629 7630 7631 7632 7633 7634 7635 7636 7637 7638 7639 7640 7641 7642 7643 7644 7645 7646 7647 7648 7649 7650 7651 7652 7653 7654 7655 7656 7657 7658 7659 7660 7661 7662 7663 7664 7665 7666 7667 7668 7669 7670 7671 7672 7673 7674 7675 7676 7677 7678 7679 7680 7681 7682 7683 7684 7685 7686 7687 7688 7689 7690 7691 7692 7693 7694 7695 7696 7697 7698 7699 7700 7701 7702 7703 7704 7705 7706 7707 7708 7709 7710 7711 7712 7713 7714 7715 7716 7717 7718 7719 7720 7721 7722 7723 7724 7725 7726 7727 7728 7729 7730 7731 7732 7733 7734 7735 7736 7737 7738 7739 7740 7741 7742 7743 7744 7745 7746 7747 7748 7749 7750 7751 7752 7753 7754 7755 7756 7757 7758 7759 7760 7761 7762 7763 7764 7765 7766 7767 7768 7769 7770 7771 7772 7773 7774 7775 7776 7777 7778 7779 7780 7781 7782 7783 7784 7785 7786 7787 7788 7789 7790 7791 7792 7793 7794 7795 7796 7797 7798
/* Common subexpression elimination for GNU compiler.
   Copyright (C) 1987-2020 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "cfghooks.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "insn-config.h"
#include "regs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "cfgrtl.h"
#include "cfganal.h"
#include "cfgcleanup.h"
#include "alias.h"
#include "toplev.h"
#include "rtlhooks-def.h"
#include "tree-pass.h"
#include "dbgcnt.h"
#include "rtl-iter.h"
#include "regs.h"
#include "function-abi.h"

/* The basic idea of common subexpression elimination is to go
   through the code, keeping a record of expressions that would
   have the same value at the current scan point, and replacing
   expressions encountered with the cheapest equivalent expression.

   It is too complicated to keep track of the different possibilities
   when control paths merge in this code; so, at each label, we forget all
   that is known and start fresh.  This can be described as processing each
   extended basic block separately.  We have a separate pass to perform
   global CSE.

   Note CSE can turn a conditional or computed jump into a nop or
   an unconditional jump.  When this occurs we arrange to run the jump
   optimizer after CSE to delete the unreachable code.

   We use two data structures to record the equivalent expressions:
   a hash table for most expressions, and a vector of "quantity
   numbers" to record equivalent (pseudo) registers.

   The use of the special data structure for registers is desirable
   because it is faster.  It is possible because register references
   contain a fairly small number, the register number, taken from
   a contiguously allocated series, and two register references are
   identical if they have the same number.  General expressions
   do not have any such thing, so the only way to retrieve the
   information recorded on an expression other than a register
   is to keep it in a hash table.

Registers and "quantity numbers":

   At the start of each basic block, all of the (hardware and pseudo)
   registers used in the function are given distinct quantity
   numbers to indicate their contents.  During scan, when the code
   copies one register into another, we copy the quantity number.
   When a register is loaded in any other way, we allocate a new
   quantity number to describe the value generated by this operation.
   `REG_QTY (N)' records what quantity register N is currently thought
   of as containing.

   All real quantity numbers are greater than or equal to zero.
   If register N has not been assigned a quantity, `REG_QTY (N)' will
   equal -N - 1, which is always negative.

   Quantity numbers below zero do not exist and none of the `qty_table'
   entries should be referenced with a negative index.
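
   As a concrete (hypothetical) illustration: if register 100 has not
   been assigned a quantity, REG_QTY (100) is -101.  After scanning
   (set (reg:SI 100) (plus:SI (reg:SI 101) (const_int 4))), register
   100 receives a fresh quantity number, say 3, so REG_QTY (100) == 3;
   a subsequent (set (reg:SI 102) (reg:SI 100)) simply copies that
   number, making REG_QTY (102) == 3 as well.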

   We also maintain a bidirectional chain of registers for each
   quantity number.  The `qty_table' members `first_reg' and `last_reg',
   and `reg_eqv_table' members `next' and `prev' hold these chains.

   The first register in a chain is the one whose lifespan is least local.
   Among equals, it is the one that was seen first.
   We replace any equivalent register with that one.

   If two registers have the same quantity number, REG expressions with
   qty_table `mode' must be in the hash table for both registers and
   must be in the same class.

   The converse is not true.  Since hard registers may be referenced in
   any mode, two REG expressions might be equivalent in the hash table
   but not have the same quantity number if the mode recorded for the
   quantity of one of the registers does not match the mode of those
   expressions.

Constants and quantity numbers

   When a quantity has a known constant value, that value is stored
   in the appropriate qty_table `const_rtx'.  This is in addition to
   putting the constant in the hash table as is usual for non-regs.

   Whether a reg or a constant is preferred is determined by the configuration
   macro CONST_COSTS and will often depend on the constant value.  In any
   event, expressions containing constants can be simplified, by fold_rtx.

   When a quantity has a known nearly constant value (such as an address
   of a stack slot), that value is stored in the appropriate qty_table
   `const_rtx'.

   Integer constants don't have a machine mode.  However, cse
   determines the intended machine mode from the destination
   of the instruction that moves the constant.  The machine mode
   is recorded in the hash table along with the actual RTL
   constant expression so that different modes are kept separate.

Other expressions:

   To record known equivalences among expressions in general
   we use a hash table called `table'.  It has a fixed number of buckets
   that contain chains of `struct table_elt' elements for expressions.
   These chains connect the elements whose expressions have the same
   hash codes.

   Other chains through the same elements connect the elements which
   currently have equivalent values.

   Register references in an expression are canonicalized before hashing
   the expression.  This is done using `reg_qty' and qty_table `first_reg'.
   The hash code of a register reference is computed using the quantity
   number, not the register number.

   When the value of an expression changes, it is necessary to remove from the
   hash table not just that expression but all expressions whose values
   could be different as a result.

     1. If the value changing is in memory, except in special cases
     ANYTHING referring to memory could be changed.  That is because
     nobody knows where a pointer does not point.
     The function `invalidate_memory' removes what is necessary.

     The special cases are when the address is constant or is
     a constant plus a fixed register such as the frame pointer
     or a static chain pointer.  When such addresses are stored in,
     we can tell exactly which other such addresses must be invalidated
     due to overlap.  `invalidate' does this.
     All expressions that refer to non-constant
     memory addresses are also invalidated.  `invalidate_memory' does this.

     2. If the value changing is a register, all expressions
     containing references to that register, and only those,
     must be removed.

   Because searching the entire hash table for expressions that contain
   a register is very slow, we try to figure out when it isn't necessary.
   Precisely, this is necessary only when expressions have been
   entered in the hash table using this register, and then the value has
   changed, and then another expression wants to be added to refer to
   the register's new value.  This sequence of circumstances is rare
   within any one basic block.

   `REG_TICK' and `REG_IN_TABLE', accessors for members of
   cse_reg_info, are used to detect this case.  REG_TICK (i) is
   incremented whenever a value is stored in register i.
   REG_IN_TABLE (i) holds -1 if no references to register i have been
   entered in the table; otherwise, it contains the value REG_TICK (i)
   had when the references were entered.  If we want to enter a
   reference and REG_IN_TABLE (i) != REG_TICK (i), we must scan and
   remove old references.  Until we want to enter a new entry, the
   mere fact that the two vectors don't match causes the entries to be
   ignored if anyone tries to match them.
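
   For example (register and tick values chosen for illustration):
   suppose an expression mentioning register 5 is entered while
   REG_TICK (5) == 2, so REG_IN_TABLE (5) == 2.  A later store to
   register 5 bumps REG_TICK (5) to 3.  Nothing is scanned yet; the
   stale entries are merely ignored because 2 != 3.  Only when a new
   expression mentioning register 5 is about to be entered do we scan
   the table, remove the stale references, and set REG_IN_TABLE (5)
   back to REG_TICK (5).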

   Registers themselves are entered in the hash table as well as in
   the equivalent-register chains.  However, `REG_TICK' and
   `REG_IN_TABLE' do not apply to expressions which are simple
   register references.  These expressions are removed from the table
   immediately when they become invalid, and this can be done even if
   we do not immediately search for all the expressions that refer to
   the register.

   A CLOBBER rtx in an instruction invalidates its operand for further
   reuse.  A CLOBBER or SET rtx whose operand is a MEM:BLK
   invalidates everything that resides in memory.

Related expressions:

   Constant expressions that differ only by an additive integer
   are called related.  When a constant expression is put in
   the table, the related expression with no constant term
   is also entered.  These are made to point at each other
   so that it is possible to find out if there exists any
   register equivalent to an expression related to a given expression.  */
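
/* For instance (a hypothetical example): when
   (const (plus (symbol_ref "table") (const_int 8))) is entered in the
   hash table, (symbol_ref "table") is entered as well and the two are
   linked through `related_value'.  A later query for
   (const (plus (symbol_ref "table") (const_int 12))) can then walk the
   related chain and derive the value by adding 4 to a register already
   holding the first expression.  */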

/* Length of qty_table vector.  We know in advance we will not need
   a quantity number this big.  */

static int max_qty;

/* Next quantity number to be allocated.
   This is 1 + the largest number needed so far.  */

static int next_qty;

/* Per-qty information tracking.

   `first_reg' and `last_reg' track the head and tail of the
   chain of registers which currently contain this quantity.

   `mode' contains the machine mode of this quantity.

   `const_rtx' holds the rtx of the constant value of this
   quantity, if known.  A sum of the frame/arg pointer
   and a constant can also be entered here.  When this holds
   a known value, `const_insn' is the insn which stored the
   constant value.

   `comparison_{code,const,qty}' are used to track when a
   comparison between a quantity and some constant or register has
   been passed.  In such a case, we know the results of the comparison
   in case we see it again.  These members record a comparison that
   is known to be true.  `comparison_code' holds the rtx code of such
   a comparison, else it is set to UNKNOWN and the other two
   comparison members are undefined.  `comparison_const' holds
   the constant being compared against, or zero if the comparison
   is not against a constant.  `comparison_qty' holds the quantity
   being compared against when the result is known.  If the comparison
   is not with a register, `comparison_qty' is -1.  */
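
/* For example (values invented for illustration): after the
   conditional branch of

     (set (pc) (if_then_else (eq (reg:SI 100) (const_int 0)) ...))

   is known taken, the quantity of register 100 records
   comparison_code == EQ, comparison_const == (const_int 0) and
   comparison_qty == -1, since the comparison was against a constant
   rather than a register.  */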

struct qty_table_elem
{
  rtx const_rtx;
  rtx_insn *const_insn;
  rtx comparison_const;
  int comparison_qty;
  unsigned int first_reg, last_reg;
  /* The sizes of these fields should match the sizes of the
     code and mode fields of struct rtx_def (see rtl.h).  */
  ENUM_BITFIELD(rtx_code) comparison_code : 16;
  ENUM_BITFIELD(machine_mode) mode : 8;
};

/* The table of all qtys, indexed by qty number.  */
static struct qty_table_elem *qty_table;

/* For machines that have a CC0, we do not record its value in the hash
   table since its use is guaranteed to be the insn immediately following
   its definition and any other insn is presumed to invalidate it.

   Instead, we store below the current and previous values assigned to
   CC0.  If either should happen to be a constant, it is stored in
   preference to the actual assigned value.  In that case, we also
   store the mode in which the constant should be interpreted.  */

static rtx this_insn_cc0, prev_insn_cc0;
static machine_mode this_insn_cc0_mode, prev_insn_cc0_mode;

/* Insn being scanned.  */

static rtx_insn *this_insn;
static bool optimize_this_for_speed_p;

/* Indexed by register number, this gives the number of the next (or
   previous) register in the chain of registers sharing the same
   value.

   Or -1 if this register is at the end of the chain.

   If REG_QTY (N) == -N - 1, reg_eqv_table[N].next is undefined.  */

/* Per-register equivalence chain.  */
struct reg_eqv_elem
{
  int next, prev;
};

/* The table of all register equivalence chains.  */
static struct reg_eqv_elem *reg_eqv_table;

struct cse_reg_info
{
  /* The timestamp at which this register is initialized.  */
  unsigned int timestamp;

  /* The quantity number of the register's current contents.  */
  int reg_qty;

  /* The number of times the register has been altered in the current
     basic block.  */
  int reg_tick;

  /* The REG_TICK value at which rtx's containing this register are
     valid in the hash table.  If this does not equal the current
     reg_tick value, such expressions existing in the hash table are
     invalid.  */
  int reg_in_table;

  /* The SUBREG that was set when REG_TICK was last incremented.  Set
     to -1 if the last store was to the whole register, not a subreg.  */
  unsigned int subreg_ticked;
};

/* A table of cse_reg_info indexed by register numbers.  */
static struct cse_reg_info *cse_reg_info_table;

/* The size of the above table.  */
static unsigned int cse_reg_info_table_size;

/* The index of the first entry that has not been initialized.  */
static unsigned int cse_reg_info_table_first_uninitialized;

/* The timestamp at the beginning of the current run of
   cse_extended_basic_block; we increment it at the start of each run.
   The timestamp field of a cse_reg_info entry matches the value of
   this variable if and only if the entry has been initialized during
   the current run of cse_extended_basic_block.  */
static unsigned int cse_reg_info_timestamp;

/* A HARD_REG_SET containing all the hard registers for which there is
   currently a REG expression in the hash table.  Note the difference
   from the above variables, which indicate if the REG is mentioned in some
   expression in the table.  */

static HARD_REG_SET hard_regs_in_table;

/* True if CSE has altered the CFG.  */
static bool cse_cfg_altered;

/* True if CSE has altered conditional jump insns in such a way
   that jump optimization should be redone.  */
static bool cse_jumps_altered;

/* True if we have put a LABEL_REF into the hash table for an INSN
   without a REG_LABEL_OPERAND; in that case we have to rerun jump
   after CSE to put in the note.  */
static bool recorded_label_ref;

/* canon_hash stores 1 in do_not_record
   if it notices a reference to CC0, PC, or some other volatile
   subexpression.  */

static int do_not_record;

/* canon_hash stores 1 in hash_arg_in_memory
   if it notices a reference to memory within the expression being hashed.  */

static int hash_arg_in_memory;

/* The hash table contains buckets which are chains of `struct table_elt's,
   each recording one expression's information.
   That expression is in the `exp' field.

   The canon_exp field contains a canonical (from the point of view of
   alias analysis) version of the `exp' field.

   Those elements with the same hash code are chained in both directions
   through the `next_same_hash' and `prev_same_hash' fields.

   Each set of expressions with equivalent values
   are on a two-way chain through the `next_same_value'
   and `prev_same_value' fields, and all point with
   the `first_same_value' field at the first element in
   that chain.  The chain is in order of increasing cost.
   Each element's cost value is in its `cost' field.

   The `in_memory' field is nonzero for elements that
   involve any reference to memory.  These elements are removed
   whenever a write is done to an unidentified location in memory.
   To be safe, we assume that a memory address is unidentified unless
   the address is either a symbol constant or a constant plus
   the frame pointer or argument pointer.

   The `related_value' field is used to connect related expressions
   (that differ by adding an integer).
   The related expressions are chained in a circular fashion.
   `related_value' is zero for expressions for which this
   chain is not useful.

   The `cost' field stores the cost of this element's expression.
   The `regcost' field stores the value returned by approx_reg_cost for
   this element's expression.

   The `is_const' flag is set if the element is a constant (including
   a fixed address).

   The `flag' field is used as a temporary during some search routines.

   The `mode' field is usually the same as GET_MODE (`exp'), but
   if `exp' is a CONST_INT and has no machine mode then the `mode'
   field is the mode it was being used as.  Each constant is
   recorded separately for each mode it is used with.  */

struct table_elt
{
  rtx exp;
  rtx canon_exp;
  struct table_elt *next_same_hash;
  struct table_elt *prev_same_hash;
  struct table_elt *next_same_value;
  struct table_elt *prev_same_value;
  struct table_elt *first_same_value;
  struct table_elt *related_value;
  int cost;
  int regcost;
  /* The size of this field should match the size
     of the mode field of struct rtx_def (see rtl.h).  */
  ENUM_BITFIELD(machine_mode) mode : 8;
  char in_memory;
  char is_const;
  char flag;
};

/* We don't want a lot of buckets, because we rarely have very many
   things stored in the hash table, and a lot of buckets slows
   down a lot of loops that happen frequently.  */
#define HASH_SHIFT	5
#define HASH_SIZE	(1 << HASH_SHIFT)
#define HASH_MASK	(HASH_SIZE - 1)

/* Compute hash code of X in mode M.  Special-case the case where X is
   a pseudo register (hard registers may require `do_not_record' to be
   set).  */

#define HASH(X, M)	\
 ((REG_P (X) && REGNO (X) >= FIRST_PSEUDO_REGISTER	\
  ? (((unsigned) REG << 7) + (unsigned) REG_QTY (REGNO (X)))	\
  : canon_hash (X, M)) & HASH_MASK)
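
/* As a hypothetical example: for pseudo register 100 whose
   REG_QTY (100) is 7, HASH yields (((unsigned) REG << 7) + 7)
   & HASH_MASK.  The hash of a pseudo thus depends only on its
   quantity number, so all registers currently known to hold the same
   value hash to the same bucket.  */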

/* Like HASH, but without side-effects.  */
#define SAFE_HASH(X, M)	\
 ((REG_P (X) && REGNO (X) >= FIRST_PSEUDO_REGISTER	\
  ? (((unsigned) REG << 7) + (unsigned) REG_QTY (REGNO (X)))	\
  : safe_hash (X, M)) & HASH_MASK)

/* Determine whether register number N is considered a fixed register for the
   purpose of approximating register costs.
   It is desirable to replace other regs with fixed regs, to reduce need for
   non-fixed hard regs.
   A reg wins if it is either the frame pointer or designated as fixed.  */
#define FIXED_REGNO_P(N)  \
  ((N) == FRAME_POINTER_REGNUM || (N) == HARD_FRAME_POINTER_REGNUM \
   || fixed_regs[N] || global_regs[N])

/* Compute cost of X, as stored in the `cost' field of a table_elt.  Fixed
   hard registers and pointers into the frame are the cheapest with a cost
   of 0.  Next come pseudos with a cost of 1 and other hard registers with
   a cost of 2.  Aside from these special cases, call `rtx_cost'.  */

#define CHEAP_REGNO(N)							\
  (REGNO_PTR_FRAME_P (N)						\
   || (HARD_REGISTER_NUM_P (N)						\
       && FIXED_REGNO_P (N) && REGNO_REG_CLASS (N) != NO_REGS))

#define COST(X, MODE)							\
  (REG_P (X) ? 0 : notreg_cost (X, MODE, SET, 1))
#define COST_IN(X, MODE, OUTER, OPNO)					\
  (REG_P (X) ? 0 : notreg_cost (X, MODE, OUTER, OPNO))

/* Get the number of times this register has been updated in this
   basic block.  */

#define REG_TICK(N) (get_cse_reg_info (N)->reg_tick)

/* Get the point at which REG was recorded in the table.  */

#define REG_IN_TABLE(N) (get_cse_reg_info (N)->reg_in_table)

/* Get the SUBREG set at the last increment to REG_TICK (-1 if not a
   SUBREG).  */

#define SUBREG_TICKED(N) (get_cse_reg_info (N)->subreg_ticked)

/* Get the quantity number for REG.  */

#define REG_QTY(N) (get_cse_reg_info (N)->reg_qty)

/* Determine if the quantity number for register X represents a valid index
   into the qty_table.  */

#define REGNO_QTY_VALID_P(N) (REG_QTY (N) >= 0)

/* Compare table_elt X and Y and return true iff X is cheaper than Y.  */

#define CHEAPER(X, Y) \
 (preferable ((X)->cost, (X)->regcost, (Y)->cost, (Y)->regcost) < 0)

static struct table_elt *table[HASH_SIZE];

/* Chain of `struct table_elt's made so far for this function
   but currently removed from the table.  */

static struct table_elt *free_element_chain;

/* Set to the cost of a constant pool reference if one was found for a
   symbolic constant.  If this was found, it means we should try to
   convert constants into constant pool entries if they don't fit in
   the insn.  */

static int constant_pool_entries_cost;
static int constant_pool_entries_regcost;

/* Trace a path through the CFG.  */

struct branch_path
{
  /* The basic block for this path entry.  */
  basic_block bb;
};

/* This data describes a block that will be processed by
   cse_extended_basic_block.  */

struct cse_basic_block_data
{
  /* Total number of SETs in block.  */
  int nsets;
  /* Size of current branch path, if any.  */
  int path_size;
  /* Current path, indicating which basic_blocks will be processed.  */
  struct branch_path *path;
};


/* Pointers to the live in/live out bitmaps for the boundaries of the
   current EBB.  */
static bitmap cse_ebb_live_in, cse_ebb_live_out;

/* A simple bitmap to track which basic blocks have been visited
   already as part of an already processed extended basic block.  */
static sbitmap cse_visited_basic_blocks;

static bool fixed_base_plus_p (rtx x);
static int notreg_cost (rtx, machine_mode, enum rtx_code, int);
static int preferable (int, int, int, int);
static void new_basic_block (void);
static void make_new_qty (unsigned int, machine_mode);
static void make_regs_eqv (unsigned int, unsigned int);
static void delete_reg_equiv (unsigned int);
static int mention_regs (rtx);
static int insert_regs (rtx, struct table_elt *, int);
static void remove_from_table (struct table_elt *, unsigned);
static void remove_pseudo_from_table (rtx, unsigned);
static struct table_elt *lookup (rtx, unsigned, machine_mode);
static struct table_elt *lookup_for_remove (rtx, unsigned, machine_mode);
static rtx lookup_as_function (rtx, enum rtx_code);
static struct table_elt *insert_with_costs (rtx, struct table_elt *, unsigned,
					    machine_mode, int, int);
static struct table_elt *insert (rtx, struct table_elt *, unsigned,
				 machine_mode);
static void merge_equiv_classes (struct table_elt *, struct table_elt *);
static void invalidate (rtx, machine_mode);
static void remove_invalid_refs (unsigned int);
static void remove_invalid_subreg_refs (unsigned int, poly_uint64,
					machine_mode);
static void rehash_using_reg (rtx);
static void invalidate_memory (void);
static rtx use_related_value (rtx, struct table_elt *);

static inline unsigned canon_hash (rtx, machine_mode);
static inline unsigned safe_hash (rtx, machine_mode);
static inline unsigned hash_rtx_string (const char *);

static rtx canon_reg (rtx, rtx_insn *);
static enum rtx_code find_comparison_args (enum rtx_code, rtx *, rtx *,
					   machine_mode *,
					   machine_mode *);
static rtx fold_rtx (rtx, rtx_insn *);
static rtx equiv_constant (rtx);
static void record_jump_equiv (rtx_insn *, bool);
static void record_jump_cond (enum rtx_code, machine_mode, rtx, rtx,
			      int);
static void cse_insn (rtx_insn *);
static void cse_prescan_path (struct cse_basic_block_data *);
static void invalidate_from_clobbers (rtx_insn *);
static void invalidate_from_sets_and_clobbers (rtx_insn *);
static void cse_extended_basic_block (struct cse_basic_block_data *);
extern void dump_class (struct table_elt*);
static void get_cse_reg_info_1 (unsigned int regno);
static struct cse_reg_info * get_cse_reg_info (unsigned int regno);

static void flush_hash_table (void);
static bool insn_live_p (rtx_insn *, int *);
static bool set_live_p (rtx, rtx_insn *, int *);
static void cse_change_cc_mode_insn (rtx_insn *, rtx);
static void cse_change_cc_mode_insns (rtx_insn *, rtx_insn *, rtx);
static machine_mode cse_cc_succs (basic_block, basic_block, rtx, rtx,
				       bool);


#undef RTL_HOOKS_GEN_LOWPART
#define RTL_HOOKS_GEN_LOWPART		gen_lowpart_if_possible

static const struct rtl_hooks cse_rtl_hooks = RTL_HOOKS_INITIALIZER;

/* Nonzero if X has the form (PLUS frame-pointer integer).  */

static bool
fixed_base_plus_p (rtx x)
{
  switch (GET_CODE (x))
    {
    case REG:
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx)
	return true;
      if (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM])
	return true;
      return false;

    case PLUS:
      if (!CONST_INT_P (XEXP (x, 1)))
	return false;
      return fixed_base_plus_p (XEXP (x, 0));

    default:
      return false;
    }
}
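
/* For example (illustrative RTL), fixed_base_plus_p accepts
   (plus (reg/f FP) (const_int 8)) and, through the recursion on the
   first operand, nested forms such as
   (plus (plus (reg/f FP) (const_int 16)) (const_int 4)).  */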

/* Dump the expressions in the equivalence class indicated by CLASSP.
   This function is used only for debugging.  */
DEBUG_FUNCTION void
dump_class (struct table_elt *classp)
{
  struct table_elt *elt;

  fprintf (stderr, "Equivalence chain for ");
  print_rtl (stderr, classp->exp);
  fprintf (stderr, ": \n");

  for (elt = classp->first_same_value; elt; elt = elt->next_same_value)
    {
      print_rtl (stderr, elt->exp);
      fprintf (stderr, "\n");
    }
}

/* Return an estimate of the cost of the registers used in an rtx.
   This is mostly the number of different REG expressions in the rtx;
   however for some exceptions like fixed registers we use a cost of
   0.  If any other hard register reference occurs, return MAX_COST.  */

static int
approx_reg_cost (const_rtx x)
{
  int cost = 0;
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, NONCONST)
    {
      const_rtx x = *iter;
      if (REG_P (x))
	{
	  unsigned int regno = REGNO (x);
	  if (!CHEAP_REGNO (regno))
	    {
	      if (regno < FIRST_PSEUDO_REGISTER)
		{
		  if (targetm.small_register_classes_for_mode_p (GET_MODE (x)))
		    return MAX_COST;
		  cost += 2;
		}
	      else
		cost += 1;
	    }
	}
    }
  return cost;
}
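
/* For illustration (figures follow directly from the code above):
   approx_reg_cost of (plus:SI (reg:SI 100) (reg:SI 101)) is 2, one
   per pseudo occurrence.  A non-fixed hard register occurrence adds
   2 instead, or makes the whole estimate MAX_COST outright on targets
   with small register classes for its mode.  */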

/* Return a negative value if an rtx A, whose costs are given by COST_A
   and REGCOST_A, is more desirable than an rtx B.
   Return a positive value if A is less desirable, or 0 if the two are
   equally good.  */
static int
preferable (int cost_a, int regcost_a, int cost_b, int regcost_b)
{
  /* First, get rid of cases involving expressions that are entirely
     unwanted.  */
  if (cost_a != cost_b)
    {
      if (cost_a == MAX_COST)
	return 1;
      if (cost_b == MAX_COST)
	return -1;
    }

  /* Avoid extending lifetimes of hardregs.  */
  if (regcost_a != regcost_b)
    {
      if (regcost_a == MAX_COST)
	return 1;
      if (regcost_b == MAX_COST)
	return -1;
    }

  /* Normal operation costs take precedence.  */
  if (cost_a != cost_b)
    return cost_a - cost_b;
  /* Only if these are identical consider effects on register pressure.  */
  if (regcost_a != regcost_b)
    return regcost_a - regcost_b;
  return 0;
}

/* Internal function, to compute cost when X is not a register; called
   from COST macro to keep it simple.  */

static int
notreg_cost (rtx x, machine_mode mode, enum rtx_code outer, int opno)
{
  scalar_int_mode int_mode, inner_mode;
  return ((GET_CODE (x) == SUBREG
	   && REG_P (SUBREG_REG (x))
	   && is_int_mode (mode, &int_mode)
	   && is_int_mode (GET_MODE (SUBREG_REG (x)), &inner_mode)
	   && GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (inner_mode)
	   && subreg_lowpart_p (x)
	   && TRULY_NOOP_TRUNCATION_MODES_P (int_mode, inner_mode))
	  ? 0
	  : rtx_cost (x, mode, outer, opno, optimize_this_for_speed_p) * 2);
}
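
/* For example (modes chosen for illustration): on a target where
   truncation from DImode to SImode is a no-op,
   (subreg:SI (reg:DI 100) 0) is costed at 0 above, just like a plain
   register, because taking the low part needs no instruction; any
   other non-register rtx is costed at twice its rtx_cost.  */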


/* Initialize CSE_REG_INFO_TABLE.  */

static void
init_cse_reg_info (unsigned int nregs)
{
  /* Do we need to grow the table?  */
  if (nregs > cse_reg_info_table_size)
    {
      unsigned int new_size;

      if (cse_reg_info_table_size < 2048)
	{
	  /* Compute a new size that is a power of 2 and no smaller
	     than the larger of NREGS and 64.  */
	  new_size = (cse_reg_info_table_size
		      ? cse_reg_info_table_size : 64);

	  while (new_size < nregs)
	    new_size *= 2;
	}
      else
	{
	  /* If we need a big table, allocate just enough to hold
	     NREGS registers.  */
	  new_size = nregs;
	}

      /* Reallocate the table with NEW_SIZE entries.  */
      free (cse_reg_info_table);
      cse_reg_info_table = XNEWVEC (struct cse_reg_info, new_size);
      cse_reg_info_table_size = new_size;
      cse_reg_info_table_first_uninitialized = 0;
    }

  /* Do we have all of the first NREGS entries initialized?  */
  if (cse_reg_info_table_first_uninitialized < nregs)
    {
      unsigned int old_timestamp = cse_reg_info_timestamp - 1;
      unsigned int i;

      /* Put the old timestamp on newly allocated entries so that they
	 will all be considered out of date.  We do not touch those
	 entries beyond the first NREGS entries to be nice to the
	 virtual memory.  */
      for (i = cse_reg_info_table_first_uninitialized; i < nregs; i++)
	cse_reg_info_table[i].timestamp = old_timestamp;

      cse_reg_info_table_first_uninitialized = nregs;
    }
}
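
/* As a worked example of the growth policy above (sizes are
   illustrative): starting from an empty table, a request for 100
   registers grows the table 64 -> 128.  A later request for 3000
   registers doubles 128 up to 4096, since the table was still below
   2048 entries; only when the table is already 2048 entries or more
   does a larger request allocate exactly NREGS entries.  */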

/* Given REGNO, initialize the cse_reg_info entry for REGNO.  */

static void
get_cse_reg_info_1 (unsigned int regno)
{
  /* Set TIMESTAMP field to CSE_REG_INFO_TIMESTAMP so that this
     entry will be considered to have been initialized.  */
  cse_reg_info_table[regno].timestamp = cse_reg_info_timestamp;

  /* Initialize the rest of the entry.  */
  cse_reg_info_table[regno].reg_tick = 1;
  cse_reg_info_table[regno].reg_in_table = -1;
  cse_reg_info_table[regno].subreg_ticked = -1;
  cse_reg_info_table[regno].reg_qty = -regno - 1;
}

/* Find a cse_reg_info entry for REGNO.  */

static inline struct cse_reg_info *
get_cse_reg_info (unsigned int regno)
{
  struct cse_reg_info *p = &cse_reg_info_table[regno];

  /* If this entry has not been initialized, go ahead and initialize
     it.  */
  if (p->timestamp != cse_reg_info_timestamp)
    get_cse_reg_info_1 (regno);

  return p;
}

/* Clear the hash table and initialize each register with its own quantity,
   for a new basic block.  */

static void
new_basic_block (void)
{
  int i;

  next_qty = 0;

  /* Invalidate cse_reg_info_table.  */
  cse_reg_info_timestamp++;

  /* Clear out hash table state for this pass.  */
  CLEAR_HARD_REG_SET (hard_regs_in_table);

  /* The per-quantity values used to be initialized here, but it is
     much faster to initialize each as it is made in `make_new_qty'.  */

  for (i = 0; i < HASH_SIZE; i++)
    {
      struct table_elt *first;

      first = table[i];
      if (first != NULL)
	{
	  struct table_elt *last = first;

	  table[i] = NULL;

	  while (last->next_same_hash != NULL)
	    last = last->next_same_hash;

	  /* Now relink this entire hash chain into
	     the free element list.  */

	  last->next_same_hash = free_element_chain;
	  free_element_chain = first;
	}
    }

  prev_insn_cc0 = 0;
}

/* Say that register REG contains a quantity in mode MODE not in any
   register before and initialize that quantity.  */

static void
make_new_qty (unsigned int reg, machine_mode mode)
{
  int q;
  struct qty_table_elem *ent;
  struct reg_eqv_elem *eqv;

  gcc_assert (next_qty < max_qty);

  q = REG_QTY (reg) = next_qty++;
  ent = &qty_table[q];
  ent->first_reg = reg;
  ent->last_reg = reg;
  ent->mode = mode;
  ent->const_rtx = ent->const_insn = NULL;
  ent->comparison_code = UNKNOWN;

  eqv = &reg_eqv_table[reg];
  eqv->next = eqv->prev = -1;
}

/* Make reg NEW equivalent to reg OLD.
   OLD is not changing; NEW is.  */

static void
make_regs_eqv (unsigned int new_reg, unsigned int old_reg)
{
  unsigned int lastr, firstr;
  int q = REG_QTY (old_reg);
  struct qty_table_elem *ent;

  ent = &qty_table[q];

  /* Nothing should become eqv until it has a "non-invalid" qty number.  */
  gcc_assert (REGNO_QTY_VALID_P (old_reg));

  REG_QTY (new_reg) = q;
  firstr = ent->first_reg;
  lastr = ent->last_reg;

  /* Prefer fixed hard registers to anything.  Prefer pseudo regs to other
     hard regs.  Among pseudos, if NEW will live longer than any other reg
     of the same qty, and that is beyond the current basic block,
     make it the new canonical replacement for this qty.  */
  if (! (firstr < FIRST_PSEUDO_REGISTER && FIXED_REGNO_P (firstr))
      /* Certain fixed registers might be of the class NO_REGS.  This means
	 that not only can they not be allocated by the compiler, but
	 they cannot be used in substitutions or canonicalizations
	 either.  */
      && (new_reg >= FIRST_PSEUDO_REGISTER || REGNO_REG_CLASS (new_reg) != NO_REGS)
      && ((new_reg < FIRST_PSEUDO_REGISTER && FIXED_REGNO_P (new_reg))
	  || (new_reg >= FIRST_PSEUDO_REGISTER
	      && (firstr < FIRST_PSEUDO_REGISTER
		  || (bitmap_bit_p (cse_ebb_live_out, new_reg)
		      && !bitmap_bit_p (cse_ebb_live_out, firstr))
		  || (bitmap_bit_p (cse_ebb_live_in, new_reg)
		      && !bitmap_bit_p (cse_ebb_live_in, firstr))))))
    {
      reg_eqv_table[firstr].prev = new_reg;
      reg_eqv_table[new_reg].next = firstr;
      reg_eqv_table[new_reg].prev = -1;
      ent->first_reg = new_reg;
    }
  else
    {
      /* If NEW is a hard reg (known to be non-fixed), insert at end.
	 Otherwise, insert before any non-fixed hard regs that are at the
	 end.  Registers of class NO_REGS cannot be used as an
	 equivalent for anything.  */
      while (lastr < FIRST_PSEUDO_REGISTER && reg_eqv_table[lastr].prev >= 0
	     && (REGNO_REG_CLASS (lastr) == NO_REGS || ! FIXED_REGNO_P (lastr))
	     && new_reg >= FIRST_PSEUDO_REGISTER)
	lastr = reg_eqv_table[lastr].prev;
      reg_eqv_table[new_reg].next = reg_eqv_table[lastr].next;
      if (reg_eqv_table[lastr].next >= 0)
	reg_eqv_table[reg_eqv_table[lastr].next].prev = new_reg;
      else
	qty_table[q].last_reg = new_reg;
      reg_eqv_table[lastr].next = new_reg;
      reg_eqv_table[new_reg].prev = lastr;
    }
}
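
/* To illustrate the ordering rules above (register numbers invented):
   if pseudo 101 becomes equivalent to pseudo 100, the current head of
   the chain, and 101 is live out of the extended basic block while
   100 is not, 101 becomes the new first_reg and hence the preferred
   canonical replacement; a non-fixed hard register, by contrast, is
   linked in at the tail of the chain.  */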

/* Remove REG from its equivalence class.  */

static void
delete_reg_equiv (unsigned int reg)
{
  struct qty_table_elem *ent;
  int q = REG_QTY (reg);
  int p, n;

  /* If invalid, do nothing.  */
  if (! REGNO_QTY_VALID_P (reg))
    return;

  ent = &qty_table[q];

  p = reg_eqv_table[reg].prev;
  n = reg_eqv_table[reg].next;

  if (n != -1)
    reg_eqv_table[n].prev = p;
  else
    ent->last_reg = p;
  if (p != -1)
    reg_eqv_table[p].next = n;
  else
    ent->first_reg = n;

  REG_QTY (reg) = -reg - 1;
}

/* Remove any invalid expressions from the hash table
   that refer to any of the registers contained in expression X.

   Make sure that newly inserted references to those registers
   as subexpressions will be considered valid.

   mention_regs is not called when a register itself
   is being stored in the table.

   Return 1 if we have done something that may have changed the hash code
   of X.  */

static int
mention_regs (rtx x)
{
  enum rtx_code code;
  int i, j;
  const char *fmt;
  int changed = 0;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  if (code == REG)
    {
      unsigned int regno = REGNO (x);
      unsigned int endregno = END_REGNO (x);
      unsigned int i;

      for (i = regno; i < endregno; i++)
	{
	  if (REG_IN_TABLE (i) >= 0 && REG_IN_TABLE (i) != REG_TICK (i))
	    remove_invalid_refs (i);

	  REG_IN_TABLE (i) = REG_TICK (i);
	  SUBREG_TICKED (i) = -1;
	}

      return 0;
    }

  /* If this is a SUBREG, we don't want to discard other SUBREGs of the same
     pseudo if they don't use overlapping words.  We handle only pseudos
     here for simplicity.  */
  if (code == SUBREG && REG_P (SUBREG_REG (x))
      && REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER)
    {
      unsigned int i = REGNO (SUBREG_REG (x));

      if (REG_IN_TABLE (i) >= 0 && REG_IN_TABLE (i) != REG_TICK (i))
	{
	  /* If REG_IN_TABLE (i) differs from REG_TICK (i) by one, and
	     the last store to this register really stored into this
	     subreg, then remove the memory of this subreg.
	     Otherwise, remove any memory of the entire register and
	     all its subregs from the table.  */
	  if (REG_TICK (i) - REG_IN_TABLE (i) > 1
	      || SUBREG_TICKED (i) != REGNO (SUBREG_REG (x)))
	    remove_invalid_refs (i);
	  else
	    remove_invalid_subreg_refs (i, SUBREG_BYTE (x), GET_MODE (x));
	}

      REG_IN_TABLE (i) = REG_TICK (i);
      SUBREG_TICKED (i) = REGNO (SUBREG_REG (x));
      return 0;
    }

  /* If X is a comparison or a COMPARE and either operand is a register
     that does not have a quantity, give it one.  This is so that a later
     call to record_jump_equiv won't cause X to be assigned a different
     hash code and not found in the table after that call.

     It is not necessary to do this here, since rehash_using_reg can
     fix up the table later, but doing this here eliminates the need to
     call that expensive function in the most common case where the only
     use of the register is in the comparison.  */

  if (code == COMPARE || COMPARISON_P (x))
    {
      if (REG_P (XEXP (x, 0))
	  && ! REGNO_QTY_VALID_P (REGNO (XEXP (x, 0))))
	if (insert_regs (XEXP (x, 0), NULL, 0))
	  {
	    rehash_using_reg (XEXP (x, 0));
	    changed = 1;
	  }

      if (REG_P (XEXP (x, 1))
	  && ! REGNO_QTY_VALID_P (REGNO (XEXP (x, 1))))
	if (insert_regs (XEXP (x, 1), NULL, 0))
	  {
	    rehash_using_reg (XEXP (x, 1));
	    changed = 1;
	  }
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      changed |= mention_regs (XEXP (x, i));
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
	changed |= mention_regs (XVECEXP (x, i, j));

  return changed;
}

/* Update the register quantities for inserting X into the hash table
   with a value equivalent to CLASSP.
   (If the class does not contain a REG, it is irrelevant.)
   If MODIFIED is nonzero, X is a destination; it is being modified.
   Note that delete_reg_equiv should be called on a register
   before insert_regs is done on that register with MODIFIED != 0.

   Nonzero value means that elements of reg_qty have changed
   so X's hash code may be different.  */

static int
insert_regs (rtx x, struct table_elt *classp, int modified)
{
  if (REG_P (x))
    {
      unsigned int regno = REGNO (x);
      int qty_valid;

      /* If REGNO is in the equivalence table already but is of the
	 wrong mode for that equivalence, don't do anything here.  */

      qty_valid = REGNO_QTY_VALID_P (regno);
      if (qty_valid)
	{
	  struct qty_table_elem *ent = &qty_table[REG_QTY (regno)];

	  if (ent->mode != GET_MODE (x))
	    return 0;
	}

      if (modified || ! qty_valid)
	{
	  if (classp)
	    for (classp = classp->first_same_value;
		 classp != 0;
		 classp = classp->next_same_value)
	      if (REG_P (classp->exp)
		  && GET_MODE (classp->exp) == GET_MODE (x))
		{
		  unsigned c_regno = REGNO (classp->exp);

		  gcc_assert (REGNO_QTY_VALID_P (c_regno));

		  /* Suppose that 5 is hard reg and 100 and 101 are
		     pseudos.  Consider

		     (set (reg:si 100) (reg:si 5))
		     (set (reg:si 5) (reg:si 100))
		     (set (reg:di 101) (reg:di 5))

		     We would now set REG_QTY (101) = REG_QTY (5), but the
		     entry for 5 is in SImode.  When we use this later in
		     copy propagation, we get the register in wrong mode.  */
		  if (qty_table[REG_QTY (c_regno)].mode != GET_MODE (x))
		    continue;

		  make_regs_eqv (regno, c_regno);
		  return 1;
		}

	  /* Mention_regs for a SUBREG checks if REG_TICK is exactly one larger
	     than REG_IN_TABLE to find out if there was only a single preceding
	     invalidation - for the SUBREG - or another one, which would be
	     for the full register.  However, if we find here that REG_TICK
	     indicates that the register is invalid, it means that it has
	     been invalidated in a separate operation.  The SUBREG might be used
	     now (then this is a recursive call), or we might use the full REG
	     now and a SUBREG of it later.  So bump up REG_TICK so that
	     mention_regs will do the right thing.  */
	  if (! modified
	      && REG_IN_TABLE (regno) >= 0
	      && REG_TICK (regno) == REG_IN_TABLE (regno) + 1)
	    REG_TICK (regno)++;
	  make_new_qty (regno, GET_MODE (x));
	  return 1;
	}

      return 0;
    }

  /* If X is a SUBREG, we will likely be inserting the inner register in the
     table.  If that register doesn't have an assigned quantity number at
     this point but does later, the insertion that we will be doing now will
     not be accessible because its hash code will have changed.  So assign
     a quantity number now.  */

  else if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x))
	   && ! REGNO_QTY_VALID_P (REGNO (SUBREG_REG (x))))
    {
      insert_regs (SUBREG_REG (x), NULL, 0);
      mention_regs (x);
      return 1;
    }
  else
    return mention_regs (x);
}


/* Compute upper and lower anchors for CST.  Also compute the offset of CST
   from these anchors/bases such that *_BASE + *_OFFS = CST.  Return false iff
   CST is equal to an anchor.  */

static bool
compute_const_anchors (rtx cst,
		       HOST_WIDE_INT *lower_base, HOST_WIDE_INT *lower_offs,
		       HOST_WIDE_INT *upper_base, HOST_WIDE_INT *upper_offs)
{
  HOST_WIDE_INT n = INTVAL (cst);

  *lower_base = n & ~(targetm.const_anchor - 1);
  if (*lower_base == n)
    return false;

  *upper_base =
    (n + (targetm.const_anchor - 1)) & ~(targetm.const_anchor - 1);
  *upper_offs = n - *upper_base;
  *lower_offs = n - *lower_base;
  return true;
}
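
/* A numeric sketch, assuming a target whose const_anchor is 0x8000:
   for CST == 0x12345678 the lower anchor is 0x12340000 with offset
   0x5678, and the upper anchor is 0x12348000 with offset -0x2988,
   so that in both cases *_BASE + *_OFFS == CST.  */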

/* Insert the equivalence between ANCHOR and (REG + OFF) in mode MODE.  */

static void
insert_const_anchor (HOST_WIDE_INT anchor, rtx reg, HOST_WIDE_INT offs,
		     machine_mode mode)
{
  struct table_elt *elt;
  unsigned hash;
  rtx anchor_exp;
  rtx exp;

  anchor_exp = GEN_INT (anchor);
  hash = HASH (anchor_exp, mode);
  elt = lookup (anchor_exp, hash, mode);
  if (!elt)
    elt = insert (anchor_exp, NULL, hash, mode);

  exp = plus_constant (mode, reg, offs);
  /* REG has just been inserted and the hash codes recomputed.  */
  mention_regs (exp);
  hash = HASH (exp, mode);

  /* Use the cost of the register rather than the whole expression.  When
     looking up constant anchors we will further offset the corresponding
     expression therefore it does not make sense to prefer REGs over
     reg-immediate additions.  Prefer instead the oldest expression.  Also
     don't prefer pseudos over hard regs so that we derive constants in
     argument registers from other argument registers rather than from the
     original pseudo that was used to synthesize the constant.  */
  insert_with_costs (exp, elt, hash, mode, COST (reg, mode), 1);
}

/* The constant CST is equivalent to the register REG.  Create
   equivalences between the two anchors of CST and the corresponding
   register-offset expressions using REG.  */

static void
insert_const_anchors (rtx reg, rtx cst, machine_mode mode)
{
  HOST_WIDE_INT lower_base, lower_offs, upper_base, upper_offs;

  if (!compute_const_anchors (cst, &lower_base, &lower_offs,
			      &upper_base, &upper_offs))
      return;

  /* Ignore anchors of value 0.  Constants accessible from zero are
     simple.  */
  if (lower_base != 0)
    insert_const_anchor (lower_base, reg, -lower_offs, mode);

  if (upper_base != 0)
    insert_const_anchor (upper_base, reg, -upper_offs, mode);
}
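
/* Continuing the numeric sketch above: if (reg:SI 100) is set to
   0x12345678, the equivalences recorded are
   0x12340000 == (plus (reg:SI 100) (const_int -0x5678)) and
   0x12348000 == (plus (reg:SI 100) (const_int 0x2988)).  */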

/* We need to express ANCHOR_ELT->exp + OFFS.  Walk the equivalence list of
   ANCHOR_ELT and see if offsetting any of the entries by OFFS would create a
   valid expression.  Return the cheapest and oldest of such expressions.  In
   *OLD, return how old the resulting expression is compared to the other
   equivalent expressions.  */

static rtx
find_reg_offset_for_const (struct table_elt *anchor_elt, HOST_WIDE_INT offs,
			   unsigned *old)
{
  struct table_elt *elt;
  unsigned idx;
  struct table_elt *match_elt;
  rtx match;

  /* Find the cheapest and *oldest* expression to maximize the chance of
     reusing the same pseudo.  */

  match_elt = NULL;
  match = NULL_RTX;
  for (elt = anchor_elt->first_same_value, idx = 0;
       elt;
       elt = elt->next_same_value, idx++)
    {
      if (match_elt && CHEAPER (match_elt, elt))
	return match;

      if (REG_P (elt->exp)
	  || (GET_CODE (elt->exp) == PLUS
	      && REG_P (XEXP (elt->exp, 0))
	      && GET_CODE (XEXP (elt->exp, 1)) == CONST_INT))
	{
	  rtx x;

	  /* Ignore expressions that are no longer valid.  */
	  if (!REG_P (elt->exp) && !exp_equiv_p (elt->exp, elt->exp, 1, false))
	    continue;

	  x = plus_constant (GET_MODE (elt->exp), elt->exp, offs);
	  if (REG_P (x)
	      || (GET_CODE (x) == PLUS
		  && IN_RANGE (INTVAL (XEXP (x, 1)),
			       -targetm.const_anchor,
			       targetm.const_anchor - 1)))
	    {
	      match = x;
	      match_elt = elt;
	      *old = idx;
	    }
	}
    }

  return match;
}

/* Try to express the constant SRC_CONST using a register+offset expression
   derived from a constant anchor.  Return it if successful or NULL_RTX,
   otherwise.  */

static rtx
try_const_anchors (rtx src_const, machine_mode mode)
{
  struct table_elt *lower_elt, *upper_elt;
  HOST_WIDE_INT lower_base, lower_offs, upper_base, upper_offs;
  rtx lower_anchor_rtx, upper_anchor_rtx;
  rtx lower_exp = NULL_RTX, upper_exp = NULL_RTX;
  unsigned lower_old, upper_old;

  /* CONST_INT is used for CC modes, but we should leave those alone.  */
  if (GET_MODE_CLASS (mode) == MODE_CC)
    return NULL_RTX;

  gcc_assert (SCALAR_INT_MODE_P (mode));
  if (!compute_const_anchors (src_const, &lower_base, &lower_offs,
			      &upper_base, &upper_offs))
    return NULL_RTX;

  lower_anchor_rtx = GEN_INT (lower_base);
  upper_anchor_rtx = GEN_INT (upper_base);
  lower_elt = lookup (lower_anchor_rtx, HASH (lower_anchor_rtx, mode), mode);
  upper_elt = lookup (upper_anchor_rtx, HASH (upper_anchor_rtx, mode), mode);

  if (lower_elt)
    lower_exp = find_reg_offset_for_const (lower_elt, lower_offs, &lower_old);
  if (upper_elt)
    upper_exp = find_reg_offset_for_const (upper_elt, upper_offs, &upper_old);

  if (!lower_exp)
    return upper_exp;
  if (!upper_exp)
    return lower_exp;

  /* Return the older expression.  */
  return (upper_old > lower_old ? upper_exp : lower_exp);
}
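
/* Usage sketch: when a constant load is scanned, cse_insn can call
   try_const_anchors (src_const, mode); with the equivalences from the
   sketch above in the table, loading 0x12340010 could be rewritten as
   (plus (reg:SI 100) (const_int -0x5668)) instead of materializing
   the constant from scratch.  */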

/* Look in or update the hash table.  */

/* Remove table element ELT from use in the table.
   HASH is its hash code, made using the HASH macro.
   It's an argument because often that is known in advance
   and we save much time not recomputing it.  */

static void
remove_from_table (struct table_elt *elt, unsigned int hash)
{
  if (elt == 0)
    return;

  /* Mark this element as removed.  See cse_insn.  */
  elt->first_same_value = 0;

  /* Remove the table element from its equivalence class.  */

  {
    struct table_elt *prev = elt->prev_same_value;
    struct table_elt *next = elt->next_same_value;

    if (next)
      next->prev_same_value = prev;

    if (prev)
      prev->next_same_value = next;
    else
      {
	struct table_elt *newfirst = next;
	while (next)
	  {
	    next->first_same_value = newfirst;
	    next = next->next_same_value;
	  }
      }
  }

  /* Remove the table element from its hash bucket.  */

  {
    struct table_elt *prev = elt->prev_same_hash;
    struct table_elt *next = elt->next_same_hash;

    if (next)
      next->prev_same_hash = prev;

    if (prev)
      prev->next_same_hash = next;
    else if (table[hash] == elt)
      table[hash] = next;
    else
      {
	/* This entry is not in the proper hash bucket.  This can happen
	   when two classes were merged by `merge_equiv_classes'.  Search
	   for the hash bucket that it heads.  This happens only very
	   rarely, so the cost is acceptable.  */
	for (hash = 0; hash < HASH_SIZE; hash++)
	  if (table[hash] == elt)
	    table[hash] = next;
      }
  }

  /* Remove the table element from its related-value circular chain.  */

  if (elt->related_value != 0 && elt->related_value != elt)
    {
      struct table_elt *p = elt->related_value;

      while (p->related_value != elt)
	p = p->related_value;
      p->related_value = elt->related_value;
      if (p->related_value == p)
	p->related_value = 0;
    }

  /* Now add it to the free element chain.  */
  elt->next_same_hash = free_element_chain;
  free_element_chain = elt;
}

/* Same as above, but X is a pseudo-register.  */

static void
remove_pseudo_from_table (rtx x, unsigned int hash)
{
  struct table_elt *elt;

  /* Because a pseudo-register can be referenced in more than one
     mode, we might have to remove more than one table entry.  */
  while ((elt = lookup_for_remove (x, hash, VOIDmode)))
    remove_from_table (elt, hash);
}

/* Look up X in the hash table and return its table element,
   or 0 if X is not in the table.

   MODE is the machine-mode of X, or if X is an integer constant
   with VOIDmode then MODE is the mode with which X will be used.

   Here we are satisfied to find an expression whose tree structure
   looks like X.  */

static struct table_elt *
lookup (rtx x, unsigned int hash, machine_mode mode)
{
  struct table_elt *p;

  for (p = table[hash]; p; p = p->next_same_hash)
    if (mode == p->mode && ((x == p->exp && REG_P (x))
			    || exp_equiv_p (x, p->exp, !REG_P (x), false)))
      return p;

  return 0;
}

/* Like `lookup' but don't care whether the table element uses invalid regs.
   Also ignore discrepancies in the machine mode of a register.  */

static struct table_elt *
lookup_for_remove (rtx x, unsigned int hash, machine_mode mode)
{
  struct table_elt *p;

  if (REG_P (x))
    {
      unsigned int regno = REGNO (x);

      /* Don't check the machine mode when comparing registers;
	 invalidating (REG:SI 0) also invalidates (REG:DF 0).  */
      for (p = table[hash]; p; p = p->next_same_hash)
	if (REG_P (p->exp)
	    && REGNO (p->exp) == regno)
	  return p;
    }
  else
    {
      for (p = table[hash]; p; p = p->next_same_hash)
	if (mode == p->mode
	    && (x == p->exp || exp_equiv_p (x, p->exp, 0, false)))
	  return p;
    }

  return 0;
}

/* Look for an expression equivalent to X and with code CODE.
   If one is found, return that expression.  */

static rtx
lookup_as_function (rtx x, enum rtx_code code)
{
  struct table_elt *p
    = lookup (x, SAFE_HASH (x, VOIDmode), GET_MODE (x));

  if (p == 0)
    return 0;

  for (p = p->first_same_value; p; p = p->next_same_value)
    if (GET_CODE (p->exp) == code
	/* Make sure this is a valid entry in the table.  */
	&& exp_equiv_p (p->exp, p->exp, 1, false))
      return p->exp;

  return 0;
}

/* Insert X in the hash table, assuming HASH is its hash code and
   CLASSP is an element of the class it should go in (or 0 if a new
   class should be made).  COST is the cost of X and reg_cost is the
   cost of registers in X.  It is inserted at the proper position to
   keep the class in the order cheapest first.

   MODE is the machine-mode of X, or if X is an integer constant
   with VOIDmode then MODE is the mode with which X will be used.

   For elements of equal cheapness, the most recent one
   goes in front, except that the first element in the list
   remains first unless a cheaper element is added.  The order of
   pseudo-registers does not matter, as canon_reg will be called to
   find the cheapest when a register is retrieved from the table.

   The in_memory field in the hash table element is set to 0.
   The caller must set it nonzero if appropriate.

   You should call insert_regs (X, CLASSP, MODIFY) before calling here,
   and if insert_regs returns a nonzero value
   you must then recompute its hash code before calling here.

   If necessary, update table showing constant values of quantities.  */

static struct table_elt *
insert_with_costs (rtx x, struct table_elt *classp, unsigned int hash,
		   machine_mode mode, int cost, int reg_cost)
{
  struct table_elt *elt;

  /* If X is a register and we haven't made a quantity for it,
     something is wrong.  */
  gcc_assert (!REG_P (x) || REGNO_QTY_VALID_P (REGNO (x)));

  /* If X is a hard register, show it is being put in the table.  */
  if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
    add_to_hard_reg_set (&hard_regs_in_table, GET_MODE (x), REGNO (x));

  /* Put an element for X into the right hash bucket.  */

  elt = free_element_chain;
  if (elt)
    free_element_chain = elt->next_same_hash;
  else
    elt = XNEW (struct table_elt);

  elt->exp = x;
  elt->canon_exp = NULL_RTX;
  elt->cost = cost;
  elt->regcost = reg_cost;
  elt->next_same_value = 0;
  elt->prev_same_value = 0;
  elt->next_same_hash = table[hash];
  elt->prev_same_hash = 0;
  elt->related_value = 0;
  elt->in_memory = 0;
  elt->mode = mode;
  elt->is_const = (CONSTANT_P (x) || fixed_base_plus_p (x));

  if (table[hash])
    table[hash]->prev_same_hash = elt;
  table[hash] = elt;

  /* Put it into the proper value-class.  */
  if (classp)
    {
      classp = classp->first_same_value;
      if (CHEAPER (elt, classp))
	/* Insert at the head of the class.  */
	{
	  struct table_elt *p;
	  elt->next_same_value = classp;
	  classp->prev_same_value = elt;
	  elt->first_same_value = elt;

	  for (p = classp; p; p = p->next_same_value)
	    p->first_same_value = elt;
	}
      else
	{
	  /* Insert not at head of the class.  */
	  /* Put it after the last element cheaper than X.  */
	  struct table_elt *p, *next;

	  for (p = classp;
	       (next = p->next_same_value) && CHEAPER (next, elt);
	       p = next)
	    ;

	  /* Put it after P and before NEXT.  */
	  elt->next_same_value = next;
	  if (next)
	    next->prev_same_value = elt;

	  elt->prev_same_value = p;
	  p->next_same_value = elt;
	  elt->first_same_value = classp;
	}
    }
  else
    elt->first_same_value = elt;

  /* If this is a constant being set equivalent to a register or a register
     being set equivalent to a constant, note the constant equivalence.

     If this is a constant, it cannot be equivalent to a different constant,
     and a constant is the only thing that can be cheaper than a register.  So
     we know the register is the head of the class (before the constant was
     inserted).

     If this is a register that is not already known equivalent to a
     constant, we must check the entire class.

     If this is a register that is already known equivalent to an insn,
     update the qtys `const_insn' to show that `this_insn' is the latest
     insn making that quantity equivalent to the constant.  */

  if (elt->is_const && classp && REG_P (classp->exp)
      && !REG_P (x))
    {
      int exp_q = REG_QTY (REGNO (classp->exp));
      struct qty_table_elem *exp_ent = &qty_table[exp_q];

      exp_ent->const_rtx = gen_lowpart (exp_ent->mode, x);
      exp_ent->const_insn = this_insn;
    }

  else if (REG_P (x)
	   && classp
	   && ! qty_table[REG_QTY (REGNO (x))].const_rtx
	   && ! elt->is_const)
    {
      struct table_elt *p;

      for (p = classp; p != 0; p = p->next_same_value)
	{
	  if (p->is_const && !REG_P (p->exp))
	    {
	      int x_q = REG_QTY (REGNO (x));
	      struct qty_table_elem *x_ent = &qty_table[x_q];

	      x_ent->const_rtx
		= gen_lowpart (GET_MODE (x), p->exp);
	      x_ent->const_insn = this_insn;
	      break;
	    }
	}
    }

  else if (REG_P (x)
	   && qty_table[REG_QTY (REGNO (x))].const_rtx
	   && GET_MODE (x) == qty_table[REG_QTY (REGNO (x))].mode)
    qty_table[REG_QTY (REGNO (x))].const_insn = this_insn;

  /* If this is a constant with symbolic value,
     and it has a term with an explicit integer value,
     link it up with related expressions.  */
  if (GET_CODE (x) == CONST)
    {
      rtx subexp = get_related_value (x);
      unsigned subhash;
      struct table_elt *subelt, *subelt_prev;

      if (subexp != 0)
	{
	  /* Get the integer-free subexpression in the hash table.  */
	  subhash = SAFE_HASH (subexp, mode);
	  subelt = lookup (subexp, subhash, mode);
	  if (subelt == 0)
	    subelt = insert (subexp, NULL, subhash, mode);
	  /* Initialize SUBELT's circular chain if it has none.  */
	  if (subelt->related_value == 0)
	    subelt->related_value = subelt;
	  /* Find the element in the circular chain that precedes SUBELT.  */
	  subelt_prev = subelt;
	  while (subelt_prev->related_value != subelt)
	    subelt_prev = subelt_prev->related_value;
	  /* Put new ELT into SUBELT's circular chain just before SUBELT.
	     This way the element that follows SUBELT is the oldest one.  */
	  elt->related_value = subelt_prev->related_value;
	  subelt_prev->related_value = elt;
	}
    }

  return elt;
}

/* Wrap insert_with_costs by passing the default costs.  */

static struct table_elt *
insert (rtx x, struct table_elt *classp, unsigned int hash,
	machine_mode mode)
{
  return insert_with_costs (x, classp, hash, mode,
			    COST (x, mode), approx_reg_cost (x));
}


/* Given two equivalence classes, CLASS1 and CLASS2, put all the entries from
   CLASS2 into CLASS1.  This is done when we have reached an insn which makes
   the two classes equivalent.

   CLASS1 will be the surviving class; CLASS2 should not be used after this
   call.

   Any invalid entries in CLASS2 will not be copied.  */

static void
merge_equiv_classes (struct table_elt *class1, struct table_elt *class2)
{
  struct table_elt *elt, *next, *new_elt;

  /* Ensure we start with the head of the classes.  */
  class1 = class1->first_same_value;
  class2 = class2->first_same_value;

  /* If they were already equal, forget it.  */
  if (class1 == class2)
    return;

  for (elt = class2; elt; elt = next)
    {
      unsigned int hash;
      rtx exp = elt->exp;
      machine_mode mode = elt->mode;

      next = elt->next_same_value;

      /* Remove old entry, make a new one in CLASS1's class.
	 Don't do this for invalid entries as we cannot find their
	 hash code (it also isn't necessary).  */
      if (REG_P (exp) || exp_equiv_p (exp, exp, 1, false))
	{
	  bool need_rehash = false;

	  hash_arg_in_memory = 0;
	  hash = HASH (exp, mode);

	  if (REG_P (exp))
	    {
	      need_rehash = REGNO_QTY_VALID_P (REGNO (exp));
	      delete_reg_equiv (REGNO (exp));
	    }

	  if (REG_P (exp) && REGNO (exp) >= FIRST_PSEUDO_REGISTER)
	    remove_pseudo_from_table (exp, hash);
	  else
	    remove_from_table (elt, hash);

	  if (insert_regs (exp, class1, 0) || need_rehash)
	    {
	      rehash_using_reg (exp);
	      hash = HASH (exp, mode);
	    }
	  new_elt = insert (exp, class1, hash, mode);
	  new_elt->in_memory = hash_arg_in_memory;
	  if (GET_CODE (exp) == ASM_OPERANDS && elt->cost == MAX_COST)
	    new_elt->cost = MAX_COST;
	}
    }
}

/* Flush the entire hash table.  */

static void
flush_hash_table (void)
{
  int i;
  struct table_elt *p;

  for (i = 0; i < HASH_SIZE; i++)
    for (p = table[i]; p; p = table[i])
      {
	/* Note that invalidate can remove elements
	   after P in the current hash chain.  */
	if (REG_P (p->exp))
	  invalidate (p->exp, VOIDmode);
	else
	  remove_from_table (p, i);
      }
}

/* Check whether an anti dependence exists between X and EXP.  MODE and
   ADDR are as for canon_anti_dependence.  */

static bool
check_dependence (const_rtx x, rtx exp, machine_mode mode, rtx addr)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, NONCONST)
    {
      const_rtx x = *iter;
      if (MEM_P (x) && canon_anti_dependence (x, true, exp, mode, addr))
	return true;
    }
  return false;
}

/* Remove from the hash table, or mark as invalid, all expressions whose
   values could be altered by storing in register X.  */

static void
invalidate_reg (rtx x)
{
  gcc_assert (GET_CODE (x) == REG);

  /* If X is a register, dependencies on its contents are recorded
     through the qty number mechanism.  Just change the qty number of
     the register, mark it as invalid for expressions that refer to it,
     and remove it itself.  */
  unsigned int regno = REGNO (x);
  unsigned int hash = HASH (x, GET_MODE (x));

  /* Remove REGNO from any quantity list it might be on and indicate
     that its value might have changed.  If it is a pseudo, remove its
     entry from the hash table.

     For a hard register, we do the first two actions above for any
     additional hard registers corresponding to X.  Then, if any of these
     registers are in the table, we must remove any REG entries that
     overlap these registers.  */

  delete_reg_equiv (regno);
  REG_TICK (regno)++;
  SUBREG_TICKED (regno) = -1;

  if (regno >= FIRST_PSEUDO_REGISTER)
    remove_pseudo_from_table (x, hash);
  else
    {
      HOST_WIDE_INT in_table = TEST_HARD_REG_BIT (hard_regs_in_table, regno);
      unsigned int endregno = END_REGNO (x);
      unsigned int rn;
      struct table_elt *p, *next;

      CLEAR_HARD_REG_BIT (hard_regs_in_table, regno);

      for (rn = regno + 1; rn < endregno; rn++)
	{
	  in_table |= TEST_HARD_REG_BIT (hard_regs_in_table, rn);
	  CLEAR_HARD_REG_BIT (hard_regs_in_table, rn);
	  delete_reg_equiv (rn);
	  REG_TICK (rn)++;
	  SUBREG_TICKED (rn) = -1;
	}

      if (in_table)
	for (hash = 0; hash < HASH_SIZE; hash++)
	  for (p = table[hash]; p; p = next)
	    {
	      next = p->next_same_hash;

	      if (!REG_P (p->exp) || REGNO (p->exp) >= FIRST_PSEUDO_REGISTER)
		continue;

	      unsigned int tregno = REGNO (p->exp);
	      unsigned int tendregno = END_REGNO (p->exp);
	      if (tendregno > regno && tregno < endregno)
		remove_from_table (p, hash);
	    }
    }
}

/* Remove from the hash table, or mark as invalid, all expressions whose
   values could be altered by storing in X.  X is a register, a subreg, or
   a memory reference with nonvarying address (because, when a memory
   reference with a varying address is stored in, all memory references are
   removed by invalidate_memory so specific invalidation is superfluous).
   FULL_MODE, if not VOIDmode, indicates that this much should be
   invalidated instead of just the amount indicated by the mode of X.  This
   is only used for bitfield stores into memory.

   A nonvarying address may be just a register or just a symbol reference,
   or it may be either of those plus a numeric offset.  */
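
/* For instance (an illustration, not target-specific): after a store to
   (mem:SI (plus:SI (reg:SI 100) (const_int -4))), invalidate is called on
   that MEM, and any in_memory table entry whose canonical form may overlap
   it according to check_dependence is removed.  */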

static void
invalidate (rtx x, machine_mode full_mode)
{
  int i;
  struct table_elt *p;
  rtx addr;

  switch (GET_CODE (x))
    {
    case REG:
      invalidate_reg (x);
      return;

    case SUBREG:
      invalidate (SUBREG_REG (x), VOIDmode);
      return;

    case PARALLEL:
      for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
	invalidate (XVECEXP (x, 0, i), VOIDmode);
      return;

    case EXPR_LIST:
      /* This is part of a disjoint return value; extract the location in
	 question ignoring the offset.  */
      invalidate (XEXP (x, 0), VOIDmode);
      return;

    case MEM:
      addr = canon_rtx (get_addr (XEXP (x, 0)));
      /* Calculate the canonical version of X here so that
	 true_dependence doesn't generate new RTL for X on each call.  */
      x = canon_rtx (x);

      /* Remove all hash table elements that refer to overlapping pieces of
	 memory.  */
      if (full_mode == VOIDmode)
	full_mode = GET_MODE (x);

      for (i = 0; i < HASH_SIZE; i++)
	{
	  struct table_elt *next;

	  for (p = table[i]; p; p = next)
	    {
	      next = p->next_same_hash;
	      if (p->in_memory)
		{
		  /* Just canonicalize the expression once;
		     otherwise each time we call invalidate
		     true_dependence will canonicalize the
		     expression again.  */
		  if (!p->canon_exp)
		    p->canon_exp = canon_rtx (p->exp);
		  if (check_dependence (p->canon_exp, x, full_mode, addr))
		    remove_from_table (p, i);
		}
	    }
	}
      return;

    default:
      gcc_unreachable ();
    }
}

/* Invalidate DEST.  Used when DEST is not going to be added
   into the hash table for some reason, e.g. do_not_record
   flagged on it.  */

static void
invalidate_dest (rtx dest)
{
  if (REG_P (dest)
      || GET_CODE (dest) == SUBREG
      || MEM_P (dest))
    invalidate (dest, VOIDmode);
  else if (GET_CODE (dest) == STRICT_LOW_PART
	   || GET_CODE (dest) == ZERO_EXTRACT)
    invalidate (XEXP (dest, 0), GET_MODE (dest));
}

/* Remove all expressions that refer to register REGNO,
   since they are already invalid, and we are about to
   mark that register valid again and don't want the old
   expressions to reappear as valid.  */

static void
remove_invalid_refs (unsigned int regno)
{
  unsigned int i;
  struct table_elt *p, *next;

  for (i = 0; i < HASH_SIZE; i++)
    for (p = table[i]; p; p = next)
      {
	next = p->next_same_hash;
	if (!REG_P (p->exp) && refers_to_regno_p (regno, p->exp))
	  remove_from_table (p, i);
      }
}

/* Likewise for a subreg with subreg_reg REGNO, subreg_byte OFFSET,
   and mode MODE.  */
static void
remove_invalid_subreg_refs (unsigned int regno, poly_uint64 offset,
			    machine_mode mode)
{
  unsigned int i;
  struct table_elt *p, *next;

  for (i = 0; i < HASH_SIZE; i++)
    for (p = table[i]; p; p = next)
      {
	rtx exp = p->exp;
	next = p->next_same_hash;

	if (!REG_P (exp)
	    && (GET_CODE (exp) != SUBREG
		|| !REG_P (SUBREG_REG (exp))
		|| REGNO (SUBREG_REG (exp)) != regno
		|| ranges_maybe_overlap_p (SUBREG_BYTE (exp),
					   GET_MODE_SIZE (GET_MODE (exp)),
					   offset, GET_MODE_SIZE (mode)))
	    && refers_to_regno_p (regno, p->exp))
	  remove_from_table (p, i);
      }
}

/* Recompute the hash codes of any valid entries in the hash table that
   reference X, if X is a register, or SUBREG_REG (X) if X is a SUBREG.

   This is called when we make a jump equivalence.  */

static void
rehash_using_reg (rtx x)
{
  unsigned int i;
  struct table_elt *p, *next;
  unsigned hash;

  if (GET_CODE (x) == SUBREG)
    x = SUBREG_REG (x);

  /* If X is not a register or if the register is known not to be in any
     valid entries in the table, we have no work to do.  */

  if (!REG_P (x)
      || REG_IN_TABLE (REGNO (x)) < 0
      || REG_IN_TABLE (REGNO (x)) != REG_TICK (REGNO (x)))
    return;

  /* Scan all hash chains looking for valid entries that mention X.
     If we find one and it is in the wrong hash chain, move it.  */

  for (i = 0; i < HASH_SIZE; i++)
    for (p = table[i]; p; p = next)
      {
	next = p->next_same_hash;
	if (reg_mentioned_p (x, p->exp)
	    && exp_equiv_p (p->exp, p->exp, 1, false)
	    && i != (hash = SAFE_HASH (p->exp, p->mode)))
	  {
	    if (p->next_same_hash)
	      p->next_same_hash->prev_same_hash = p->prev_same_hash;

	    if (p->prev_same_hash)
	      p->prev_same_hash->next_same_hash = p->next_same_hash;
	    else
	      table[i] = p->next_same_hash;

	    p->next_same_hash = table[hash];
	    p->prev_same_hash = 0;
	    if (table[hash])
	      table[hash]->prev_same_hash = p;
	    table[hash] = p;
	  }
      }
}

/* Remove from the hash table any expression that is a call-clobbered
   register in INSN.  Also update their TICK values.  */

static void
invalidate_for_call (rtx_insn *insn)
{
  unsigned int regno;
  unsigned hash;
  struct table_elt *p, *next;
  int in_table = 0;
  hard_reg_set_iterator hrsi;

  /* Go through all the hard registers.  For each that might be clobbered
     in call insn INSN, remove the register from quantity chains and update
     reg_tick if defined.  Also see if any of these registers is currently
     in the table.

     ??? We could be more precise for partially-clobbered registers,
     and only invalidate values that actually occupy the clobbered part
     of the registers.  It doesn't seem worth the effort though, since
     we shouldn't see this situation much before RA.  Whatever choice
     we make here has to be consistent with the table walk below,
     so any change to this test will require a change there too.  */
  HARD_REG_SET callee_clobbers
    = insn_callee_abi (insn).full_and_partial_reg_clobbers ();
  EXECUTE_IF_SET_IN_HARD_REG_SET (callee_clobbers, 0, regno, hrsi)
    {
      delete_reg_equiv (regno);
      if (REG_TICK (regno) >= 0)
	{
	  REG_TICK (regno)++;
	  SUBREG_TICKED (regno) = -1;
	}
      in_table |= (TEST_HARD_REG_BIT (hard_regs_in_table, regno) != 0);
    }

  /* In the case where we have no call-clobbered hard registers in the
     table, we are done.  Otherwise, scan the table and remove any
     entry that overlaps a call-clobbered register.  */

  if (in_table)
    for (hash = 0; hash < HASH_SIZE; hash++)
      for (p = table[hash]; p; p = next)
	{
	  next = p->next_same_hash;

	  if (!REG_P (p->exp)
	      || REGNO (p->exp) >= FIRST_PSEUDO_REGISTER)
	    continue;

	  /* This must use the same test as above rather than the
	     more accurate clobbers_reg_p.  */
	  if (overlaps_hard_reg_set_p (callee_clobbers, GET_MODE (p->exp),
				       REGNO (p->exp)))
	    remove_from_table (p, hash);
	}
}

/* Given an expression X of type CONST,
   and ELT which is its table entry (or 0 if it
   is not in the hash table),
   return an alternate expression for X as a register plus integer.
   If none can be found, return 0.  */
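
/* For example (hypothetical RTL): if X is
   (const (plus (symbol_ref "s") (const_int 12))) and the table already
   records a register as equivalent to
   (const (plus (symbol_ref "s") (const_int 4))), the result is
   (plus (reg) (const_int 8)), built by the plus_constant call below.  */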

static rtx
use_related_value (rtx x, struct table_elt *elt)
{
  struct table_elt *relt = 0;
  struct table_elt *p, *q;
  HOST_WIDE_INT offset;

  /* First, is there anything related known?
     If we have a table element, we can tell from that.
     Otherwise, must look it up.  */

  if (elt != 0 && elt->related_value != 0)
    relt = elt;
  else if (elt == 0 && GET_CODE (x) == CONST)
    {
      rtx subexp = get_related_value (x);
      if (subexp != 0)
	relt = lookup (subexp,
		       SAFE_HASH (subexp, GET_MODE (subexp)),
		       GET_MODE (subexp));
    }

  if (relt == 0)
    return 0;

  /* Search all related table entries for one that has an
     equivalent register.  */

  p = relt;
  while (1)
    {
      /* This loop is strange in that it is executed in two different cases.
	 The first is when X is already in the table.  Then it is searching
	 the RELATED_VALUE list of X's class (RELT).  The second case is when
	 X is not in the table.  Then RELT points to a class for the related
	 value.

	 Ensure that, whatever case we are in, we ignore classes that have
	 the same value as X.  */

      if (rtx_equal_p (x, p->exp))
	q = 0;
      else
	for (q = p->first_same_value; q; q = q->next_same_value)
	  if (REG_P (q->exp))
	    break;

      if (q)
	break;

      p = p->related_value;

      /* We went all the way around, so there is nothing to be found.
	 Alternatively, perhaps RELT was in the table for some other reason
	 and it has no related values recorded.  */
      if (p == relt || p == 0)
	break;
    }

  if (q == 0)
    return 0;

  offset = (get_integer_term (x) - get_integer_term (p->exp));
  /* Note: OFFSET may be 0 if P->exp and X are related by commutativity.  */
  return plus_constant (q->mode, q->exp, offset);
}


/* Hash a string.  Just add its bytes up.  */
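/* For example, hash_rtx_string ("ab") is 'a' + 'b', i.e. 97 + 98 == 195.
   The sum ignores byte order, so "ab" and "ba" collide; that is cheap
   and good enough for a hash contribution.  */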
static inline unsigned
hash_rtx_string (const char *ps)
{
  unsigned hash = 0;
  const unsigned char *p = (const unsigned char *) ps;

  if (p)
    while (*p)
      hash += *p++;

  return hash;
}

/* Same as hash_rtx, but call CB on each rtx if it is not NULL.
   When the callback returns true, we continue with the new rtx.  */

unsigned
hash_rtx_cb (const_rtx x, machine_mode mode,
             int *do_not_record_p, int *hash_arg_in_memory_p,
             bool have_reg_qty, hash_rtx_callback_function cb)
{
  int i, j;
  unsigned hash = 0;
  enum rtx_code code;
  const char *fmt;
  machine_mode newmode;
  rtx newx;

  /* Used to turn recursion into iteration.  We can't rely on GCC's
     tail-recursion elimination since we need to keep accumulating values
     in HASH.  */
 repeat:
  if (x == 0)
    return hash;

  /* Invoke the callback first.  */
  if (cb != NULL
      && ((*cb) (x, mode, &newx, &newmode)))
    {
      hash += hash_rtx_cb (newx, newmode, do_not_record_p,
                           hash_arg_in_memory_p, have_reg_qty, cb);
      return hash;
    }

  code = GET_CODE (x);
  switch (code)
    {
    case REG:
      {
	unsigned int regno = REGNO (x);

	if (do_not_record_p && !reload_completed)
	  {
	    /* On some machines, we can't record any non-fixed hard register,
	       because extending its life will cause reload problems.  We
	       consider ap, fp, sp, gp to be fixed for this purpose.

	       We also consider CCmode registers to be fixed for this purpose;
	       failure to do so leads to a failure to simplify conditionals
	       of the 0<100 type.

	       On all machines, we can't record any global registers.
	       Nor should we record any register that is in a small
	       class, as defined by TARGET_CLASS_LIKELY_SPILLED_P.  */
	    bool record;

	    if (regno >= FIRST_PSEUDO_REGISTER)
	      record = true;
	    else if (x == frame_pointer_rtx
		     || x == hard_frame_pointer_rtx
		     || x == arg_pointer_rtx
		     || x == stack_pointer_rtx
		     || x == pic_offset_table_rtx)
	      record = true;
	    else if (global_regs[regno])
	      record = false;
	    else if (fixed_regs[regno])
	      record = true;
	    else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC)
	      record = true;
	    else if (targetm.small_register_classes_for_mode_p (GET_MODE (x)))
	      record = false;
	    else if (targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno)))
	      record = false;
	    else
	      record = true;

	    if (!record)
	      {
		*do_not_record_p = 1;
		return 0;
	      }
	  }

	hash += ((unsigned int) REG << 7);
        hash += (have_reg_qty ? (unsigned) REG_QTY (regno) : regno);
	return hash;
      }

    /* We handle SUBREG of a REG specially because the underlying
       reg changes its hash value with every value change; we don't
       want to have to forget unrelated subregs when one subreg changes.  */
    case SUBREG:
      {
	if (REG_P (SUBREG_REG (x)))
	  {
	    hash += (((unsigned int) SUBREG << 7)
		     + REGNO (SUBREG_REG (x))
		     + (constant_lower_bound (SUBREG_BYTE (x))
			/ UNITS_PER_WORD));
	    return hash;
	  }
	break;
      }

    case CONST_INT:
      hash += (((unsigned int) CONST_INT << 7) + (unsigned int) mode
               + (unsigned int) INTVAL (x));
      return hash;

    case CONST_WIDE_INT:
      for (i = 0; i < CONST_WIDE_INT_NUNITS (x); i++)
	hash += CONST_WIDE_INT_ELT (x, i);
      return hash;

    case CONST_POLY_INT:
      {
	inchash::hash h;
	h.add_int (hash);
	for (unsigned int i = 0; i < NUM_POLY_INT_COEFFS; ++i)
	  h.add_wide_int (CONST_POLY_INT_COEFFS (x)[i]);
	return h.end ();
      }

    case CONST_DOUBLE:
      /* This is like the general case, except that it only counts
	 the integers representing the constant.  */
      hash += (unsigned int) code + (unsigned int) GET_MODE (x);
      if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (x) == VOIDmode)
	hash += ((unsigned int) CONST_DOUBLE_LOW (x)
		 + (unsigned int) CONST_DOUBLE_HIGH (x));
      else
	hash += real_hash (CONST_DOUBLE_REAL_VALUE (x));
      return hash;

    case CONST_FIXED:
      hash += (unsigned int) code + (unsigned int) GET_MODE (x);
      hash += fixed_hash (CONST_FIXED_VALUE (x));
      return hash;

    case CONST_VECTOR:
      {
	int units;
	rtx elt;

	units = const_vector_encoded_nelts (x);

	for (i = 0; i < units; ++i)
	  {
	    elt = CONST_VECTOR_ENCODED_ELT (x, i);
	    hash += hash_rtx_cb (elt, GET_MODE (elt),
                                 do_not_record_p, hash_arg_in_memory_p,
                                 have_reg_qty, cb);
	  }

	return hash;
      }

      /* Assume there is only one rtx object for any given label.  */
    case LABEL_REF:
      /* We don't hash on the address of the CODE_LABEL to avoid bootstrap
	 differences and differences between each stage's debugging dumps.  */
      hash += (((unsigned int) LABEL_REF << 7)
	       + CODE_LABEL_NUMBER (label_ref_label (x)));
      return hash;

    case SYMBOL_REF:
      {
	/* Don't hash on the symbol's address to avoid bootstrap differences.
	   Different hash values may cause expressions to be recorded in
	   different orders and thus different registers to be used in the
	   final assembler.  This also avoids differences in the dump files
	   between various stages.  */
	unsigned int h = 0;
	const unsigned char *p = (const unsigned char *) XSTR (x, 0);

	while (*p)
	  h += (h << 7) + *p++; /* ??? revisit */

	hash += ((unsigned int) SYMBOL_REF << 7) + h;
	return hash;
      }

    case MEM:
      /* We don't record if marked volatile or if BLKmode since we don't
	 know the size of the move.  */
      if (do_not_record_p && (MEM_VOLATILE_P (x) || GET_MODE (x) == BLKmode))
	{
	  *do_not_record_p = 1;
	  return 0;
	}
      if (hash_arg_in_memory_p && !MEM_READONLY_P (x))
	*hash_arg_in_memory_p = 1;

      /* Now that we have already found this special case,
	 might as well speed it up as much as possible.  */
      hash += (unsigned) MEM;
      x = XEXP (x, 0);
      goto repeat;

    case USE:
      /* A USE that mentions non-volatile memory needs special
	 handling since the MEM may be BLKmode which normally
	 prevents an entry from being made.  Pure calls are
	 marked by a USE which mentions BLKmode memory.
	 See calls.c:emit_call_1.  */
      if (MEM_P (XEXP (x, 0))
	  && ! MEM_VOLATILE_P (XEXP (x, 0)))
	{
	  hash += (unsigned) USE;
	  x = XEXP (x, 0);

	  if (hash_arg_in_memory_p && !MEM_READONLY_P (x))
	    *hash_arg_in_memory_p = 1;

	  /* Now that we have already found this special case,
	     might as well speed it up as much as possible.  */
	  hash += (unsigned) MEM;
	  x = XEXP (x, 0);
	  goto repeat;
	}
      break;

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case PRE_MODIFY:
    case POST_MODIFY:
    case PC:
    case CC0:
    case CALL:
    case UNSPEC_VOLATILE:
      if (do_not_record_p)
	{
	  *do_not_record_p = 1;
	  return 0;
	}
      else
	return hash;
      break;

    case ASM_OPERANDS:
      if (do_not_record_p && MEM_VOLATILE_P (x))
	{
	  *do_not_record_p = 1;
	  return 0;
	}
      else
	{
	  /* We don't want to take the filename and line into account.  */
	  hash += (unsigned) code + (unsigned) GET_MODE (x)
	    + hash_rtx_string (ASM_OPERANDS_TEMPLATE (x))
	    + hash_rtx_string (ASM_OPERANDS_OUTPUT_CONSTRAINT (x))
	    + (unsigned) ASM_OPERANDS_OUTPUT_IDX (x);

	  if (ASM_OPERANDS_INPUT_LENGTH (x))
	    {
	      for (i = 1; i < ASM_OPERANDS_INPUT_LENGTH (x); i++)
		{
		  hash += (hash_rtx_cb (ASM_OPERANDS_INPUT (x, i),
                                        GET_MODE (ASM_OPERANDS_INPUT (x, i)),
                                        do_not_record_p, hash_arg_in_memory_p,
                                        have_reg_qty, cb)
			   + hash_rtx_string
                           (ASM_OPERANDS_INPUT_CONSTRAINT (x, i)));
		}

	      hash += hash_rtx_string (ASM_OPERANDS_INPUT_CONSTRAINT (x, 0));
	      x = ASM_OPERANDS_INPUT (x, 0);
	      mode = GET_MODE (x);
	      goto repeat;
	    }

	  return hash;
	}
      break;

    default:
      break;
    }

  i = GET_RTX_LENGTH (code) - 1;
  hash += (unsigned) code + (unsigned) GET_MODE (x);
  fmt = GET_RTX_FORMAT (code);
  for (; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'e':
	  /* If we are about to do the last recursive call
	     needed at this level, change it into iteration.
	     This function is called enough to be worth it.  */
	  if (i == 0)
	    {
	      x = XEXP (x, i);
	      goto repeat;
	    }

	  hash += hash_rtx_cb (XEXP (x, i), VOIDmode, do_not_record_p,
                               hash_arg_in_memory_p,
                               have_reg_qty, cb);
	  break;

	case 'E':
	  for (j = 0; j < XVECLEN (x, i); j++)
	    hash += hash_rtx_cb (XVECEXP (x, i, j), VOIDmode, do_not_record_p,
                                 hash_arg_in_memory_p,
                                 have_reg_qty, cb);
	  break;

	case 's':
	  hash += hash_rtx_string (XSTR (x, i));
	  break;

	case 'i':
	  hash += (unsigned int) XINT (x, i);
	  break;

	case 'p':
	  hash += constant_lower_bound (SUBREG_BYTE (x));
	  break;

	case '0': case 't':
	  /* Unused.  */
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  return hash;
}

/* Hash an rtx.  We are careful to make sure the value is never negative.
   Equivalent registers hash identically.
   MODE is used in hashing for CONST_INTs only;
   otherwise the mode of X is used.

   Store 1 in DO_NOT_RECORD_P if any subexpression is volatile.

   If HASH_ARG_IN_MEMORY_P is not NULL, store 1 in it if X contains
   a MEM rtx which does not have the MEM_READONLY_P flag set.

   Note that cse_insn knows that the hash code of a MEM expression
   is just (int) MEM plus the hash code of the address.  */
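
/* Illustration: when HAVE_REG_QTY is true, two pseudos currently in the
   same quantity class hash identically, because the REG case of
   hash_rtx_cb mixes in REG_QTY (REGNO (x)) rather than the register
   number itself.  */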

unsigned
hash_rtx (const_rtx x, machine_mode mode, int *do_not_record_p,
	  int *hash_arg_in_memory_p, bool have_reg_qty)
{
  return hash_rtx_cb (x, mode, do_not_record_p,
                      hash_arg_in_memory_p, have_reg_qty, NULL);
}

/* Hash an rtx X for cse via hash_rtx.
   Stores 1 in do_not_record if any subexpression is volatile.
   Stores 1 in hash_arg_in_memory if X contains a mem rtx which
   does not have the MEM_READONLY_P flag set.  */

static inline unsigned
canon_hash (rtx x, machine_mode mode)
{
  return hash_rtx (x, mode, &do_not_record, &hash_arg_in_memory, true);
}

/* Like canon_hash but with no side effects, i.e. do_not_record
   and hash_arg_in_memory are not changed.  */

static inline unsigned
safe_hash (rtx x, machine_mode mode)
{
  int dummy_do_not_record;
  return hash_rtx (x, mode, &dummy_do_not_record, NULL, true);
}

/* Return 1 iff X and Y would canonicalize into the same thing,
   without actually constructing the canonicalization of either one.
   If VALIDATE is nonzero,
   we assume X is an expression being processed from the rtl
   and Y was found in the hash table.  We check register refs
   in Y for being marked as valid.

   If FOR_GCSE is true, we compare X and Y for equivalence for GCSE.  */
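
/* For instance, (plus:SI (reg 100) (reg 101)) and
   (plus:SI (reg 101) (reg 100)) compare as equivalent through the
   commutative case below, provided the register pairs pass the REG
   checks (same quantities, and up-to-date if VALIDATE).  */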

int
exp_equiv_p (const_rtx x, const_rtx y, int validate, bool for_gcse)
{
  int i, j;
  enum rtx_code code;
  const char *fmt;

  /* Note: it is incorrect to assume an expression is equivalent to itself
     if VALIDATE is nonzero.  */
  if (x == y && !validate)
    return 1;

  if (x == 0 || y == 0)
    return x == y;

  code = GET_CODE (x);
  if (code != GET_CODE (y))
    return 0;

  /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.  */
  if (GET_MODE (x) != GET_MODE (y))
    return 0;

  /* MEMs referring to different address spaces are not equivalent.  */
  if (code == MEM && MEM_ADDR_SPACE (x) != MEM_ADDR_SPACE (y))
    return 0;

  switch (code)
    {
    case PC:
    case CC0:
    CASE_CONST_UNIQUE:
      return x == y;

    case LABEL_REF:
      return label_ref_label (x) == label_ref_label (y);

    case SYMBOL_REF:
      return XSTR (x, 0) == XSTR (y, 0);

    case REG:
      if (for_gcse)
	return REGNO (x) == REGNO (y);
      else
	{
	  unsigned int regno = REGNO (y);
	  unsigned int i;
	  unsigned int endregno = END_REGNO (y);

	  /* If the quantities are not the same, the expressions are not
	     equivalent.  If they are the same and we are not validating,
	     they are equivalent.  Otherwise, ensure all regs are
	     up-to-date.  */

	  if (REG_QTY (REGNO (x)) != REG_QTY (regno))
	    return 0;

	  if (! validate)
	    return 1;

	  for (i = regno; i < endregno; i++)
	    if (REG_IN_TABLE (i) != REG_TICK (i))
	      return 0;

	  return 1;
	}

    case MEM:
      if (for_gcse)
	{
	  /* A volatile mem should not be considered equivalent to any
	     other.  */
	  if (MEM_VOLATILE_P (x) || MEM_VOLATILE_P (y))
	    return 0;

	  /* Can't merge two expressions in different alias sets, since we
	     can decide that the expression is transparent in a block when
	     it isn't, due to it being set with the different alias set.

	     Also, can't merge two expressions with different MEM_ATTRS.
	     They could e.g. be two different entities allocated into the
	     same space on the stack (see e.g. PR25130).  In that case, the
	     MEM addresses can be the same, even though the two MEMs are
	     absolutely not equivalent.

	     But because really all MEM attributes should be the same for
	     equivalent MEMs, we just use the invariant that MEMs that have
	     the same attributes share the same mem_attrs data structure.  */
	  if (!mem_attrs_eq_p (MEM_ATTRS (x), MEM_ATTRS (y)))
	    return 0;

	  /* If we are handling exceptions, we cannot consider two expressions
	     with different trapping status as equivalent, because simple_mem
	     might accept one and reject the other.  */
	  if (cfun->can_throw_non_call_exceptions
	      && (MEM_NOTRAP_P (x) != MEM_NOTRAP_P (y)))
	    return 0;
	}
      break;

    /* For commutative operations, check both orders.  */
    case PLUS:
    case MULT:
    case AND:
    case IOR:
    case XOR:
    case NE:
    case EQ:
      return ((exp_equiv_p (XEXP (x, 0), XEXP (y, 0),
			     validate, for_gcse)
	       && exp_equiv_p (XEXP (x, 1), XEXP (y, 1),
				validate, for_gcse))
	      || (exp_equiv_p (XEXP (x, 0), XEXP (y, 1),
				validate, for_gcse)
		  && exp_equiv_p (XEXP (x, 1), XEXP (y, 0),
				   validate, for_gcse)));

    case ASM_OPERANDS:
      /* We don't use the generic code below because we want to
	 disregard filename and line numbers.  */

      /* A volatile asm isn't equivalent to any other.  */
      if (MEM_VOLATILE_P (x) || MEM_VOLATILE_P (y))
	return 0;

      if (GET_MODE (x) != GET_MODE (y)
	  || strcmp (ASM_OPERANDS_TEMPLATE (x), ASM_OPERANDS_TEMPLATE (y))
	  || strcmp (ASM_OPERANDS_OUTPUT_CONSTRAINT (x),
		     ASM_OPERANDS_OUTPUT_CONSTRAINT (y))
	  || ASM_OPERANDS_OUTPUT_IDX (x) != ASM_OPERANDS_OUTPUT_IDX (y)
	  || ASM_OPERANDS_INPUT_LENGTH (x) != ASM_OPERANDS_INPUT_LENGTH (y))
	return 0;

      if (ASM_OPERANDS_INPUT_LENGTH (x))
	{
	  for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
	    if (! exp_equiv_p (ASM_OPERANDS_INPUT (x, i),
			       ASM_OPERANDS_INPUT (y, i),
			       validate, for_gcse)
		|| strcmp (ASM_OPERANDS_INPUT_CONSTRAINT (x, i),
			   ASM_OPERANDS_INPUT_CONSTRAINT (y, i)))
	      return 0;
	}

      return 1;

    default:
      break;
    }

  /* Compare the elements.  If any pair of corresponding elements
     fail to match, return 0 for the whole thing.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      switch (fmt[i])
	{
	case 'e':
	  if (! exp_equiv_p (XEXP (x, i), XEXP (y, i),
			      validate, for_gcse))
	    return 0;
	  break;

	case 'E':
	  if (XVECLEN (x, i) != XVECLEN (y, i))
	    return 0;
	  for (j = 0; j < XVECLEN (x, i); j++)
	    if (! exp_equiv_p (XVECEXP (x, i, j), XVECEXP (y, i, j),
				validate, for_gcse))
	      return 0;
	  break;

	case 's':
	  if (strcmp (XSTR (x, i), XSTR (y, i)))
	    return 0;
	  break;

	case 'i':
	  if (XINT (x, i) != XINT (y, i))
	    return 0;
	  break;

	case 'w':
	  if (XWINT (x, i) != XWINT (y, i))
	    return 0;
	  break;

	case 'p':
	  if (maybe_ne (SUBREG_BYTE (x), SUBREG_BYTE (y)))
	    return 0;
	  break;

	case '0':
	case 't':
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  return 1;
}

/* Subroutine of canon_reg.  Pass *XLOC through canon_reg, and validate
   the result if necessary.  INSN is as for canon_reg.  */

static void
validate_canon_reg (rtx *xloc, rtx_insn *insn)
{
  if (*xloc)
    {
      rtx new_rtx = canon_reg (*xloc, insn);

      /* If replacing pseudo with hard reg or vice versa, ensure the
         insn remains valid.  Likewise if the insn has MATCH_DUPs.  */
      gcc_assert (insn && new_rtx);
      validate_change (insn, xloc, new_rtx, 1);
    }
}

/* Canonicalize an expression:
   replace each register reference inside it
   with the "oldest" equivalent register.

   If INSN is nonzero validate_change is used to ensure that INSN remains valid
   after we make our substitution.  The calls are made with IN_GROUP nonzero
   so apply_change_group must be called upon the outermost return from this
   function (unless INSN is zero).  The result of apply_change_group can
   generally be discarded since the changes we are making are optional.  */
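
/* A small example: if pseudos 100 and 105 are currently in the same
   quantity, with qty_table[q].first_reg == 100, then
   canon_reg ((reg:SI 105), insn) returns (reg:SI 100), the oldest
   equivalent register.  */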

static rtx
canon_reg (rtx x, rtx_insn *insn)
{
  int i;
  enum rtx_code code;
  const char *fmt;

  if (x == 0)
    return x;

  code = GET_CODE (x);
  switch (code)
    {
    case PC:
    case CC0:
    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return x;

    case REG:
      {
	int first;
	int q;
	struct qty_table_elem *ent;

	/* Never replace a hard reg, because hard regs can appear
	   in more than one machine mode, and we must preserve the mode
	   of each occurrence.  Also, some hard regs appear in
	   MEMs that are shared and mustn't be altered.  Don't try to
	   replace any reg that maps to a reg of class NO_REGS.  */
	if (REGNO (x) < FIRST_PSEUDO_REGISTER
	    || ! REGNO_QTY_VALID_P (REGNO (x)))
	  return x;

	q = REG_QTY (REGNO (x));
	ent = &qty_table[q];
	first = ent->first_reg;
	return (first >= FIRST_PSEUDO_REGISTER ? regno_reg_rtx[first]
		: REGNO_REG_CLASS (first) == NO_REGS ? x
		: gen_rtx_REG (ent->mode, first));
      }

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      int j;

      if (fmt[i] == 'e')
	validate_canon_reg (&XEXP (x, i), insn);
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  validate_canon_reg (&XVECEXP (x, i, j), insn);
    }

  return x;
}

/* Given an operation (CODE, *PARG1, *PARG2), where code is a comparison
   operation (EQ, NE, GT, etc.), follow it back through the hash table and
   find what values are actually being compared.

   *PARG1 and *PARG2 are updated to contain the rtx representing the values
   actually being compared.  For example, if *PARG1 was (cc0) and *PARG2
   was (const_int 0), *PARG1 and *PARG2 will be set to the objects that were
   compared to produce cc0.

   The return value is the comparison operator: either CODE itself or the
   code corresponding to the inverse of the comparison.  */
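
/* Illustration (hypothetical): with CODE == EQ, *PARG1 == (reg 100) and
   *PARG2 == (const_int 0), if reg 100 is known equivalent to
   (gt:SI (reg 101) (reg 102)), then *PARG1 and *PARG2 become (reg 101)
   and (reg 102) and the returned code is LE, the inverse of GT.  */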

static enum rtx_code
find_comparison_args (enum rtx_code code, rtx *parg1, rtx *parg2,
		      machine_mode *pmode1, machine_mode *pmode2)
{
  rtx arg1, arg2;
  hash_set<rtx> *visited = NULL;
  /* Set nonzero when we find something of interest.  */
  rtx x = NULL;

  arg1 = *parg1, arg2 = *parg2;

  /* If ARG2 is const0_rtx, see what ARG1 is equivalent to.  */

  while (arg2 == CONST0_RTX (GET_MODE (arg1)))
    {
      int reverse_code = 0;
      struct table_elt *p = 0;

      /* Remember state from previous iteration.  */
      if (x)
	{
	  if (!visited)
	    visited = new hash_set<rtx>;
	  visited->add (x);
	  x = 0;
	}

      /* If arg1 is a COMPARE, extract the comparison arguments from it.
	 On machines with CC0, this is the only case that can occur, since
	 fold_rtx will return the COMPARE or item being compared with zero
	 when given CC0.  */

      if (GET_CODE (arg1) == COMPARE && arg2 == const0_rtx)
	x = arg1;

      /* If ARG1 is a comparison operator and CODE is testing for
	 STORE_FLAG_VALUE, get the inner arguments.  */

      else if (COMPARISON_P (arg1))
	{
#ifdef FLOAT_STORE_FLAG_VALUE
	  REAL_VALUE_TYPE fsfv;
#endif

	  if (code == NE
	      || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_INT
		  && code == LT && STORE_FLAG_VALUE == -1)
#ifdef FLOAT_STORE_FLAG_VALUE
	      || (SCALAR_FLOAT_MODE_P (GET_MODE (arg1))
		  && (fsfv = FLOAT_STORE_FLAG_VALUE (GET_MODE (arg1)),
		      REAL_VALUE_NEGATIVE (fsfv)))
#endif
	      )
	    x = arg1;
	  else if (code == EQ
		   || (GET_MODE_CLASS (GET_MODE (arg1)) == MODE_INT
		       && code == GE && STORE_FLAG_VALUE == -1)
#ifdef FLOAT_STORE_FLAG_VALUE
		   || (SCALAR_FLOAT_MODE_P (GET_MODE (arg1))
		       && (fsfv = FLOAT_STORE_FLAG_VALUE (GET_MODE (arg1)),
			   REAL_VALUE_NEGATIVE (fsfv)))
#endif
		   )
	    x = arg1, reverse_code = 1;
	}

      /* ??? We could also check for

	 (ne (and (eq (...) (const_int 1))) (const_int 0))

	 and related forms, but let's wait until we see them occurring.  */

      if (x == 0)
	/* Look up ARG1 in the hash table and see if it has an equivalence
	   that lets us see what is being compared.  */
	p = lookup (arg1, SAFE_HASH (arg1, GET_MODE (arg1)), GET_MODE (arg1));
      if (p)
	{
	  p = p->first_same_value;

	  /* If what we compare is already known to be constant, that is as
	     good as it gets.
	     We need to break the loop in this case, because otherwise we
	     can have an infinite loop when looking at a reg that is known
	     to be a constant which is the same as a comparison of a reg
	     against zero which appears later in the insn stream, which in
	     turn is constant and the same as the comparison of the first reg
	     against zero...  */
	  if (p->is_const)
	    break;
	}

      for (; p; p = p->next_same_value)
	{
	  machine_mode inner_mode = GET_MODE (p->exp);
#ifdef FLOAT_STORE_FLAG_VALUE
	  REAL_VALUE_TYPE fsfv;
#endif

	  /* If the entry isn't valid, skip it.  */
	  if (! exp_equiv_p (p->exp, p->exp, 1, false))
	    continue;

	  /* If it's a comparison we've used before, skip it.  */
	  if (visited && visited->contains (p->exp))
	    continue;

	  if (GET_CODE (p->exp) == COMPARE
	      /* Another possibility is that this machine has a compare insn
		 that includes the comparison code.  In that case, ARG1 would
		 be equivalent to a comparison operation that would set ARG1 to
		 either STORE_FLAG_VALUE or zero.  If this is an NE operation,
		 ORIG_CODE is the actual comparison being done; if it is an EQ,
		 we must reverse ORIG_CODE.  On machines with a negative value
		 for STORE_FLAG_VALUE, also look at LT and GE operations.  */
	      || ((code == NE
		   || (code == LT
		       && val_signbit_known_set_p (inner_mode,
						   STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
		   || (code == LT
		       && SCALAR_FLOAT_MODE_P (inner_mode)
		       && (fsfv = FLOAT_STORE_FLAG_VALUE (GET_MODE (arg1)),
			   REAL_VALUE_NEGATIVE (fsfv)))
#endif
		   )
		  && COMPARISON_P (p->exp)))
	    {
	      x = p->exp;
	      break;
	    }
	  else if ((code == EQ
		    || (code == GE
			&& val_signbit_known_set_p (inner_mode,
						    STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
		    || (code == GE
			&& SCALAR_FLOAT_MODE_P (inner_mode)
			&& (fsfv = FLOAT_STORE_FLAG_VALUE (GET_MODE (arg1)),
			    REAL_VALUE_NEGATIVE (fsfv)))
#endif
		    )
		   && COMPARISON_P (p->exp))
	    {
	      reverse_code = 1;
	      x = p->exp;
	      break;
	    }

	  /* If this is a non-trapping address, e.g. fp + constant, the
	     equivalent is a better operand since it may let us predict
	     the value of the comparison.  */
	  else if (!rtx_addr_can_trap_p (p->exp))
	    {
	      arg1 = p->exp;
	      continue;
	    }
	}

      /* If we didn't find a useful equivalence for ARG1, we are done.
	 Otherwise, set up for the next iteration.  */
      if (x == 0)
	break;

      /* If we need to reverse the comparison, make sure that is
	 possible -- we can't necessarily infer the value of GE from LT
	 with floating-point operands.  */
      if (reverse_code)
	{
	  enum rtx_code reversed = reversed_comparison_code (x, NULL);
	  if (reversed == UNKNOWN)
	    break;
	  else
	    code = reversed;
	}
      else if (COMPARISON_P (x))
	code = GET_CODE (x);
      arg1 = XEXP (x, 0), arg2 = XEXP (x, 1);
    }

  /* Return our results.  Return the modes from before fold_rtx
     because fold_rtx might produce const_int, and then it's too late.  */
  *pmode1 = GET_MODE (arg1), *pmode2 = GET_MODE (arg2);
  *parg1 = fold_rtx (arg1, 0), *parg2 = fold_rtx (arg2, 0);

  if (visited)
    delete visited;
  return code;
}

/* If X is a nontrivial arithmetic operation on an argument for which
   a constant value can be determined, return the result of operating
   on that value, as a constant.  Otherwise, return X, possibly with
   one or more operands changed to a forward-propagated constant.

   If X is a register whose contents are known, we do NOT return
   those contents here; equiv_constant is called to perform that task.
   For SUBREGs and MEMs, we do that both here and in equiv_constant.

   INSN is the insn that we may be modifying.  If it is 0, make a copy
   of X before modifying it.  */
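
/* For example (illustrative): for X == (plus:SI (reg 100) (const_int 2))
   where reg 100 is known equivalent to (const_int 3), the operand loop
   substitutes the constant and simplify_binary_operation folds the whole
   expression to (const_int 5).  */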

static rtx
fold_rtx (rtx x, rtx_insn *insn)
{
  enum rtx_code code;
  machine_mode mode;
  const char *fmt;
  int i;
  rtx new_rtx = 0;
  int changed = 0;
  poly_int64 xval;

  /* Operands of X.  */
  /* Workaround -Wmaybe-uninitialized false positive during
     profiledbootstrap by initializing them.  */
  rtx folded_arg0 = NULL_RTX;
  rtx folded_arg1 = NULL_RTX;

  /* Constant equivalents of first three operands of X;
     0 when no such equivalent is known.  */
  rtx const_arg0;
  rtx const_arg1;
  rtx const_arg2;

  /* The mode of the first operand of X.  We need this for sign and zero
     extends.  */
  machine_mode mode_arg0;

  if (x == 0)
    return x;

  /* Try to perform some initial simplifications on X.  */
  code = GET_CODE (x);
  switch (code)
    {
    case MEM:
    case SUBREG:
    /* The first operand of a SIGN/ZERO_EXTRACT has a different meaning
       than it would in other contexts.  Basically its mode does not
       signify the size of the object read.  That information is carried
       by the size operand.  If we happen to have a MEM of the appropriate
       mode in our tables with a constant value we could simplify the
       extraction incorrectly if we allowed substitution of that value
       for the MEM.  */
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      if ((new_rtx = equiv_constant (x)) != NULL_RTX)
        return new_rtx;
      return x;

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
    case REG:
    case PC:
      /* No use simplifying an EXPR_LIST
	 since they are used only for lists of args
	 in a function call's REG_EQUAL note.  */
    case EXPR_LIST:
      return x;

    case CC0:
      return prev_insn_cc0;

    case ASM_OPERANDS:
      if (insn)
	{
	  for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
	    validate_change (insn, &ASM_OPERANDS_INPUT (x, i),
			     fold_rtx (ASM_OPERANDS_INPUT (x, i), insn), 0);
	}
      return x;

    case CALL:
      if (NO_FUNCTION_CSE && CONSTANT_P (XEXP (XEXP (x, 0), 0)))
	return x;
      break;

    /* Anything else goes through the loop below.  */
    default:
      break;
    }

  mode = GET_MODE (x);
  const_arg0 = 0;
  const_arg1 = 0;
  const_arg2 = 0;
  mode_arg0 = VOIDmode;

  /* Try folding our operands.
     Then see which ones have constant values known.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
	rtx folded_arg = XEXP (x, i), const_arg;
	machine_mode mode_arg = GET_MODE (folded_arg);

	switch (GET_CODE (folded_arg))
	  {
	  case MEM:
	  case REG:
	  case SUBREG:
	    const_arg = equiv_constant (folded_arg);
	    break;

	  case CONST:
	  CASE_CONST_ANY:
	  case SYMBOL_REF:
	  case LABEL_REF:
	    const_arg = folded_arg;
	    break;

	  case CC0:
	    /* The cc0-user and cc0-setter may be in different blocks if
	       the cc0-setter potentially traps.  In that case PREV_INSN_CC0
	       will have been cleared as we exited the block with the
	       setter.

	       While we could potentially track cc0 in this case, it just
	       doesn't seem to be worth it given that cc0 targets are not
	       terribly common or important these days and trapping math
	       is rarely used.  The combination of those two conditions
	       necessary to trip this situation is exceedingly rare in the
	       real world.  */
	    if (!prev_insn_cc0)
	      {
		const_arg = NULL_RTX;
	      }
	    else
	      {
		folded_arg = prev_insn_cc0;
		mode_arg = prev_insn_cc0_mode;
		const_arg = equiv_constant (folded_arg);
	      }
	    break;

	  default:
	    folded_arg = fold_rtx (folded_arg, insn);
	    const_arg = equiv_constant (folded_arg);
	    break;
	  }

	/* For the first three operands, see if the operand
	   is constant or equivalent to a constant.  */
	switch (i)
	  {
	  case 0:
	    folded_arg0 = folded_arg;
	    const_arg0 = const_arg;
	    mode_arg0 = mode_arg;
	    break;
	  case 1:
	    folded_arg1 = folded_arg;
	    const_arg1 = const_arg;
	    break;
	  case 2:
	    const_arg2 = const_arg;
	    break;
	  }

	/* Pick the least expensive of the argument and an equivalent constant
	   argument.  */
	if (const_arg != 0
	    && const_arg != folded_arg
	    && (COST_IN (const_arg, mode_arg, code, i)
		<= COST_IN (folded_arg, mode_arg, code, i))

	    /* It's not safe to substitute the operand of a conversion
	       operator with a constant, as the conversion's identity
	       depends upon the mode of its operand.  This optimization
	       is handled by the call to simplify_unary_operation.  */
	    && (GET_RTX_CLASS (code) != RTX_UNARY
		|| GET_MODE (const_arg) == mode_arg0
		|| (code != ZERO_EXTEND
		    && code != SIGN_EXTEND
		    && code != TRUNCATE
		    && code != FLOAT_TRUNCATE
		    && code != FLOAT_EXTEND
		    && code != FLOAT
		    && code != FIX
		    && code != UNSIGNED_FLOAT
		    && code != UNSIGNED_FIX)))
	  folded_arg = const_arg;

	if (folded_arg == XEXP (x, i))
	  continue;

	if (insn == NULL_RTX && !changed)
	  x = copy_rtx (x);
	changed = 1;
	validate_unshare_change (insn, &XEXP (x, i), folded_arg, 1);
      }

  if (changed)
    {
      /* Canonicalize X if necessary, and keep const_argN and folded_argN
	 consistent with the order in X.  */
      if (canonicalize_change_group (insn, x))
	{
	  std::swap (const_arg0, const_arg1);
	  std::swap (folded_arg0, folded_arg1);
	}

      apply_change_group ();
    }

  /* If X is an arithmetic operation, see if we can simplify it.  */

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      {
	/* We can't simplify extension ops unless we know the
	   original mode.  */
	if ((code == ZERO_EXTEND || code == SIGN_EXTEND)
	    && mode_arg0 == VOIDmode)
	  break;

	new_rtx = simplify_unary_operation (code, mode,
					    const_arg0 ? const_arg0 : folded_arg0,
					    mode_arg0);
      }
      break;

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      /* See what items are actually being compared and set FOLDED_ARG[01]
	 to those values and CODE to the actual comparison code.  If any are
	 constant, set CONST_ARG0 and CONST_ARG1 appropriately.  We needn't
	 do anything if both operands are already known to be constant.  */

      /* ??? Vector mode comparisons are not supported yet.  */
      if (VECTOR_MODE_P (mode))
	break;

      if (const_arg0 == 0 || const_arg1 == 0)
	{
	  struct table_elt *p0, *p1;
	  rtx true_rtx, false_rtx;
	  machine_mode mode_arg1;

	  if (SCALAR_FLOAT_MODE_P (mode))
	    {
#ifdef FLOAT_STORE_FLAG_VALUE
	      true_rtx = (const_double_from_real_value
			  (FLOAT_STORE_FLAG_VALUE (mode), mode));
#else
	      true_rtx = NULL_RTX;
#endif
	      false_rtx = CONST0_RTX (mode);
	    }
	  else
	    {
	      true_rtx = const_true_rtx;
	      false_rtx = const0_rtx;
	    }

	  code = find_comparison_args (code, &folded_arg0, &folded_arg1,
				       &mode_arg0, &mode_arg1);

	  /* If the mode is VOIDmode or a MODE_CC mode, we don't know
	     what kinds of things are being compared, so we can't do
	     anything with this comparison.  */

	  if (mode_arg0 == VOIDmode || GET_MODE_CLASS (mode_arg0) == MODE_CC)
	    break;

	  const_arg0 = equiv_constant (folded_arg0);
	  const_arg1 = equiv_constant (folded_arg1);

	  /* If we do not now have two constants being compared, see
	     if we can nevertheless deduce some things about the
	     comparison.  */
	  if (const_arg0 == 0 || const_arg1 == 0)
	    {
	      if (const_arg1 != NULL)
		{
		  rtx cheapest_simplification;
		  int cheapest_cost;
		  rtx simp_result;
		  struct table_elt *p;

		  /* See if we can find an equivalent of folded_arg0
		     that gets us a cheaper expression, possibly a
		     constant through simplifications.  */
		  p = lookup (folded_arg0, SAFE_HASH (folded_arg0, mode_arg0),
			      mode_arg0);

		  if (p != NULL)
		    {
		      cheapest_simplification = x;
		      cheapest_cost = COST (x, mode);

		      for (p = p->first_same_value; p != NULL; p = p->next_same_value)
			{
			  int cost;

			  /* If the entry isn't valid, skip it.  */
			  if (! exp_equiv_p (p->exp, p->exp, 1, false))
			    continue;

			  /* Try to simplify using this equivalence.  */
			  simp_result
			    = simplify_relational_operation (code, mode,
							     mode_arg0,
							     p->exp,
							     const_arg1);

			  if (simp_result == NULL)
			    continue;

			  cost = COST (simp_result, mode);
			  if (cost < cheapest_cost)
			    {
			      cheapest_cost = cost;
			      cheapest_simplification = simp_result;
			    }
			}

		      /* If we have a cheaper expression now, use that
			 and try folding it further, from the top.  */
		      if (cheapest_simplification != x)
			return fold_rtx (copy_rtx (cheapest_simplification),
					 insn);
		    }
		}

	      /* See if the two operands are the same.  */

	      if ((REG_P (folded_arg0)
		   && REG_P (folded_arg1)
		   && (REG_QTY (REGNO (folded_arg0))
		       == REG_QTY (REGNO (folded_arg1))))
		  || ((p0 = lookup (folded_arg0,
				    SAFE_HASH (folded_arg0, mode_arg0),
				    mode_arg0))
		      && (p1 = lookup (folded_arg1,
				       SAFE_HASH (folded_arg1, mode_arg0),
				       mode_arg0))
		      && p0->first_same_value == p1->first_same_value))
		folded_arg1 = folded_arg0;

	      /* If FOLDED_ARG0 is a register, see if the comparison we are
		 doing now is either the same as we did before or the reverse
		 (we only check the reverse if not floating-point).  */
	      else if (REG_P (folded_arg0))
		{
		  int qty = REG_QTY (REGNO (folded_arg0));

		  if (REGNO_QTY_VALID_P (REGNO (folded_arg0)))
		    {
		      struct qty_table_elem *ent = &qty_table[qty];

		      if ((comparison_dominates_p (ent->comparison_code, code)
			   || (! FLOAT_MODE_P (mode_arg0)
			       && comparison_dominates_p (ent->comparison_code,
						          reverse_condition (code))))
			  && (rtx_equal_p (ent->comparison_const, folded_arg1)
			      || (const_arg1
				  && rtx_equal_p (ent->comparison_const,
						  const_arg1))
			      || (REG_P (folded_arg1)
				  && (REG_QTY (REGNO (folded_arg1)) == ent->comparison_qty))))
			{
			  if (comparison_dominates_p (ent->comparison_code, code))
			    {
			      if (true_rtx)
				return true_rtx;
			      else
				break;
			    }
			  else
			    return false_rtx;
			}
		    }
		}
	    }
	}

      /* If we are comparing against zero, see if the first operand is
	 equivalent to an IOR with a constant.  If so, we may be able to
	 determine the result of this comparison.  */
      if (const_arg1 == const0_rtx && !const_arg0)
	{
	  rtx y = lookup_as_function (folded_arg0, IOR);
	  rtx inner_const;

	  if (y != 0
	      && (inner_const = equiv_constant (XEXP (y, 1))) != 0
	      && CONST_INT_P (inner_const)
	      && INTVAL (inner_const) != 0)
	    folded_arg0 = gen_rtx_IOR (mode_arg0, XEXP (y, 0), inner_const);
	}

      {
	rtx op0 = const_arg0 ? const_arg0 : copy_rtx (folded_arg0);
	rtx op1 = const_arg1 ? const_arg1 : copy_rtx (folded_arg1);
	new_rtx = simplify_relational_operation (code, mode, mode_arg0,
						 op0, op1);
      }
      break;

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      switch (code)
	{
	case PLUS:
	  /* If the second operand is a LABEL_REF, see if the first is a MINUS
	     with that LABEL_REF as its second operand.  If so, the result is
	     the first operand of that MINUS.  This handles switches with an
	     ADDR_DIFF_VEC table.  */
	  if (const_arg1 && GET_CODE (const_arg1) == LABEL_REF)
	    {
	      rtx y
		= GET_CODE (folded_arg0) == MINUS ? folded_arg0
		: lookup_as_function (folded_arg0, MINUS);

	      if (y != 0 && GET_CODE (XEXP (y, 1)) == LABEL_REF
		  && label_ref_label (XEXP (y, 1)) == label_ref_label (const_arg1))
		return XEXP (y, 0);

	      /* Now try for a CONST of a MINUS like the above.  */
	      if ((y = (GET_CODE (folded_arg0) == CONST ? folded_arg0
			: lookup_as_function (folded_arg0, CONST))) != 0
		  && GET_CODE (XEXP (y, 0)) == MINUS
		  && GET_CODE (XEXP (XEXP (y, 0), 1)) == LABEL_REF
		  && label_ref_label (XEXP (XEXP (y, 0), 1)) == label_ref_label (const_arg1))
		return XEXP (XEXP (y, 0), 0);
	    }

	  /* Likewise if the operands are in the other order.  */
	  if (const_arg0 && GET_CODE (const_arg0) == LABEL_REF)
	    {
	      rtx y
		= GET_CODE (folded_arg1) == MINUS ? folded_arg1
		: lookup_as_function (folded_arg1, MINUS);

	      if (y != 0 && GET_CODE (XEXP (y, 1)) == LABEL_REF
		  && label_ref_label (XEXP (y, 1)) == label_ref_label (const_arg0))
		return XEXP (y, 0);

	      /* Now try for a CONST of a MINUS like the above.  */
	      if ((y = (GET_CODE (folded_arg1) == CONST ? folded_arg1
			: lookup_as_function (folded_arg1, CONST))) != 0
		  && GET_CODE (XEXP (y, 0)) == MINUS
		  && GET_CODE (XEXP (XEXP (y, 0), 1)) == LABEL_REF
		  && label_ref_label (XEXP (XEXP (y, 0), 1)) == label_ref_label (const_arg0))
		return XEXP (XEXP (y, 0), 0);
	    }

	  /* If second operand is a register equivalent to a negative
	     CONST_INT, see if we can find a register equivalent to the
	     positive constant.  Make a MINUS if so.  Don't do this for
	     a non-negative constant since we might then alternate between
	     choosing positive and negative constants.  Having the positive
	     constant previously-used is the more common case.  Be sure
	     the resulting constant is non-negative; if const_arg1 were
	     the smallest negative number this would overflow: depending
	     on the mode, this would either just be the same value (and
	     hence not save anything) or be incorrect.  */
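	  /* Purely as an illustration: given (plus:SI (reg 100) (reg 101))
	     where (reg 101) is known to be (const_int -4), if some register
	     is known to hold (const_int 4) we produce
	     (minus:SI (reg 100) <that register>).  */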
	  if (const_arg1 != 0 && CONST_INT_P (const_arg1)
	      && INTVAL (const_arg1) < 0
	      /* This used to test

	         -INTVAL (const_arg1) >= 0

		 But the Sun V5.0 compilers mis-compiled that test.  So
		 instead we test for the problematic value in a more direct
		 manner and hope the Sun compilers get it correct.  */
	      && INTVAL (const_arg1) !=
	        (HOST_WIDE_INT_1 << (HOST_BITS_PER_WIDE_INT - 1))
	      && REG_P (folded_arg1))
	    {
	      rtx new_const = GEN_INT (-INTVAL (const_arg1));
	      struct table_elt *p
		= lookup (new_const, SAFE_HASH (new_const, mode), mode);

	      if (p)
		for (p = p->first_same_value; p; p = p->next_same_value)
		  if (REG_P (p->exp))
		    return simplify_gen_binary (MINUS, mode, folded_arg0,
						canon_reg (p->exp, NULL));
	    }
	  goto from_plus;

	case MINUS:
	  /* If we have (MINUS Y C), see if Y is known to be (PLUS Z C2).
	     If so, produce (PLUS Z C2-C).  */
	  if (const_arg1 != 0 && poly_int_rtx_p (const_arg1, &xval))
	    {
	      rtx y = lookup_as_function (XEXP (x, 0), PLUS);
	      if (y && poly_int_rtx_p (XEXP (y, 1)))
		return fold_rtx (plus_constant (mode, copy_rtx (y), -xval),
				 NULL);
	    }

	  /* Fall through.  */

	from_plus:
	case SMIN:    case SMAX:      case UMIN:    case UMAX:
	case IOR:     case AND:       case XOR:
	case MULT:
	case ASHIFT:  case LSHIFTRT:  case ASHIFTRT:
	  /* If we have (<op> <reg> <const_int>) for an associative OP and REG
	     is known to be of similar form, we may be able to replace the
	     operation with a combined operation.  This may eliminate the
	     intermediate operation if every use is simplified in this way.
	     Note that the similar optimization done by combine.c only works
	     if the intermediate operation's result has only one reference.  */
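	  /* For example, if (reg 100) is known to be
	     (ashift:SI (reg 101) (const_int 2)), then
	     (ashift:SI (reg 100) (const_int 3)) is rewritten below as
	     (ashift:SI (reg 101) (const_int 5)), the shift counts being
	     combined with PLUS.  */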

	  if (REG_P (folded_arg0)
	      && const_arg1 && CONST_INT_P (const_arg1))
	    {
	      int is_shift
		= (code == ASHIFT || code == ASHIFTRT || code == LSHIFTRT);
	      rtx y, inner_const, new_const;
	      rtx canon_const_arg1 = const_arg1;
	      enum rtx_code associate_code;

	      if (is_shift
		  && (INTVAL (const_arg1) >= GET_MODE_UNIT_PRECISION (mode)
		      || INTVAL (const_arg1) < 0))
		{
		  if (SHIFT_COUNT_TRUNCATED)
		    canon_const_arg1 = gen_int_shift_amount
		      (mode, (INTVAL (const_arg1)
			      & (GET_MODE_UNIT_BITSIZE (mode) - 1)));
		  else
		    break;
		}

	      y = lookup_as_function (folded_arg0, code);
	      if (y == 0)
		break;

	      /* If we have compiled a statement like
		 "if (x == (x & mask1))", and now are looking at
		 "x & mask2", we will have a case where the first operand
		 of Y is the same as our first operand.  Unless we detect
		 this case, an infinite loop will result.  */
	      if (XEXP (y, 0) == folded_arg0)
		break;

	      inner_const = equiv_constant (fold_rtx (XEXP (y, 1), 0));
	      if (!inner_const || !CONST_INT_P (inner_const))
		break;

	      /* Don't associate these operations if they are a PLUS with the
		 same constant and it is a power of two.  These might be doable
		 with a pre- or post-increment.  Similarly for two subtracts of
		 identical powers of two with post decrement.  */

	      if (code == PLUS && const_arg1 == inner_const
		  && ((HAVE_PRE_INCREMENT
			  && pow2p_hwi (INTVAL (const_arg1)))
		      || (HAVE_POST_INCREMENT
			  && pow2p_hwi (INTVAL (const_arg1)))
		      || (HAVE_PRE_DECREMENT
			  && pow2p_hwi (- INTVAL (const_arg1)))
		      || (HAVE_POST_DECREMENT
			  && pow2p_hwi (- INTVAL (const_arg1)))))
		break;

	      /* ??? Vector mode shifts by a scalar
		 shift operand are not supported yet.  */
	      if (is_shift && VECTOR_MODE_P (mode))
		break;

	      if (is_shift
		  && (INTVAL (inner_const) >= GET_MODE_UNIT_PRECISION (mode)
		      || INTVAL (inner_const) < 0))
		{
		  if (SHIFT_COUNT_TRUNCATED)
		    inner_const = gen_int_shift_amount
		      (mode, (INTVAL (inner_const)
			      & (GET_MODE_UNIT_BITSIZE (mode) - 1)));
		  else
		    break;
		}

	      /* Compute the code used to compose the constants.  For example,
		 A-C1-C2 is A-(C1 + C2), so if CODE == MINUS, we want PLUS.  */

	      associate_code = (is_shift || code == MINUS ? PLUS : code);

	      new_const = simplify_binary_operation (associate_code, mode,
						     canon_const_arg1,
						     inner_const);

	      if (new_const == 0)
		break;

	      /* If we are associating shift operations, don't let this
		 produce a shift of the size of the object or larger.
		 This could occur when we follow a sign-extend by a right
		 shift on a machine that does a sign-extend as a pair
		 of shifts.  */

	      if (is_shift
		  && CONST_INT_P (new_const)
		  && INTVAL (new_const) >= GET_MODE_UNIT_PRECISION (mode))
		{
		  /* As an exception, we can turn an ASHIFTRT of this
		     form into a shift of the number of bits - 1.  */
		  if (code == ASHIFTRT)
		    new_const = gen_int_shift_amount
		      (mode, GET_MODE_UNIT_BITSIZE (mode) - 1);
		  else if (!side_effects_p (XEXP (y, 0)))
		    return CONST0_RTX (mode);
		  else
		    break;
		}

	      y = copy_rtx (XEXP (y, 0));

	      /* If Y contains our first operand (the most common way this
		 can happen is if Y is a MEM), we would go into an infinite
		 loop if we tried to fold it.  So don't fold in that case.  */

	      if (! reg_mentioned_p (folded_arg0, y))
		y = fold_rtx (y, insn);

	      return simplify_gen_binary (code, mode, y, new_const);
	    }
	  break;

	case DIV:       case UDIV:
	  /* ??? The associative optimization performed immediately above is
	     also possible for DIV and UDIV using associate_code of MULT.
	     However, we would need extra code to verify that the
	     multiplication does not overflow, that is, there is no overflow
	     in the calculation of new_const.  */
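	  /* E.g. (udiv (udiv X 6) 7) could become (udiv X 42), but only
	     when the product 6*7 is representable in MODE (illustrative
	     sketch; X is a placeholder).  */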
	  break;

	default:
	  break;
	}

      new_rtx = simplify_binary_operation (code, mode,
				       const_arg0 ? const_arg0 : folded_arg0,
				       const_arg1 ? const_arg1 : folded_arg1);
      break;

    case RTX_OBJ:
      /* (lo_sum (high X) X) is simply X.  */
      if (code == LO_SUM && const_arg0 != 0
	  && GET_CODE (const_arg0) == HIGH
	  && rtx_equal_p (XEXP (const_arg0, 0), const_arg1))
	return const_arg1;
      break;

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      new_rtx = simplify_ternary_operation (code, mode, mode_arg0,
					const_arg0 ? const_arg0 : folded_arg0,
					const_arg1 ? const_arg1 : folded_arg1,
					const_arg2 ? const_arg2 : XEXP (x, 2));
      break;

    default:
      break;
    }

  return new_rtx ? new_rtx : x;
}

/* Return a constant value currently equivalent to X.
   Return 0 if we don't know one.  */

static rtx
equiv_constant (rtx x)
{
  if (REG_P (x)
      && REGNO_QTY_VALID_P (REGNO (x)))
    {
      int x_q = REG_QTY (REGNO (x));
      struct qty_table_elem *x_ent = &qty_table[x_q];

      if (x_ent->const_rtx)
	x = gen_lowpart (GET_MODE (x), x_ent->const_rtx);
    }

  if (x == 0 || CONSTANT_P (x))
    return x;

  if (GET_CODE (x) == SUBREG)
    {
      machine_mode mode = GET_MODE (x);
      machine_mode imode = GET_MODE (SUBREG_REG (x));
      rtx new_rtx;

      /* See if we previously assigned a constant value to this SUBREG.  */
      if ((new_rtx = lookup_as_function (x, CONST_INT)) != 0
	  || (new_rtx = lookup_as_function (x, CONST_WIDE_INT)) != 0
	  || (NUM_POLY_INT_COEFFS > 1
	      && (new_rtx = lookup_as_function (x, CONST_POLY_INT)) != 0)
          || (new_rtx = lookup_as_function (x, CONST_DOUBLE)) != 0
          || (new_rtx = lookup_as_function (x, CONST_FIXED)) != 0)
        return new_rtx;

      /* If we didn't and if doing so makes sense, see if we previously
	 assigned a constant value to the enclosing word mode SUBREG.  */
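      /* Illustrative sketch, assuming a little-endian 32-bit-word target:
	 a (subreg:HI (reg:DI R) 4) can reuse a constant recorded for
	 (subreg:SI (reg:DI R) 4), taking the HImode lowpart of that
	 constant (R is a placeholder register).  */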
      if (known_lt (GET_MODE_SIZE (mode), UNITS_PER_WORD)
	  && known_lt (UNITS_PER_WORD, GET_MODE_SIZE (imode)))
	{
	  poly_int64 byte = (SUBREG_BYTE (x)
			     - subreg_lowpart_offset (mode, word_mode));
	  if (known_ge (byte, 0) && multiple_p (byte, UNITS_PER_WORD))
	    {
	      rtx y = gen_rtx_SUBREG (word_mode, SUBREG_REG (x), byte);
	      new_rtx = lookup_as_function (y, CONST_INT);
	      if (new_rtx)
		return gen_lowpart (mode, new_rtx);
	    }
	}

      /* Otherwise see if we already have a constant for the inner REG,
	 and if that is enough to calculate an equivalent constant for
	 the subreg.  Note that the upper bits of paradoxical subregs
	 are undefined, so they cannot be said to equal anything.  */
      if (REG_P (SUBREG_REG (x))
	  && !paradoxical_subreg_p (x)
	  && (new_rtx = equiv_constant (SUBREG_REG (x))) != 0)
        return simplify_subreg (mode, new_rtx, imode, SUBREG_BYTE (x));

      return 0;
    }

  /* If X is a MEM, see if it is a constant-pool reference, or look it up in
     the hash table in case its value was seen before.  */

  if (MEM_P (x))
    {
      struct table_elt *elt;

      x = avoid_constant_pool_reference (x);
      if (CONSTANT_P (x))
	return x;

      elt = lookup (x, SAFE_HASH (x, GET_MODE (x)), GET_MODE (x));
      if (elt == 0)
	return 0;

      for (elt = elt->first_same_value; elt; elt = elt->next_same_value)
	if (elt->is_const && CONSTANT_P (elt->exp))
	  return elt->exp;
    }

  return 0;
}

/* Given INSN, a jump insn, TAKEN indicates if we are following the
   "taken" branch.

   In certain cases, this can cause us to add an equivalence.  For example,
   if we are following the taken case of
	if (i == 2)
   we can add the fact that `i' and '2' are now equivalent.

   In any case, we can record that this comparison was passed.  If the same
   comparison is seen later, we will know its value.  */

static void
record_jump_equiv (rtx_insn *insn, bool taken)
{
  int cond_known_true;
  rtx op0, op1;
  rtx set;
  machine_mode mode, mode0, mode1;
  int reversed_nonequality = 0;
  enum rtx_code code;

  /* Ensure this is the right kind of insn.  */
  gcc_assert (any_condjump_p (insn));

  set = pc_set (insn);

  /* See if this jump condition is known true or false.  */
  if (taken)
    cond_known_true = (XEXP (SET_SRC (set), 2) == pc_rtx);
  else
    cond_known_true = (XEXP (SET_SRC (set), 1) == pc_rtx);

  /* Get the type of comparison being done and the operands being compared.
     If we had to reverse a non-equality condition, record that fact so we
     know that it isn't valid for floating-point.  */
  code = GET_CODE (XEXP (SET_SRC (set), 0));
  op0 = fold_rtx (XEXP (XEXP (SET_SRC (set), 0), 0), insn);
  op1 = fold_rtx (XEXP (XEXP (SET_SRC (set), 0), 1), insn);

  /* On a cc0 target the cc0-setter and cc0-user may end up in different
     blocks.  When that happens the tracking of the cc0-setter via
     PREV_INSN_CC0 is spoiled.  That means that fold_rtx may return
     NULL_RTX.  In those cases, there's nothing to record.  */
  if (op0 == NULL_RTX || op1 == NULL_RTX)
    return;

  code = find_comparison_args (code, &op0, &op1, &mode0, &mode1);
  if (! cond_known_true)
    {
      code = reversed_comparison_code_parts (code, op0, op1, insn);

      /* Don't remember if we can't find the inverse.  */
      if (code == UNKNOWN)
	return;
    }

  /* The mode is the mode of the non-constant.  */
  mode = mode0;
  if (mode1 != VOIDmode)
    mode = mode1;

  record_jump_cond (code, mode, op0, op1, reversed_nonequality);
}

/* Yet another form of subreg creation.  In this case, we want something in
   MODE, and we should assume OP has MODE iff it is naturally modeless.  */

static rtx
record_jump_cond_subreg (machine_mode mode, rtx op)
{
  machine_mode op_mode = GET_MODE (op);
  if (op_mode == mode || op_mode == VOIDmode)
    return op;
  return lowpart_subreg (mode, op, op_mode);
}

/* We know that comparison CODE applied to OP0 and OP1 in MODE is true.
   REVERSED_NONEQUALITY is nonzero if CODE had to be swapped.
   Make any useful entries we can with that information.  Called from
   above function and called recursively.  */

static void
record_jump_cond (enum rtx_code code, machine_mode mode, rtx op0,
		  rtx op1, int reversed_nonequality)
{
  unsigned op0_hash, op1_hash;
  int op0_in_memory, op1_in_memory;
  struct table_elt *op0_elt, *op1_elt;

  /* If OP0 and OP1 are known equal, and either is a paradoxical SUBREG,
     we know that they are also equal in the smaller mode (this is also
     true for all smaller modes whether or not there is a SUBREG, but
     is not worth testing for with no SUBREG).  */
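  /* Illustrative example: if (subreg:DI (reg:SI 100) 0) is known equal
     to (reg:DI 101), then (reg:SI 100) is equal to the SImode lowpart
     of (reg:DI 101).  (Register numbers here are hypothetical.)  */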

  /* Note that GET_MODE (op0) may not equal MODE.  */
  if (code == EQ && paradoxical_subreg_p (op0))
    {
      machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
      rtx tem = record_jump_cond_subreg (inner_mode, op1);
      if (tem)
	record_jump_cond (code, mode, SUBREG_REG (op0), tem,
			  reversed_nonequality);
    }

  if (code == EQ && paradoxical_subreg_p (op1))
    {
      machine_mode inner_mode = GET_MODE (SUBREG_REG (op1));
      rtx tem = record_jump_cond_subreg (inner_mode, op0);
      if (tem)
	record_jump_cond (code, mode, SUBREG_REG (op1), tem,
			  reversed_nonequality);
    }

  /* Similarly, if this is an NE comparison, and either is a SUBREG
     making a smaller mode, we know the whole thing is also NE.  */

  /* Note that GET_MODE (op0) may not equal MODE;
     if we test MODE instead, we can get an infinite recursion
     alternating between two modes each wider than MODE.  */
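  /* Illustrative example: if (subreg:QI (reg:SI 100) 0) is known
     unequal to a QImode value Y, the low bytes differ, so (reg:SI 100)
     cannot equal any SImode value whose lowpart is Y (register number
     hypothetical).  */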

  if (code == NE
      && partial_subreg_p (op0)
      && subreg_lowpart_p (op0))
    {
      machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
      rtx tem = record_jump_cond_subreg (inner_mode, op1);
      if (tem)
	record_jump_cond (code, mode, SUBREG_REG (op0), tem,
			  reversed_nonequality);
    }

  if (code == NE
      && partial_subreg_p (op1)
      && subreg_lowpart_p (op1))
    {
      machine_mode inner_mode = GET_MODE (SUBREG_REG (op1));
      rtx tem = record_jump_cond_subreg (inner_mode, op0);
      if (tem)
	record_jump_cond (code, mode, SUBREG_REG (op1), tem,
			  reversed_nonequality);
    }

  /* Hash both operands.  */

  do_not_record = 0;
  hash_arg_in_memory = 0;
  op0_hash = HASH (op0, mode);
  op0_in_memory = hash_arg_in_memory;

  if (do_not_record)
    return;

  do_not_record = 0;
  hash_arg_in_memory = 0;
  op1_hash = HASH (op1, mode);
  op1_in_memory = hash_arg_in_memory;

  if (do_not_record)
    return;

  /* Look up both operands.  */
  op0_elt = lookup (op0, op0_hash, mode);
  op1_elt = lookup (op1, op1_hash, mode);

  /* If both operands are already equivalent or if they are not in the
     table but are identical, do nothing.  */
  if ((op0_elt != 0 && op1_elt != 0
       && op0_elt->first_same_value == op1_elt->first_same_value)
      || op0 == op1 || rtx_equal_p (op0, op1))
    return;

  /* If we aren't setting two things equal, all we can do is save this
     comparison.  Similarly if this is floating-point.  In the latter
     case, OP1 might be zero and both -0.0 and 0.0 are equal to it.
     If we record the equality, we might inadvertently delete code
     whose intent was to change -0 to +0.  */

  if (code != EQ || FLOAT_MODE_P (GET_MODE (op0)))
    {
      struct qty_table_elem *ent;
      int qty;

      /* If we reversed a floating-point comparison, if OP0 is not a
	 register, or if OP1 is neither a register nor a constant, we can't
	 do anything.  */

      if (!REG_P (op1))
	op1 = equiv_constant (op1);

      if ((reversed_nonequality && FLOAT_MODE_P (mode))
	  || !REG_P (op0) || op1 == 0)
	return;

      /* Put OP0 in the hash table if it isn't already.  This gives it a
	 new quantity number.  */
      if (op0_elt == 0)
	{
	  if (insert_regs (op0, NULL, 0))
	    {
	      rehash_using_reg (op0);
	      op0_hash = HASH (op0, mode);

	      /* If OP0 is contained in OP1, this changes its hash code
		 as well.  Faster to rehash than to check, except
		 for the simple case of a constant.  */
	      if (! CONSTANT_P (op1))
		op1_hash = HASH (op1, mode);
	    }

	  op0_elt = insert (op0, NULL, op0_hash, mode);
	  op0_elt->in_memory = op0_in_memory;
	}

      qty = REG_QTY (REGNO (op0));
      ent = &qty_table[qty];

      ent->comparison_code = code;
      if (REG_P (op1))
	{
	  /* Look it up again--in case op0 and op1 are the same.  */
	  op1_elt = lookup (op1, op1_hash, mode);

	  /* Put OP1 in the hash table so it gets a new quantity number.  */
	  if (op1_elt == 0)
	    {
	      if (insert_regs (op1, NULL, 0))
		{
		  rehash_using_reg (op1);
		  op1_hash = HASH (op1, mode);
		}

	      op1_elt = insert (op1, NULL, op1_hash, mode);
	      op1_elt->in_memory = op1_in_memory;
	    }

	  ent->comparison_const = NULL_RTX;
	  ent->comparison_qty = REG_QTY (REGNO (op1));
	}
      else
	{
	  ent->comparison_const = op1;
	  ent->comparison_qty = -1;
	}

      return;
    }

  /* If either side is still missing an equivalence, make it now,
     then merge the equivalences.  */

  if (op0_elt == 0)
    {
      if (insert_regs (op0, NULL, 0))
	{
	  rehash_using_reg (op0);
	  op0_hash = HASH (op0, mode);
	}

      op0_elt = insert (op0, NULL, op0_hash, mode);
      op0_elt->in_memory = op0_in_memory;
    }

  if (op1_elt == 0)
    {
      if (insert_regs (op1, NULL, 0))
	{
	  rehash_using_reg (op1);
	  op1_hash = HASH (op1, mode);
	}

      op1_elt = insert (op1, NULL, op1_hash, mode);
      op1_elt->in_memory = op1_in_memory;
    }

  merge_equiv_classes (op0_elt, op1_elt);
}

/* CSE processing for one instruction.

   Most "true" common subexpressions are mostly optimized away in GIMPLE,
   but the few that "leak through" are cleaned up by cse_insn, and complex
   addressing modes are often formed here.

   The main function is cse_insn, and between here and that function
   a couple of helper functions is defined to keep the size of cse_insn
   within reasonable proportions.
   
   Data is shared between the main and helper functions via STRUCT SET,
   that contains all data related for every set in the instruction that
   is being processed.
   
   Note that cse_main processes all sets in the instruction.  Most
   passes in GCC only process simple SET insns or single_set insns, but
   CSE processes insns with multiple sets as well.  */

/* Data on one SET contained in the instruction.  */

struct set
{
  /* The SET rtx itself.  */
  rtx rtl;
  /* The SET_SRC of the rtx (the original value, if it is changing).  */
  rtx src;
  /* The hash-table element for the SET_SRC of the SET.  */
  struct table_elt *src_elt;
  /* Hash value for the SET_SRC.  */
  unsigned src_hash;
  /* Hash value for the SET_DEST.  */
  unsigned dest_hash;
  /* The SET_DEST, with SUBREG, etc., stripped.  */
  rtx inner_dest;
  /* Nonzero if the SET_SRC is in memory.  */
  char src_in_memory;
  /* Nonzero if the SET_SRC contains something
     whose value cannot be predicted and understood.  */
  char src_volatile;
  /* Original machine mode, in case it becomes a CONST_INT.
     The size of this field should match the size of the mode
     field of struct rtx_def (see rtl.h).  */
  ENUM_BITFIELD(machine_mode) mode : 8;
  /* Hash value of constant equivalent for SET_SRC.  */
  unsigned src_const_hash;
  /* A constant equivalent for SET_SRC, if any.  */
  rtx src_const;
  /* Table entry for constant equivalent for SET_SRC, if any.  */
  struct table_elt *src_const_elt;
  /* Table entry for the destination address.  */
  struct table_elt *dest_addr_elt;
};

/* Special handling for (set REG0 REG1) where REG0 is the
   "cheapest", cheaper than REG1.  After cse, REG1 will probably not
   be used in the sequel, so (if easily done) change this insn to
   (set REG1 REG0) and replace REG1 with REG0 in the previous insn
   that computed their value.  Then REG1 will become a dead store
   and won't cloud the situation for later optimizations.

   Do not make this change if REG1 is a hard register, because it will
   then be used in the sequel and we may be changing a two-operand insn
   into a three-operand insn.
   
   This is the last transformation that cse_insn will try to do.  */
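/* An illustrative sketch of the transformation (register numbers are
   hypothetical, with (reg 100) the cheaper register):

	(set (reg 101) (expr))		(set (reg 100) (expr))
	(set (reg 100) (reg 101))   =>	(set (reg 101) (reg 100))

   The second insn is then a dead store if (reg 101) is unused later.  */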

static void
try_back_substitute_reg (rtx set, rtx_insn *insn)
{
  rtx dest = SET_DEST (set);
  rtx src = SET_SRC (set);

  if (REG_P (dest)
      && REG_P (src) && ! HARD_REGISTER_P (src)
      && REGNO_QTY_VALID_P (REGNO (src)))
    {
      int src_q = REG_QTY (REGNO (src));
      struct qty_table_elem *src_ent = &qty_table[src_q];

      if (src_ent->first_reg == REGNO (dest))
	{
	  /* Scan for the previous nonnote insn, but stop at a basic
	     block boundary.  */
	  rtx_insn *prev = insn;
	  rtx_insn *bb_head = BB_HEAD (BLOCK_FOR_INSN (insn));
	  do
	    {
	      prev = PREV_INSN (prev);
	    }
	  while (prev != bb_head && (NOTE_P (prev) || DEBUG_INSN_P (prev)));

	  /* Do not swap the registers around if the previous instruction
	     attaches a REG_EQUIV note to REG1.

	     ??? It's not entirely clear whether we can transfer a REG_EQUIV
	     from the pseudo that originally shadowed an incoming argument
	     to another register.  Some uses of REG_EQUIV might rely on it
	     being attached to REG1 rather than REG2.

	     This section previously turned the REG_EQUIV into a REG_EQUAL
	     note.  We cannot do that because REG_EQUIV may provide an
	     uninitialized stack slot when REG_PARM_STACK_SPACE is used.  */
	  if (NONJUMP_INSN_P (prev)
	      && GET_CODE (PATTERN (prev)) == SET
	      && SET_DEST (PATTERN (prev)) == src
	      && ! find_reg_note (prev, REG_EQUIV, NULL_RTX))
	    {
	      rtx note;

	      validate_change (prev, &SET_DEST (PATTERN (prev)), dest, 1);
	      validate_change (insn, &SET_DEST (set), src, 1);
	      validate_change (insn, &SET_SRC (set), dest, 1);
	      apply_change_group ();

	      /* If INSN has a REG_EQUAL note, and this note mentions
		 REG0, then we must delete it, because the value in
		 REG0 has changed.  If the note's value is REG1, we must
		 also delete it because that is now this insn's dest.  */
	      note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
	      if (note != 0
		  && (reg_mentioned_p (dest, XEXP (note, 0))
		      || rtx_equal_p (src, XEXP (note, 0))))
		remove_note (insn, note);

	      /* If INSN has a REG_ARGS_SIZE note, move it to PREV.  */
	      note = find_reg_note (insn, REG_ARGS_SIZE, NULL_RTX);
	      if (note != 0)
		{
		  remove_note (insn, note);
		  gcc_assert (!find_reg_note (prev, REG_ARGS_SIZE, NULL_RTX));
		  set_unique_reg_note (prev, REG_ARGS_SIZE, XEXP (note, 0));
		}
	    }
	}
    }
}

/* Record all the SETs in this instruction into SETS_PTR,
   and return the number of recorded sets.  */
static int
find_sets_in_insn (rtx_insn *insn, struct set **psets)
{
  struct set *sets = *psets;
  int n_sets = 0;
  rtx x = PATTERN (insn);

  if (GET_CODE (x) == SET)
    {
      /* Ignore SETs that are unconditional jumps.
	 They never need cse processing, so this does not hurt.
	 The reason is not efficiency but rather
	 so that we can test at the end for instructions
	 that have been simplified to unconditional jumps
	 and not be misled by unchanged instructions
	 that were unconditional jumps to begin with.  */
      if (SET_DEST (x) == pc_rtx
	  && GET_CODE (SET_SRC (x)) == LABEL_REF)
	;
      /* Don't count call-insns, (set (reg 0) (call ...)), as a set.
	 The hard function value register is used only once, to copy to
	 someplace else, so it isn't worth cse'ing.  */
      else if (GET_CODE (SET_SRC (x)) == CALL)
	;
      else
	sets[n_sets++].rtl = x;
    }
  else if (GET_CODE (x) == PARALLEL)
    {
      int i, lim = XVECLEN (x, 0);

      /* Go over the expressions of the PARALLEL in forward order, to
	 put them in the same order in the SETS array.  */
      for (i = 0; i < lim; i++)
	{
	  rtx y = XVECEXP (x, 0, i);
	  if (GET_CODE (y) == SET)
	    {
	      /* As above, we ignore unconditional jumps and call-insns.  */
	      if (SET_DEST (y) == pc_rtx
		  && GET_CODE (SET_SRC (y)) == LABEL_REF)
		;
	      else if (GET_CODE (SET_SRC (y)) == CALL)
		;
	      else
		sets[n_sets++].rtl = y;
	    }
	}
    }

  return n_sets;
}

/* Subroutine of canonicalize_insn.  X is an ASM_OPERANDS in INSN.  */

static void
canon_asm_operands (rtx x, rtx_insn *insn)
{
  for (int i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
    {
      rtx input = ASM_OPERANDS_INPUT (x, i);
      if (!(REG_P (input) && HARD_REGISTER_P (input)))
	{
	  input = canon_reg (input, insn);
	  validate_change (insn, &ASM_OPERANDS_INPUT (x, i), input, 1);
	}
    }
}

/* Where possible, substitute every register reference in the N_SETS
   sets of INSN with the canonical register.

   Register canonicalization propagates the earliest register (i.e.
   one that is set before INSN) with the same value.  This is a very
   useful, simple form of CSE, to clean up warts from expanding GIMPLE
   to RTL.  For instance, a CONST for an address is usually expanded
   multiple times to loads into different registers, thus creating many
   subexpressions of the form:

   (set (reg1) (some_const))
   (set (mem (... reg1 ...) (thing)))
   (set (reg2) (some_const))
   (set (mem (... reg2 ...) (thing)))

   After canonicalizing, the code takes the following form:

   (set (reg1) (some_const))
   (set (mem (... reg1 ...) (thing)))
   (set (reg2) (some_const))
   (set (mem (... reg1 ...) (thing)))

   The set to reg2 is now trivially dead, and the memory reference (or
   address, or whatever) may be a candidate for further CSEing.

   In this function, the result of apply_change_group can be ignored;
   see canon_reg.  */

static void
canonicalize_insn (rtx_insn *insn, struct set **psets, int n_sets)
{
  struct set *sets = *psets;
  rtx tem;
  rtx x = PATTERN (insn);
  int i;

  if (CALL_P (insn))
    {
      for (tem = CALL_INSN_FUNCTION_USAGE (insn); tem; tem = XEXP (tem, 1))
	if (GET_CODE (XEXP (tem, 0)) != SET)
	  XEXP (tem, 0) = canon_reg (XEXP (tem, 0), insn);
    }

  if (GET_CODE (x) == SET && GET_CODE (SET_SRC (x)) == CALL)
    {
      canon_reg (SET_SRC (x), insn);
      apply_change_group ();
      fold_rtx (SET_SRC (x), insn);
    }
  else if (GET_CODE (x) == CLOBBER)
    {
      /* If we clobber memory, canon the address.
	 This does nothing when a register is clobbered
	 because we have already invalidated the reg.  */
      if (MEM_P (XEXP (x, 0)))
	canon_reg (XEXP (x, 0), insn);
    }
  else if (GET_CODE (x) == USE
	   && ! (REG_P (XEXP (x, 0))
		 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER))
    /* Canonicalize a USE of a pseudo register or memory location.  */
    canon_reg (x, insn);
  else if (GET_CODE (x) == ASM_OPERANDS)
    canon_asm_operands (x, insn);
  else if (GET_CODE (x) == CALL)
    {
      canon_reg (x, insn);
      apply_change_group ();
      fold_rtx (x, insn);
    }
  else if (DEBUG_INSN_P (insn))
    canon_reg (PATTERN (insn), insn);
  else if (GET_CODE (x) == PARALLEL)
    {
      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
	{
	  rtx y = XVECEXP (x, 0, i);
	  if (GET_CODE (y) == SET && GET_CODE (SET_SRC (y)) == CALL)
	    {
	      canon_reg (SET_SRC (y), insn);
	      apply_change_group ();
	      fold_rtx (SET_SRC (y), insn);
	    }
	  else if (GET_CODE (y) == CLOBBER)
	    {
	      if (MEM_P (XEXP (y, 0)))
		canon_reg (XEXP (y, 0), insn);
	    }
	  else if (GET_CODE (y) == USE
		   && ! (REG_P (XEXP (y, 0))
			 && REGNO (XEXP (y, 0)) < FIRST_PSEUDO_REGISTER))
	    canon_reg (y, insn);
	  else if (GET_CODE (y) == ASM_OPERANDS)
	    canon_asm_operands (y, insn);
	  else if (GET_CODE (y) == CALL)
	    {
	      canon_reg (y, insn);
	      apply_change_group ();
	      fold_rtx (y, insn);
	    }
	}
    }

  if (n_sets == 1 && REG_NOTES (insn) != 0
      && (tem = find_reg_note (insn, REG_EQUAL, NULL_RTX)) != 0)
    {
      /* We potentially will process this insn many times.  Therefore,
	 drop the REG_EQUAL note if it is equal to the SET_SRC of the
	 unique set in INSN.

	 Do not do so if the REG_EQUAL note is for a STRICT_LOW_PART,
	 because cse_insn handles those specially.  */
      if (GET_CODE (SET_DEST (sets[0].rtl)) != STRICT_LOW_PART
	  && rtx_equal_p (XEXP (tem, 0), SET_SRC (sets[0].rtl)))
	remove_note (insn, tem);
      else
	{
	  canon_reg (XEXP (tem, 0), insn);
	  apply_change_group ();
	  XEXP (tem, 0) = fold_rtx (XEXP (tem, 0), insn);
	  df_notes_rescan (insn);
	}
    }

  /* Canonicalize sources and addresses of destinations.
     We do this in a separate pass to avoid problems when a MATCH_DUP is
     present in the insn pattern.  In that case, we want to ensure that
     we don't break the duplicate nature of the pattern.  So we will replace
     both operands at the same time.  Otherwise, we would fail to find an
     equivalent substitution in the loop calling validate_change below.

     We used to suppress canonicalization of DEST if it appears in SRC,
     but we don't do this any more.  */

  for (i = 0; i < n_sets; i++)
    {
      rtx dest = SET_DEST (sets[i].rtl);
      rtx src = SET_SRC (sets[i].rtl);
      rtx new_rtx = canon_reg (src, insn);

      validate_change (insn, &SET_SRC (sets[i].rtl), new_rtx, 1);

      if (GET_CODE (dest) == ZERO_EXTRACT)
	{
	  validate_change (insn, &XEXP (dest, 1),
			   canon_reg (XEXP (dest, 1), insn), 1);
	  validate_change (insn, &XEXP (dest, 2),
			   canon_reg (XEXP (dest, 2), insn), 1);
	}

      while (GET_CODE (dest) == SUBREG
	     || GET_CODE (dest) == ZERO_EXTRACT
	     || GET_CODE (dest) == STRICT_LOW_PART)
	dest = XEXP (dest, 0);

      if (MEM_P (dest))
	canon_reg (dest, insn);
    }

  /* Now that we have done all the replacements, we can apply the change
     group and see if they all work.  Note that this will cause some
     canonicalizations that would have worked individually not to be applied
     because some other canonicalization didn't work, but this should not
     occur often.

     The result of apply_change_group can be ignored; see canon_reg.  */

  apply_change_group ();
}

/* Main function of CSE.
   First simplify sources and addresses of all assignments
   in the instruction, using previously-computed equivalent values.
   Then install the new sources and destinations in the table
   of available values.  */

static void
cse_insn (rtx_insn *insn)
{
  rtx x = PATTERN (insn);
  int i;
  rtx tem;
  int n_sets = 0;

  rtx src_eqv = 0;
  struct table_elt *src_eqv_elt = 0;
  int src_eqv_volatile = 0;
  int src_eqv_in_memory = 0;
  unsigned src_eqv_hash = 0;

  struct set *sets = (struct set *) 0;

  if (GET_CODE (x) == SET)
    sets = XALLOCA (struct set);
  else if (GET_CODE (x) == PARALLEL)
    sets = XALLOCAVEC (struct set, XVECLEN (x, 0));

  this_insn = insn;
  /* Records what this insn does to set CC0.  */
  this_insn_cc0 = 0;
  this_insn_cc0_mode = VOIDmode;

  /* Find all regs explicitly clobbered in this insn,
     to ensure they are not replaced with any other regs
     elsewhere in this insn.  */
  invalidate_from_sets_and_clobbers (insn);

  /* Record all the SETs in this instruction.  */
  n_sets = find_sets_in_insn (insn, &sets);

  /* Substitute the canonical register where possible.  */
  canonicalize_insn (insn, &sets, n_sets);

  /* If this insn has a REG_EQUAL note, store the equivalent value in SRC_EQV,
     if different, or if the DEST is a STRICT_LOW_PART/ZERO_EXTRACT.  The
     latter condition is necessary because SRC_EQV is handled specially for
     this case, and if it isn't set, then there will be no equivalence
     for the destination.  */
  if (n_sets == 1 && REG_NOTES (insn) != 0
      && (tem = find_reg_note (insn, REG_EQUAL, NULL_RTX)) != 0)
    {

      if (GET_CODE (SET_DEST (sets[0].rtl)) != ZERO_EXTRACT
	  && (! rtx_equal_p (XEXP (tem, 0), SET_SRC (sets[0].rtl))
	      || GET_CODE (SET_DEST (sets[0].rtl)) == STRICT_LOW_PART))
	src_eqv = copy_rtx (XEXP (tem, 0));
      /* If DEST is of the form ZERO_EXTRACT, as in:
	 (set (zero_extract:SI (reg:SI 119)
		  (const_int 16 [0x10])
		  (const_int 16 [0x10]))
	      (const_int 51154 [0xc7d2]))
	 REG_EQUAL note will specify the value of register (reg:SI 119) at this
	 point.  Note that this is different from SRC_EQV.  We can, however,
	 calculate SRC_EQV with the position and width of ZERO_EXTRACT.  */
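      /* Worked illustration for the RTL above, assuming BITS_BIG_ENDIAN
	 is clear and a hypothetical note value: if the REG_EQUAL note
	 gives (reg:SI 119) the value 0xc7d20000, then SHIFT is 16 and
	 MASK is 0xffff, so the recorded SRC_EQV is
	 (0xc7d20000 >> 16) & 0xffff == 51154, matching the stored
	 field.  */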
      else if (GET_CODE (SET_DEST (sets[0].rtl)) == ZERO_EXTRACT
	       && CONST_INT_P (XEXP (tem, 0))
	       && CONST_INT_P (XEXP (SET_DEST (sets[0].rtl), 1))
	       && CONST_INT_P (XEXP (SET_DEST (sets[0].rtl), 2)))
	{
	  rtx dest_reg = XEXP (SET_DEST (sets[0].rtl), 0);
	  /* This is the mode of XEXP (tem, 0) as well.  */
	  scalar_int_mode dest_mode
	    = as_a <scalar_int_mode> (GET_MODE (dest_reg));
	  rtx width = XEXP (SET_DEST (sets[0].rtl), 1);
	  rtx pos = XEXP (SET_DEST (sets[0].rtl), 2);
	  HOST_WIDE_INT val = INTVAL (XEXP (tem, 0));
	  HOST_WIDE_INT mask;
	  unsigned int shift;
	  if (BITS_BIG_ENDIAN)
	    shift = (GET_MODE_PRECISION (dest_mode)
		     - INTVAL (pos) - INTVAL (width));
	  else
	    shift = INTVAL (pos);
	  if (INTVAL (width) == HOST_BITS_PER_WIDE_INT)
	    mask = HOST_WIDE_INT_M1;
	  else
	    mask = (HOST_WIDE_INT_1 << INTVAL (width)) - 1;
	  val = (val >> shift) & mask;
	  src_eqv = GEN_INT (val);
	}
    }

  /* Set sets[i].src_elt to the class each source belongs to.
     Detect assignments from or to volatile things
     and set sets[i] to zero so they will be ignored
     in the rest of this function.

     Nothing in this loop changes the hash table or the register chains.  */

  for (i = 0; i < n_sets; i++)
    {
      bool repeat = false;
      bool noop_insn = false;
      rtx src, dest;
      rtx src_folded;
      struct table_elt *elt = 0, *p;
      machine_mode mode;
      rtx src_eqv_here;
      rtx src_const = 0;
      rtx src_related = 0;
      bool src_related_is_const_anchor = false;
      struct table_elt *src_const_elt = 0;
      int src_cost = MAX_COST;
      int src_eqv_cost = MAX_COST;
      int src_folded_cost = MAX_COST;
      int src_related_cost = MAX_COST;
      int src_elt_cost = MAX_COST;
      int src_regcost = MAX_COST;
      int src_eqv_regcost = MAX_COST;
      int src_folded_regcost = MAX_COST;
      int src_related_regcost = MAX_COST;
      int src_elt_regcost = MAX_COST;
      /* Set nonzero if we need to call force_const_mem on the contents
	 of src_folded before using it.  */
      int src_folded_force_flag = 0;
      scalar_int_mode int_mode;

      dest = SET_DEST (sets[i].rtl);
      src = SET_SRC (sets[i].rtl);

      /* If SRC is a constant that has no machine mode,
	 hash it with the destination's machine mode.
	 This way we can keep different modes separate.  */

      mode = GET_MODE (src) == VOIDmode ? GET_MODE (dest) : GET_MODE (src);
      sets[i].mode = mode;

      if (src_eqv)
	{
	  machine_mode eqvmode = mode;
	  if (GET_CODE (dest) == STRICT_LOW_PART)
	    eqvmode = GET_MODE (SUBREG_REG (XEXP (dest, 0)));
	  do_not_record = 0;
	  hash_arg_in_memory = 0;
	  src_eqv_hash = HASH (src_eqv, eqvmode);

	  /* Find the equivalence class for the equivalent expression.  */

	  if (!do_not_record)
	    src_eqv_elt = lookup (src_eqv, src_eqv_hash, eqvmode);

	  src_eqv_volatile = do_not_record;
	  src_eqv_in_memory = hash_arg_in_memory;
	}

      /* If this is a STRICT_LOW_PART assignment, src_eqv corresponds to the
	 value of the INNER register, not the destination.  So it is not
	 a valid substitution for the source.  But save it for later.  */
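      /* For instance (hypothetical RTL), in
	 (set (strict_low_part (subreg:QI (reg:SI 100) 0)) ...)
	 a REG_EQUAL note describes all of (reg:SI 100), not just the
	 QImode part being stored, so it cannot stand in for SRC.  */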
      if (GET_CODE (dest) == STRICT_LOW_PART)
	src_eqv_here = 0;
      else
	src_eqv_here = src_eqv;

      /* Simplify any foldable subexpressions in SRC.  Then get the fully-
	 simplified result, which may not necessarily be valid.  */
      src_folded = fold_rtx (src, NULL);

#if 0
      /* ??? This caused bad code to be generated for the m68k port with -O2.
	 Suppose src is (CONST_INT -1), and that after truncation src_folded
	 is (CONST_INT 3).  Suppose src_folded is then used for src_const.
	 At the end we will add src and src_const to the same equivalence
	 class.  We now have 3 and -1 on the same equivalence class.  This
	 causes later instructions to be mis-optimized.  */
      /* If storing a constant in a bitfield, pre-truncate the constant
	 so we will be able to record it later.  */
      if (GET_CODE (SET_DEST (sets[i].rtl)) == ZERO_EXTRACT)
	{
	  rtx width = XEXP (SET_DEST (sets[i].rtl), 1);

	  if (CONST_INT_P (src)
	      && CONST_INT_P (width)
	      && INTVAL (width) < HOST_BITS_PER_WIDE_INT
	      && (INTVAL (src) & ((HOST_WIDE_INT) (-1) << INTVAL (width))))
	    src_folded
	      = GEN_INT (INTVAL (src) & ((HOST_WIDE_INT_1
					  << INTVAL (width)) - 1));
	}
#endif

      /* Compute SRC's hash code, and also notice if it
	 should not be recorded at all.  In that case,
	 prevent any further processing of this assignment.

	 We set DO_NOT_RECORD if the destination has a REG_UNUSED note.
	 This avoids getting the source register into the tables, where it
	 may be invalidated later (via REG_QTY), then trigger an ICE upon
	 re-insertion.

	 This is only a problem in multi-set insns.  If it were a single
	 set the dead copy would have been removed.  If the RHS were anything
	 but a simple REG, then we won't call insert_regs and thus there's
	 no potential for triggering the ICE.  */
      do_not_record = (REG_P (dest)
		       && REG_P (src)
		       && find_reg_note (insn, REG_UNUSED, dest));
      hash_arg_in_memory = 0;

      sets[i].src = src;
      sets[i].src_hash = HASH (src, mode);
      sets[i].src_volatile = do_not_record;
      sets[i].src_in_memory = hash_arg_in_memory;

      /* If SRC is a MEM, there is a REG_EQUIV note for SRC, and DEST is
	 a pseudo, do not record SRC.  Using SRC as a replacement for
	 anything else will be incorrect in that situation.  Note that
	 this usually occurs only for stack slots, in which case all the
	 RTL would be referring to SRC, so we don't lose any optimization
	 opportunities by not having SRC in the hash table.  */