path: root/gcc/cfgloopmanip.c
blob: 0e876e566e1ea0a824a68ad97969f5c0026abe79
/* Loop manipulation code for GNU compiler.
   Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "hard-reg-set.h"
#include "obstack.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "cfglayout.h"
#include "cfghooks.h"
#include "output.h"

static void duplicate_subloops (struct loop *, struct loop *);
static void copy_loops_to (struct loop **, int,
			   struct loop *);
static void loop_redirect_edge (edge, basic_block);
static void remove_bbs (basic_block *, int);
static bool rpe_enum_p (basic_block, void *);
static int find_path (edge, basic_block **);
static void fix_loop_placements (struct loop *, bool *);
static bool fix_bb_placement (basic_block);
static void fix_bb_placements (basic_block, bool *);
static basic_block create_preheader (struct loop *, int);
static void unloop (struct loop *, bool *);

#define RDIV(X,Y) (((X) + (Y) / 2) / (Y))
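/* For instance, RDIV (7, 2) is 4 and RDIV (5, 4) is 1: X/Y rounded to the
   nearest integer rather than truncated towards zero.  */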

/* Checks whether basic block BB is dominated by DATA.  */
static bool
rpe_enum_p (basic_block bb, void *data)
{
  return dominated_by_p (CDI_DOMINATORS, bb, data);
}

/* Remove basic blocks BBS.  NBBS is the number of the basic blocks.  */

static void
remove_bbs (basic_block *bbs, int nbbs)
{
  int i;

  for (i = 0; i < nbbs; i++)
    delete_basic_block (bbs[i]);
}

/* Find the path -- i.e. the basic blocks dominated by edge E -- and put them
   into array BBS, which will be allocated large enough to contain them.
   E->dest must have exactly one predecessor for this to work (this is easy
   to achieve and we do not enforce it here because we do not want this
   function to alter anything).  The number of basic blocks in the
   path is returned.  */
static int
find_path (edge e, basic_block **bbs)
{
  gcc_assert (EDGE_COUNT (e->dest->preds) <= 1);

  /* Find bbs in the path.  */
  *bbs = XCNEWVEC (basic_block, n_basic_blocks);
  return dfs_enumerate_from (e->dest, 0, rpe_enum_p, *bbs,
			     n_basic_blocks, e->dest);
}

/* Fix the placement of basic block BB inside the loop hierarchy --
   Let L be the loop to which BB belongs.  Then every successor of BB must
   either
     1) belong to some superloop of loop L, or
     2) be the header of a loop K such that K->outer is a superloop of L.
   Returns true if we had to move BB into another loop to enforce this
   condition, false if the placement of BB was already correct (provided that
   the placements of its successors are correct).  */
static bool
fix_bb_placement (basic_block bb)
{
  edge e;
  edge_iterator ei;
  struct loop *loop = current_loops->tree_root, *act;

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      if (e->dest == EXIT_BLOCK_PTR)
	continue;

      act = e->dest->loop_father;
      if (act->header == e->dest)
	act = act->outer;

      if (flow_loop_nested_p (loop, act))
	loop = act;
    }

  if (loop == bb->loop_father)
    return false;

  remove_bb_from_loops (bb);
  add_bb_to_loop (bb, loop);

  return true;
}

/* Fix the placement of LOOP inside the loop tree, i.e. find the innermost
   superloop of LOOP into which at least one exit edge of LOOP leads, and set
   it as the immediate superloop of LOOP.  Return true if the immediate
   superloop of LOOP changed.  */

static bool
fix_loop_placement (struct loop *loop)
{
  unsigned i;
  edge e;
  VEC (edge, heap) *exits = get_loop_exit_edges (loop);
  struct loop *father = current_loops->tree_root, *act;
  bool ret = false;

  for (i = 0; VEC_iterate (edge, exits, i, e); i++)
    {
      act = find_common_loop (loop, e->dest->loop_father);
      if (flow_loop_nested_p (father, act))
	father = act;
    }

  if (father != loop->outer)
    {
      for (act = loop->outer; act != father; act = act->outer)
	act->num_nodes -= loop->num_nodes;
      flow_loop_tree_node_remove (loop);
      flow_loop_tree_node_add (father, loop);

      /* The exit edges of LOOP no longer exit its original immediate
	 superloops; remove them from the appropriate exit lists.  */
      for (i = 0; VEC_iterate (edge, exits, i, e); i++)
	rescan_loop_exit (e, false, false);

      ret = true;
    }

  VEC_free (edge, heap, exits);
  return ret;
}

/* Fix placements of basic blocks inside the loop hierarchy stored in loops;
   i.e. enforce the condition stated in the description of fix_bb_placement.
   We start from basic block FROM that had some of its successors removed, so
   that its placement no longer has to be correct, and iteratively fix the
   placement of its predecessors, which may change if the placement of FROM
   changed.  Also fix the placement of subloops of FROM->loop_father, which
   might also be altered due to this change; the condition for them is
   similar, except that instead of successors we consider edges coming out of
   the loops.
 
   If the changes may invalidate the information about irreducible regions,
   IRRED_INVALIDATED is set to true.  */

static void
fix_bb_placements (basic_block from,
		   bool *irred_invalidated)
{
  sbitmap in_queue;
  basic_block *queue, *qtop, *qbeg, *qend;
  struct loop *base_loop;
  edge e;

  /* We pass through blocks back-reachable from FROM, testing whether some
     of their successors moved to an outer loop.  It may be necessary to
     iterate several times, but the process is finite, as we stop unless we
     move the basic block up the loop structure.  The whole story is a bit
     more complicated due to the presence of subloops; those are moved using
     fix_loop_placement.  */

  base_loop = from->loop_father;
  if (base_loop == current_loops->tree_root)
    return;

  in_queue = sbitmap_alloc (last_basic_block);
  sbitmap_zero (in_queue);
  SET_BIT (in_queue, from->index);
  /* Prevent us from going out of the base_loop.  */
  SET_BIT (in_queue, base_loop->header->index);

  queue = XNEWVEC (basic_block, base_loop->num_nodes + 1);
  qtop = queue + base_loop->num_nodes + 1;
  qbeg = queue;
  qend = queue + 1;
  *qbeg = from;

  while (qbeg != qend)
    {
      edge_iterator ei;
      from = *qbeg;
      qbeg++;
      if (qbeg == qtop)
	qbeg = queue;
      RESET_BIT (in_queue, from->index);

      if (from->loop_father->header == from)
	{
	  /* Subloop header, maybe move the loop upward.  */
	  if (!fix_loop_placement (from->loop_father))
	    continue;
	}
      else
	{
	  /* Ordinary basic block.  */
	  if (!fix_bb_placement (from))
	    continue;
	}

      FOR_EACH_EDGE (e, ei, from->succs)
	{
	  if (e->flags & EDGE_IRREDUCIBLE_LOOP)
	    *irred_invalidated = true;
	}

      /* Something has changed, insert predecessors into queue.  */
      FOR_EACH_EDGE (e, ei, from->preds)
	{
	  basic_block pred = e->src;
	  struct loop *nca;

	  if (e->flags & EDGE_IRREDUCIBLE_LOOP)
	    *irred_invalidated = true;

	  if (TEST_BIT (in_queue, pred->index))
	    continue;

	  /* If it is a subloop, then it either was not moved, or
	     the path up the loop tree from base_loop does not contain
	     it.  */
	  nca = find_common_loop (pred->loop_father, base_loop);
	  if (pred->loop_father != base_loop
	      && (nca == base_loop
		  || nca != pred->loop_father))
	    pred = pred->loop_father->header;
	  else if (!flow_loop_nested_p (from->loop_father, pred->loop_father))
	    {
	      /* No point in processing it.  */
	      continue;
	    }

	  if (TEST_BIT (in_queue, pred->index))
	    continue;

	  /* Schedule the basic block.  */
	  *qend = pred;
	  qend++;
	  if (qend == qtop)
	    qend = queue;
	  SET_BIT (in_queue, pred->index);
	}
    }
  free (in_queue);
  free (queue);
}

/* Removes the path beginning at edge E, i.e. removes the basic blocks
   dominated by E and updates loop structures and dominators.  Return true
   if we were able to remove the path, false otherwise (and nothing is
   affected then).  */
bool
remove_path (edge e)
{
  edge ae;
  basic_block *rem_bbs, *bord_bbs, *dom_bbs, from, bb;
  int i, nrem, n_bord_bbs, n_dom_bbs, nreml;
  sbitmap seen;
  bool irred_invalidated = false;
  struct loop **deleted_loop;

  if (!can_remove_branch_p (e))
    return false;

  /* Keep track of whether we need to update information about irreducible
     regions.  This is the case if the removed area is part of an
     irreducible region, or if the set of basic blocks that belong to a loop
     that is inside an irreducible region is changed, or if such a loop is
     removed.  */
  if (e->flags & EDGE_IRREDUCIBLE_LOOP)
    irred_invalidated = true;

  /* We need to check whether basic blocks are dominated by the edge
     e, but we only have basic block dominators.  This is easy to
     fix -- when e->dest has exactly one predecessor, this corresponds
     to blocks dominated by e->dest, if not, split the edge.  */
  if (!single_pred_p (e->dest))
    e = single_pred_edge (split_edge (e));

  /* It may happen that by removing the path we remove one or more of the
     loops it belongs to.  In this case, first unloop the loops, then proceed
     normally.  We may assume that e->dest is not a header of any loop,
     as it now has exactly one predecessor.  */
  while (e->src->loop_father->outer
	 && dominated_by_p (CDI_DOMINATORS,
			    e->src->loop_father->latch, e->dest))
    unloop (e->src->loop_father, &irred_invalidated);

  /* Identify the path.  */
  nrem = find_path (e, &rem_bbs);

  n_bord_bbs = 0;
  bord_bbs = XCNEWVEC (basic_block, n_basic_blocks);
  seen = sbitmap_alloc (last_basic_block);
  sbitmap_zero (seen);

  /* Find "border" hexes -- i.e. those with predecessor in removed path.  */
  for (i = 0; i < nrem; i++)
    SET_BIT (seen, rem_bbs[i]->index);
  for (i = 0; i < nrem; i++)
    {
      edge_iterator ei;
      bb = rem_bbs[i];
      FOR_EACH_EDGE (ae, ei, rem_bbs[i]->succs)
	if (ae->dest != EXIT_BLOCK_PTR && !TEST_BIT (seen, ae->dest->index))
	  {
	    SET_BIT (seen, ae->dest->index);
	    bord_bbs[n_bord_bbs++] = ae->dest;
	  
	    if (ae->flags & EDGE_IRREDUCIBLE_LOOP)
	      irred_invalidated = true;
	  }
    }

  /* Remove the path.  */
  from = e->src;
  remove_branch (e);
  dom_bbs = XCNEWVEC (basic_block, n_basic_blocks);

  /* Cancel loops contained in the path.  */
  deleted_loop = XNEWVEC (struct loop *, nrem);
  nreml = 0;
  for (i = 0; i < nrem; i++)
    if (rem_bbs[i]->loop_father->header == rem_bbs[i])
      deleted_loop[nreml++] = rem_bbs[i]->loop_father;

  remove_bbs (rem_bbs, nrem);
  free (rem_bbs);

  for (i = 0; i < nreml; i++)
    cancel_loop_tree (deleted_loop[i]);
  free (deleted_loop);

  /* Find blocks whose dominators may be affected.  */
  n_dom_bbs = 0;
  sbitmap_zero (seen);
  for (i = 0; i < n_bord_bbs; i++)
    {
      basic_block ldom;

      bb = get_immediate_dominator (CDI_DOMINATORS, bord_bbs[i]);
      if (TEST_BIT (seen, bb->index))
	continue;
      SET_BIT (seen, bb->index);

      for (ldom = first_dom_son (CDI_DOMINATORS, bb);
	   ldom;
	   ldom = next_dom_son (CDI_DOMINATORS, ldom))
	if (!dominated_by_p (CDI_DOMINATORS, from, ldom))
	  dom_bbs[n_dom_bbs++] = ldom;
    }

  free (seen);

  /* Recount dominators.  */
  iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, n_dom_bbs);
  free (dom_bbs);
  free (bord_bbs);

  /* Fix placements of basic blocks inside loops and the placement of
     loops in the loop tree.  */
  fix_bb_placements (from, &irred_invalidated);
  fix_loop_placements (from->loop_father, &irred_invalidated);

  if (irred_invalidated
      && (current_loops->state & LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS) != 0)
    mark_irreducible_loops ();

  return true;
}
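/* A minimal usage sketch (illustrative only, not a caller that exists in this
   file): a pass that has proved that edge E is never taken could drop the
   whole region hanging off it with

       if (!remove_path (e))
	 return false;

   remove_path checks can_remove_branch_p itself and returns false when the
   branch cannot be removed, so the caller only has to handle that case.  */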

/* Creates a place for a new LOOP in the loops structure.  */

static void
place_new_loop (struct loop *loop)
{
  loop->num = number_of_loops ();
  VEC_safe_push (loop_p, heap, current_loops->larray, loop);
}

/* Given a LOOP structure with filled header and latch, find the body of the
   corresponding loop and add it to the loop tree.  Insert LOOP as a son of
   OUTER.  */

void
add_loop (struct loop *loop, struct loop *outer)
{
  basic_block *bbs;
  int i, n;
  struct loop *subloop;

  /* Add it to loop structure.  */
  place_new_loop (loop);
  flow_loop_tree_node_add (outer, loop);

  /* Find its nodes.  */
  bbs = XNEWVEC (basic_block, n_basic_blocks);
  n = get_loop_body_with_size (loop, bbs, n_basic_blocks);

  for (i = 0; i < n; i++)
    {
      if (bbs[i]->loop_father == outer)
	{
	  remove_bb_from_loops (bbs[i]);
	  add_bb_to_loop (bbs[i], loop);
	  continue;
	}

      loop->num_nodes++;

      /* If we find a direct subloop of OUTER, move it to LOOP.  */
      subloop = bbs[i]->loop_father;
      if (subloop->outer == outer
	  && subloop->header == bbs[i])
	{
	  flow_loop_tree_node_remove (subloop);
	  flow_loop_tree_node_add (loop, subloop);
	}
    }

  free (bbs);
}

/* Multiply all frequencies in LOOP by NUM/DEN.  */
void
scale_loop_frequencies (struct loop *loop, int num, int den)
{
  basic_block *bbs;

  bbs = get_loop_body (loop);
  scale_bbs_frequencies_int (bbs, loop->num_nodes, num, den);
  free (bbs);
}

/* Make the area between HEADER_EDGE and LATCH_EDGE a loop by connecting
   the latch to the header, and update the loop tree and dominators
   accordingly.  Everything between them plus LATCH_EDGE destination must
   be dominated by HEADER_EDGE destination, and back-reachable from
   LATCH_EDGE source.  HEADER_EDGE is redirected to basic block SWITCH_BB,
   FALSE_EDGE of SWITCH_BB to original destination of HEADER_EDGE and
   TRUE_EDGE of SWITCH_BB to original destination of LATCH_EDGE.
   Returns the newly created loop.  Frequencies and counts in the new loop
   are scaled by FALSE_SCALE and in the old one by TRUE_SCALE.  */

struct loop *
loopify (edge latch_edge, edge header_edge,
	 basic_block switch_bb, edge true_edge, edge false_edge,
	 bool redirect_all_edges, unsigned true_scale, unsigned false_scale)
{
  basic_block succ_bb = latch_edge->dest;
  basic_block pred_bb = header_edge->src;
  basic_block *dom_bbs, *body;
  unsigned n_dom_bbs, i;
  sbitmap seen;
  struct loop *loop = alloc_loop ();
  struct loop *outer = succ_bb->loop_father->outer;
  int freq;
  gcov_type cnt;
  edge e;
  edge_iterator ei;

  loop->header = header_edge->dest;
  loop->latch = latch_edge->src;

  freq = EDGE_FREQUENCY (header_edge);
  cnt = header_edge->count;

  /* Redirect edges.  */
  loop_redirect_edge (latch_edge, loop->header);
  loop_redirect_edge (true_edge, succ_bb);

  /* During loop versioning, one of the switch_bb edges is already properly
     set.  Do not redirect it again unless redirect_all_edges is true.  */
  if (redirect_all_edges)
    {
      loop_redirect_edge (header_edge, switch_bb);
      loop_redirect_edge (false_edge, loop->header);

      /* Update dominators.  */
      set_immediate_dominator (CDI_DOMINATORS, switch_bb, pred_bb);
      set_immediate_dominator (CDI_DOMINATORS, loop->header, switch_bb);
    }

  set_immediate_dominator (CDI_DOMINATORS, succ_bb, switch_bb);

  /* Compute new loop.  */
  add_loop (loop, outer);

  /* Add switch_bb to appropriate loop.  */
  if (switch_bb->loop_father)
    remove_bb_from_loops (switch_bb);
  add_bb_to_loop (switch_bb, outer);

  /* Fix frequencies.  */
  if (redirect_all_edges)
    {
      switch_bb->frequency = freq;
      switch_bb->count = cnt;
      FOR_EACH_EDGE (e, ei, switch_bb->succs)
	{
	  e->count = (switch_bb->count * e->probability) / REG_BR_PROB_BASE;
	}
    }
  scale_loop_frequencies (loop, false_scale, REG_BR_PROB_BASE);
  scale_loop_frequencies (succ_bb->loop_father, true_scale, REG_BR_PROB_BASE);

  /* Update dominators of blocks outside of LOOP.  */
  dom_bbs = XCNEWVEC (basic_block, n_basic_blocks);
  n_dom_bbs = 0;
  seen = sbitmap_alloc (last_basic_block);
  sbitmap_zero (seen);
  body = get_loop_body (loop);

  for (i = 0; i < loop->num_nodes; i++)
    SET_BIT (seen, body[i]->index);

  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block ldom;

      for (ldom = first_dom_son (CDI_DOMINATORS, body[i]);
	   ldom;
	   ldom = next_dom_son (CDI_DOMINATORS, ldom))
	if (!TEST_BIT (seen, ldom->index))
	  {
	    SET_BIT (seen, ldom->index);
	    dom_bbs[n_dom_bbs++] = ldom;
	  }
    }

  iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, n_dom_bbs);

  free (body);
  free (seen);
  free (dom_bbs);

  return loop;
}

/* Remove the latch edge of a LOOP and update loops to indicate that
   the LOOP was removed.  After this function, the original loop latch will
   have no successor, which the caller is expected to fix somehow.

   If this may cause the information about irreducible regions to become
   invalid, IRRED_INVALIDATED is set to true.  */

static void
unloop (struct loop *loop, bool *irred_invalidated)
{
  basic_block *body;
  struct loop *ploop;
  unsigned i, n;
  basic_block latch = loop->latch;
  bool dummy = false;

  if (loop_preheader_edge (loop)->flags & EDGE_IRREDUCIBLE_LOOP)
    *irred_invalidated = true;

  /* This is relatively straightforward.  The dominators are unchanged, as
     the loop header dominates the loop latch, so the only thing we have to
     care about is the placement of loops and basic blocks inside the loop
     tree.  We move them all to loop->outer, and then let fix_bb_placements
     do its work.  */

  body = get_loop_body (loop);
  n = loop->num_nodes;
  for (i = 0; i < n; i++)
    if (body[i]->loop_father == loop)
      {
	remove_bb_from_loops (body[i]);
	add_bb_to_loop (body[i], loop->outer);
      }
  free(body);

  while (loop->inner)
    {
      ploop = loop->inner;
      flow_loop_tree_node_remove (ploop);
      flow_loop_tree_node_add (loop->outer, ploop);
    }

  /* Remove the loop and free its data.  */
  delete_loop (loop);

  remove_edge (single_succ_edge (latch));

  /* We do not pass IRRED_INVALIDATED to fix_bb_placements here, as even if
     there is an irreducible region inside the cancelled loop, the flags will
     be still correct.  */
  fix_bb_placements (latch, &dummy);
}

/* Fix the placement of superloops of LOOP inside the loop tree, i.e. ensure
   that the condition stated in the description of fix_loop_placement holds
   for them.  It is used in the case when we removed some edges coming out of
   LOOP, which may cause the right placement of LOOP inside the loop tree to
   change.
 
   IRRED_INVALIDATED is set to true if a change in the loop structures might
   invalidate the information about irreducible regions.  */

static void
fix_loop_placements (struct loop *loop, bool *irred_invalidated)
{
  struct loop *outer;

  while (loop->outer)
    {
      outer = loop->outer;
      if (!fix_loop_placement (loop))
	break;

      /* Changing the placement of a loop in the loop tree may alter the
	 validity of condition 2) of the description of fix_bb_placement
	 for its preheader, because the successor is the header and belongs
	 to the loop.  So call fix_bb_placements to fix up the placement
	 of the preheader and (possibly) of its predecessors.  */
      fix_bb_placements (loop_preheader_edge (loop)->src,
			 irred_invalidated);
      loop = outer;
    }
}

/* Creates a copy of LOOP as a subloop of TARGET loop, placing the newly
   created loop into the loops structure.  */
struct loop *
duplicate_loop (struct loop *loop, struct loop *target)
{
  struct loop *cloop;
  cloop = alloc_loop ();
  place_new_loop (cloop);

  /* Mark the new loop as copy of LOOP.  */
  loop->copy = cloop;

  /* Add it to target.  */
  flow_loop_tree_node_add (target, cloop);

  return cloop;
}

/* Copies structure of subloops of LOOP into TARGET loop, placing
   newly created loops into loop tree.  */
static void
duplicate_subloops (struct loop *loop, struct loop *target)
{
  struct loop *aloop, *cloop;

  for (aloop = loop->inner; aloop; aloop = aloop->next)
    {
      cloop = duplicate_loop (aloop, target);
      duplicate_subloops (aloop, cloop);
    }
}

/* Copies structure of subloops of N loops, stored in array COPIED_LOOPS,
   into TARGET loop, placing newly created loops into loop tree.  */
static void
copy_loops_to (struct loop **copied_loops, int n, struct loop *target)
{
  struct loop *aloop;
  int i;

  for (i = 0; i < n; i++)
    {
      aloop = duplicate_loop (copied_loops[i], target);
      duplicate_subloops (copied_loops[i], aloop);
    }
}

/* Redirects edge E to basic block DEST.  */
static void
loop_redirect_edge (edge e, basic_block dest)
{
  if (e->dest == dest)
    return;

  redirect_edge_and_branch_force (e, dest);
}

/* Check whether LOOP's body can be duplicated.  */
bool
can_duplicate_loop_p (struct loop *loop)
{
  int ret;
  basic_block *bbs = get_loop_body (loop);

  ret = can_copy_bbs_p (bbs, loop->num_nodes);
  free (bbs);

  return ret;
}

/* Sets the probability and count of edge E to zero.  The probability and
   count are redistributed evenly to the remaining edges coming from
   E->src.  */

static void
set_zero_probability (edge e)
{
  basic_block bb = e->src;
  edge_iterator ei;
  edge ae, last = NULL;
  unsigned n = EDGE_COUNT (bb->succs);
  gcov_type cnt = e->count, cnt1;
  unsigned prob = e->probability, prob1;

  gcc_assert (n > 1);
  cnt1 = cnt / (n - 1);
  prob1 = prob / (n - 1);

  FOR_EACH_EDGE (ae, ei, bb->succs)
    {
      if (ae == e)
	continue;

      ae->probability += prob1;
      ae->count += cnt1;
      last = ae;
    }

  /* Move the rest to one of the edges.  */
  last->probability += prob % (n - 1);
  last->count += cnt % (n - 1);

  e->probability = 0;
  e->count = 0;
}
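/* For example, if the source block has three successors and E carries
   probability 6000 (out of REG_BR_PROB_BASE, i.e. 10000) and count 90, each
   of the other two edges gains 3000 and 45, and E ends up with probability
   and count zero; any remainder of the division goes to the last edge
   visited.  */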

/* Duplicates the body of LOOP to the given edge E NDUPL times.  Takes care of
   updating the loop structure and dominators.  E's destination must be the
   LOOP header for this to work, i.e. it must be the entry or latch edge of
   this loop; these are unique, as the loops must have preheaders for this
   function to work correctly (in case E is the latch edge, the function
   unrolls the loop; if E is the entry edge, it peels the loop).  Store edges
   created by copying the ORIG edge from the copies corresponding to set bits
   in the WONT_EXIT bitmap (bit 0 corresponds to the original LOOP body, the
   other copies are numbered in the order given by the control flow through
   them) into the TO_REMOVE array.  Returns false if duplication is
   impossible.  */

bool
duplicate_loop_to_header_edge (struct loop *loop, edge e,
			       unsigned int ndupl, sbitmap wont_exit,
			       edge orig, VEC (edge, heap) **to_remove,
			       int flags)
{
  struct loop *target, *aloop;
  struct loop **orig_loops;
  unsigned n_orig_loops;
  basic_block header = loop->header, latch = loop->latch;
  basic_block *new_bbs, *bbs, *first_active;
  basic_block new_bb, bb, first_active_latch = NULL;
  edge ae, latch_edge;
  edge spec_edges[2], new_spec_edges[2];
#define SE_LATCH 0
#define SE_ORIG 1
  unsigned i, j, n;
  int is_latch = (latch == e->src);
  int scale_act = 0, *scale_step = NULL, scale_main = 0;
  int scale_after_exit = 0;
  int p, freq_in, freq_le, freq_out_orig;
  int prob_pass_thru, prob_pass_wont_exit, prob_pass_main;
  int add_irreducible_flag;
  basic_block place_after;
  bitmap bbs_to_scale = NULL;
  bitmap_iterator bi;

  gcc_assert (e->dest == loop->header);
  gcc_assert (ndupl > 0);

  if (orig)
    {
      /* Orig must be edge out of the loop.  */
      gcc_assert (flow_bb_inside_loop_p (loop, orig->src));
      gcc_assert (!flow_bb_inside_loop_p (loop, orig->dest));
    }

  n = loop->num_nodes;
  bbs = get_loop_body_in_dom_order (loop);
  gcc_assert (bbs[0] == loop->header);
  gcc_assert (bbs[n  - 1] == loop->latch);

  /* Check whether duplication is possible.  */
  if (!can_copy_bbs_p (bbs, loop->num_nodes))
    {
      free (bbs);
      return false;
    }
  new_bbs = XNEWVEC (basic_block, loop->num_nodes);

  /* In case we are doing loop peeling and the loop is in the middle of an
     irreducible region, the peeled copies will be inside it too.  */
  add_irreducible_flag = e->flags & EDGE_IRREDUCIBLE_LOOP;
  gcc_assert (!is_latch || !add_irreducible_flag);

  /* Find edge from latch.  */
  latch_edge = loop_latch_edge (loop);

  if (flags & DLTHE_FLAG_UPDATE_FREQ)
    {
      /* Calculate the coefficients by which we have to scale the frequencies
	 of the duplicated loop bodies.  */
      freq_in = header->frequency;
      freq_le = EDGE_FREQUENCY (latch_edge);
      if (freq_in == 0)
	freq_in = 1;
      if (freq_in < freq_le)
	freq_in = freq_le;
      freq_out_orig = orig ? EDGE_FREQUENCY (orig) : freq_in - freq_le;
      if (freq_out_orig > freq_in - freq_le)
	freq_out_orig = freq_in - freq_le;
      prob_pass_thru = RDIV (REG_BR_PROB_BASE * freq_le, freq_in);
      prob_pass_wont_exit =
	      RDIV (REG_BR_PROB_BASE * (freq_le + freq_out_orig), freq_in);

      if (orig
	  && REG_BR_PROB_BASE - orig->probability != 0)
	{
	  /* The blocks that are dominated by a removed exit edge ORIG have
	     frequencies scaled by this.  */
	  scale_after_exit = RDIV (REG_BR_PROB_BASE * REG_BR_PROB_BASE,
				   REG_BR_PROB_BASE - orig->probability);
	  bbs_to_scale = BITMAP_ALLOC (NULL);
	  for (i = 0; i < n; i++)
	    {
	      if (bbs[i] != orig->src
		  && dominated_by_p (CDI_DOMINATORS, bbs[i], orig->src))
		bitmap_set_bit (bbs_to_scale, i);
	    }
	}

      scale_step = XNEWVEC (int, ndupl);

      for (i = 1; i <= ndupl; i++)
	scale_step[i - 1] = TEST_BIT (wont_exit, i)
				? prob_pass_wont_exit
				: prob_pass_thru;

      /* Complete peeling is special as the probability of exit in last
	 copy becomes 1.  */
      if (flags & DLTHE_FLAG_COMPLETTE_PEEL)
	{
	  int wanted_freq = EDGE_FREQUENCY (e);

	  if (wanted_freq > freq_in)
	    wanted_freq = freq_in;

	  gcc_assert (!is_latch);
	  /* The first copy has the frequency of the incoming edge.  Each
	     subsequent frequency should be reduced by prob_pass_wont_exit.
	     The caller should have managed the flags so that all copies
	     except for the original loop have wont_exit set.  */
	  scale_act = RDIV (wanted_freq * REG_BR_PROB_BASE, freq_in);
	  /* Now simulate the duplication adjustments and compute header
	     frequency of the last copy.  */
	  for (i = 0; i < ndupl; i++)
	    wanted_freq = RDIV (wanted_freq * scale_step[i], REG_BR_PROB_BASE);
	  scale_main = RDIV (wanted_freq * REG_BR_PROB_BASE, freq_in);
	}
      else if (is_latch)
	{
	  prob_pass_main = TEST_BIT (wont_exit, 0)
				? prob_pass_wont_exit
				: prob_pass_thru;
	  p = prob_pass_main;
	  scale_main = REG_BR_PROB_BASE;
	  for (i = 0; i < ndupl; i++)
	    {
	      scale_main += p;
	      p = RDIV (p * scale_step[i], REG_BR_PROB_BASE);
	    }
	  scale_main = RDIV (REG_BR_PROB_BASE * REG_BR_PROB_BASE, scale_main);
	  scale_act = RDIV (scale_main * prob_pass_main, REG_BR_PROB_BASE);
	}
      else
	{
	  scale_main = REG_BR_PROB_BASE;
	  for (i = 0; i < ndupl; i++)
	    scale_main = RDIV (scale_main * scale_step[i], REG_BR_PROB_BASE);
	  scale_act = REG_BR_PROB_BASE - prob_pass_thru;
	}
      for (i = 0; i < ndupl; i++)
	gcc_assert (scale_step[i] >= 0 && scale_step[i] <= REG_BR_PROB_BASE);
      gcc_assert (scale_main >= 0 && scale_main <= REG_BR_PROB_BASE
		  && scale_act >= 0  && scale_act <= REG_BR_PROB_BASE);
    }

  /* Loop the new bbs will belong to.  */
  target = e->src->loop_father;

  /* Original loops.  */
  n_orig_loops = 0;
  for (aloop = loop->inner; aloop; aloop = aloop->next)
    n_orig_loops++;
  orig_loops = XCNEWVEC (struct loop *, n_orig_loops);
  for (aloop = loop->inner, i = 0; aloop; aloop = aloop->next, i++)
    orig_loops[i] = aloop;

  loop->copy = target;

  first_active = XNEWVEC (basic_block, n);
  if (is_latch)
    {
      memcpy (first_active, bbs, n * sizeof (basic_block));
      first_active_latch = latch;
    }

  spec_edges[SE_ORIG] = orig;
  spec_edges[SE_LATCH] = latch_edge;

  place_after = e->src;
  for (j = 0; j < ndupl; j++)
    {
      /* Copy loops.  */
      copy_loops_to (orig_loops, n_orig_loops, target);

      /* Copy bbs.  */
      copy_bbs (bbs, n, new_bbs, spec_edges, 2, new_spec_edges, loop,
		place_after);
      place_after = new_spec_edges[SE_LATCH]->src;

      if (flags & DLTHE_RECORD_COPY_NUMBER)
	for (i = 0; i < n; i++)
	  {
	    gcc_assert (!new_bbs[i]->aux);
	    new_bbs[i]->aux = (void *)(size_t)(j + 1);
	  }

      /* Note whether the blocks and edges belong to an irreducible loop.  */
      if (add_irreducible_flag)
	{
	  for (i = 0; i < n; i++)
	    new_bbs[i]->flags |= BB_DUPLICATED;
	  for (i = 0; i < n; i++)
	    {
	      edge_iterator ei;
	      new_bb = new_bbs[i];
	      if (new_bb->loop_father == target)
		new_bb->flags |= BB_IRREDUCIBLE_LOOP;

	      FOR_EACH_EDGE (ae, ei, new_bb->succs)
		if ((ae->dest->flags & BB_DUPLICATED)
		    && (ae->src->loop_father == target
			|| ae->dest->loop_father == target))
		  ae->flags |= EDGE_IRREDUCIBLE_LOOP;
	    }
	  for (i = 0; i < n; i++)
	    new_bbs[i]->flags &= ~BB_DUPLICATED;
	}

      /* Redirect the special edges.  */
      if (is_latch)
	{
	  redirect_edge_and_branch_force (latch_edge, new_bbs[0]);
	  redirect_edge_and_branch_force (new_spec_edges[SE_LATCH],
					  loop->header);
	  set_immediate_dominator (CDI_DOMINATORS, new_bbs[0], latch);
	  latch = loop->latch = new_bbs[n - 1];
	  e = latch_edge = new_spec_edges[SE_LATCH];
	}
      else
	{
	  redirect_edge_and_branch_force (new_spec_edges[SE_LATCH],
					  loop->header);
	  redirect_edge_and_branch_force (e, new_bbs[0]);
	  set_immediate_dominator (CDI_DOMINATORS, new_bbs[0], e->src);
	  e = new_spec_edges[SE_LATCH];
	}

      /* Record exit edge in this copy.  */
      if (orig && TEST_BIT (wont_exit, j + 1))
	{
	  if (to_remove)
	    VEC_safe_push (edge, heap, *to_remove, new_spec_edges[SE_ORIG]);
	  set_zero_probability (new_spec_edges[SE_ORIG]);

	  /* Scale the frequencies of the blocks dominated by the exit.  */
	  if (bbs_to_scale)
	    {
	      EXECUTE_IF_SET_IN_BITMAP (bbs_to_scale, 0, i, bi)
		{
		  scale_bbs_frequencies_int (new_bbs + i, 1, scale_after_exit,
					     REG_BR_PROB_BASE);
		}
	    }
	}

      /* Record the first copy in the control flow order if it is not
	 the original loop (i.e. in case of peeling).  */
      if (!first_active_latch)
	{
	  memcpy (first_active, new_bbs, n * sizeof (basic_block));
	  first_active_latch = new_bbs[n - 1];
	}

      /* Set counts and frequencies.  */
      if (flags & DLTHE_FLAG_UPDATE_FREQ)
	{
	  scale_bbs_frequencies_int (new_bbs, n, scale_act, REG_BR_PROB_BASE);
	  scale_act = RDIV (scale_act * scale_step[j], REG_BR_PROB_BASE);
	}
    }
  free (new_bbs);
  free (orig_loops);

  /* Record the exit edge in the original loop body, and update the frequencies.  */
  if (orig && TEST_BIT (wont_exit, 0))
    {
      if (to_remove)
	VEC_safe_push (edge, heap, *to_remove, orig);
      set_zero_probability (orig);

      /* Scale the frequencies of the blocks dominated by the exit.  */
      if (bbs_to_scale)
	{
	  EXECUTE_IF_SET_IN_BITMAP (bbs_to_scale, 0, i, bi)
	    {
	      scale_bbs_frequencies_int (bbs + i, 1, scale_after_exit,
					 REG_BR_PROB_BASE);
	    }
	}
    }

  /* Update the original loop.  */
  if (!is_latch)
    set_immediate_dominator (CDI_DOMINATORS, e->dest, e->src);
  if (flags & DLTHE_FLAG_UPDATE_FREQ)
    {
      scale_bbs_frequencies_int (bbs, n, scale_main, REG_BR_PROB_BASE);
      free (scale_step);
    }

  /* Update dominators of outer blocks if affected.  */
  for (i = 0; i < n; i++)
    {
      basic_block dominated, dom_bb, *dom_bbs;
      int n_dom_bbs,j;

      bb = bbs[i];
      bb->aux = 0;

      n_dom_bbs = get_dominated_by (CDI_DOMINATORS, bb, &dom_bbs);
      for (j = 0; j < n_dom_bbs; j++)
	{
	  dominated = dom_bbs[j];
	  if (flow_bb_inside_loop_p (loop, dominated))
	    continue;
	  dom_bb = nearest_common_dominator (
			CDI_DOMINATORS, first_active[i], first_active_latch);
	  set_immediate_dominator (CDI_DOMINATORS, dominated, dom_bb);
	}
      free (dom_bbs);
    }
  free (first_active);

  free (bbs);
  BITMAP_FREE (bbs_to_scale);

  return true;
}
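/* A rough sketch of a typical caller, such as the unroller (illustrative
   only; the call below is an assumption about usage, not code copied from a
   GCC pass):

       sbitmap wont_exit = sbitmap_alloc (ndupl + 1);
       sbitmap_ones (wont_exit);
       RESET_BIT (wont_exit, 0);  -- the original body keeps its exit
       bool ok = duplicate_loop_to_header_edge (loop, loop_latch_edge (loop),
						ndupl, wont_exit, NULL, NULL,
						DLTHE_FLAG_UPDATE_FREQ);
       sbitmap_free (wont_exit);

   Passing the latch edge unrolls the loop in place; passing
   loop_preheader_edge (loop) instead peels NDUPL copies in front of it.  */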

/* A callback for make_forwarder_block, to redirect all edges except for
   MFB_KJ_EDGE to the entry part.  E is the edge for which we should decide
   whether to redirect it.  */

static edge mfb_kj_edge;
static bool
mfb_keep_just (edge e)
{
  return e != mfb_kj_edge;
}

/* Creates a preheader for LOOP.  Returns the newly created block.  Unless
   CP_SIMPLE_PREHEADERS is set in FLAGS, we only force LOOP to have a single
   entry; otherwise we also force the preheader block to have only one
   successor.  The function also updates dominators.  */

static basic_block
create_preheader (struct loop *loop, int flags)
{
  edge e, fallthru;
  basic_block dummy;
  int nentry = 0;
  bool irred = false;
  bool latch_edge_was_fallthru;
  edge one_succ_pred = NULL, single_entry = NULL;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, loop->header->preds)
    {
      if (e->src == loop->latch)
	continue;
      irred |= (e->flags & EDGE_IRREDUCIBLE_LOOP) != 0;
      nentry++;
      single_entry = e;
      if (single_succ_p (e->src))
	one_succ_pred = e;
    }
  gcc_assert (nentry);
  if (nentry == 1)
    {
      if (/* We do not allow entry block to be the loop preheader, since we
	     cannot emit code there.  */
	  single_entry->src != ENTRY_BLOCK_PTR
	  /* If we want simple preheaders, also force the preheader to have
	     just a single successor.  */
	  && !((flags & CP_SIMPLE_PREHEADERS)
	       && !single_succ_p (single_entry->src)))
	return NULL;
    }

  mfb_kj_edge = loop_latch_edge (loop);
  latch_edge_was_fallthru = (mfb_kj_edge->flags & EDGE_FALLTHRU) != 0;
  fallthru = make_forwarder_block (loop->header, mfb_keep_just, NULL);
  dummy = fallthru->src;
  loop->header = fallthru->dest;

  /* Try to be clever in placing the newly created preheader.  The idea is to
     avoid breaking any "fallthruness" relationship between blocks.

     The preheader was created just before the header and all incoming edges
     to the header were redirected to the preheader, except the latch edge.
     So the only problematic case is when this latch edge was a fallthru
     edge: it is not anymore after the preheader creation so we have broken
     the fallthruness.  We're therefore going to look for a better place.  */
  if (latch_edge_was_fallthru)
    {
      if (one_succ_pred)
	e = one_succ_pred;
      else
	e = EDGE_PRED (dummy, 0);

      move_block_after (dummy, e->src);
    }

  if (irred)
    {
      dummy->flags |= BB_IRREDUCIBLE_LOOP;
      single_succ_edge (dummy)->flags |= EDGE_IRREDUCIBLE_LOOP;
    }

  if (dump_file)
    fprintf (dump_file, "Created preheader block for loop %i\n",
	     loop->num);

  return dummy;
}

/* Create preheaders for each loop; for meaning of FLAGS see create_preheader.  */

void
create_preheaders (int flags)
{
  loop_iterator li;
  struct loop *loop;

  if (!current_loops)
    return;

  FOR_EACH_LOOP (li, loop, 0)
    create_preheader (loop, flags);
  current_loops->state |= LOOPS_HAVE_PREHEADERS;
}

/* Forces all loop latches to have only a single successor.  */

void
force_single_succ_latches (void)
{
  loop_iterator li;
  struct loop *loop;
  edge e;

  FOR_EACH_LOOP (li, loop, 0)
    {
      if (loop->latch != loop->header && single_succ_p (loop->latch))
	continue;

      e = find_edge (loop->latch, loop->header);

      split_edge (e);
    }
  current_loops->state |= LOOPS_HAVE_SIMPLE_LATCHES;
}

/* This function is called from loop_version.  It splits the entry edge
   of the loop we want to version, adds the versioning condition, and
   adjusts the edges to the two versions of the loop appropriately.
   E is an incoming edge.  Returns the basic block containing the
   condition.

   --- edge e ---- > [second_head]

   Split it and insert new conditional expression and adjust edges.

    --- edge e ---> [cond expr] ---> [first_head]
			|
			+---------> [second_head]

  THEN_PROB is the probability of the then branch of the condition.  */

static basic_block
lv_adjust_loop_entry_edge (basic_block first_head, basic_block second_head,
			   edge e, void *cond_expr, unsigned then_prob)
{
  basic_block new_head = NULL;
  edge e1;

  gcc_assert (e->dest == second_head);

  /* Split edge 'e'. This will create a new basic block, where we can
     insert conditional expr.  */
  new_head = split_edge (e);

  lv_add_condition_to_bb (first_head, second_head, new_head,
			  cond_expr);

  /* Don't set EDGE_TRUE_VALUE in RTL mode, as it's invalid there.  */
  e = single_succ_edge (new_head);
  e1 = make_edge (new_head, first_head,
		  current_ir_type () == IR_GIMPLE ? EDGE_TRUE_VALUE : 0);
  e1->probability = then_prob;
  e->probability = REG_BR_PROB_BASE - then_prob;
  e1->count = RDIV (e->count * e1->probability, REG_BR_PROB_BASE);
  e->count = RDIV (e->count * e->probability, REG_BR_PROB_BASE);

  set_immediate_dominator (CDI_DOMINATORS, first_head, new_head);
  set_immediate_dominator (CDI_DOMINATORS, second_head, new_head);

  /* Adjust loop header phi nodes.  */
  lv_adjust_loop_header_phi (first_head, second_head, new_head, e1);

  return new_head;
}

/* Main entry point for Loop Versioning transformation.

   This transformation given a condition and a loop, creates
   -if (condition) { loop_copy1 } else { loop_copy2 },
   where loop_copy1 is the loop transformed in one way, and loop_copy2
   is the loop transformed in another way (or unchanged). 'condition'
   may be a run time test for things that were not resolved by static
   analysis (overlapping ranges (anti-aliasing), alignment, etc.).

   THEN_PROB is the probability of the then edge of the if.  THEN_SCALE
   is the ratio by which the frequencies in the original loop should
   be scaled.  ELSE_SCALE is the ratio by which the frequencies in the
   new loop should be scaled.
   
   If PLACE_AFTER is true, we place the new loop after LOOP in the
   instruction stream, otherwise it is placed before LOOP.  */

struct loop *
loop_version (struct loop *loop,
	      void *cond_expr, basic_block *condition_bb,
	      unsigned then_prob, unsigned then_scale, unsigned else_scale,
	      bool place_after)
{
  basic_block first_head, second_head;
  edge entry, latch_edge, true_edge, false_edge;
  int irred_flag;
  struct loop *nloop;
  basic_block cond_bb;

  /* CHECKME: Loop versioning does not handle nested loops at this point.  */
  if (loop->inner)
    return NULL;

  /* Record the entry and latch edges for the loop.  */
  entry = loop_preheader_edge (loop);
  irred_flag = entry->flags & EDGE_IRREDUCIBLE_LOOP;
  entry->flags &= ~EDGE_IRREDUCIBLE_LOOP;

  /* Note down head of loop as first_head.  */
  first_head = entry->dest;

  /* Duplicate loop.  */
  if (!cfg_hook_duplicate_loop_to_header_edge (loop, entry, 1,
					       NULL, NULL, NULL, 0))
    return NULL;

  /* After duplication entry edge now points to new loop head block.
     Note down new head as second_head.  */
  second_head = entry->dest;

  /* Split loop entry edge and insert new block with cond expr.  */
  cond_bb =  lv_adjust_loop_entry_edge (first_head, second_head,
					entry, cond_expr, then_prob);
  if (condition_bb)
    *condition_bb = cond_bb;

  if (!cond_bb)
    {
      entry->flags |= irred_flag;
      return NULL;
    }

  latch_edge = single_succ_edge (get_bb_copy (loop->latch));

  extract_cond_bb_edges (cond_bb, &true_edge, &false_edge);
  nloop = loopify (latch_edge,
		   single_pred_edge (get_bb_copy (loop->header)),
		   cond_bb, true_edge, false_edge,
		   false /* Do not redirect all edges.  */,
		   then_scale, else_scale);

  /* loopify redirected latch_edge. Update its PENDING_STMTS.  */
  lv_flush_pending_stmts (latch_edge);

  /* loopify redirected condition_bb's succ edge. Update its PENDING_STMTS.  */
  extract_cond_bb_edges (cond_bb, &true_edge, &false_edge);
  lv_flush_pending_stmts (false_edge);
  /* Adjust irreducible flag.  */
  if (irred_flag)
    {
      cond_bb->flags |= BB_IRREDUCIBLE_LOOP;
      loop_preheader_edge (loop)->flags |= EDGE_IRREDUCIBLE_LOOP;
      loop_preheader_edge (nloop)->flags |= EDGE_IRREDUCIBLE_LOOP;
      single_pred_edge (cond_bb)->flags |= EDGE_IRREDUCIBLE_LOOP;
    }

  if (place_after)
    {
      basic_block *bbs = get_loop_body_in_dom_order (nloop), after;
      unsigned i;

      after = loop->latch;

      for (i = 0; i < nloop->num_nodes; i++)
	{
	  move_block_after (bbs[i], after);
	  after = bbs[i];
	}
      free (bbs);
    }

  /* At this point condition_bb is the loop preheader with two successors,
     first_head and second_head.  Make sure that the loop preheader has only
     one successor.  */
  split_edge (loop_preheader_edge (loop));
  split_edge (loop_preheader_edge (nloop));

  return nloop;
}
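/* An illustrative sketch of a caller (assumed usage, not code from this
   file): a GIMPLE pass that wants LOOP guarded by a condition COND, keeping
   90% of the probability and frequency in the original loop and placing the
   copy after it in the insn stream, might do

       basic_block cond_bb;
       struct loop *nloop
	 = loop_version (loop, cond, &cond_bb, 9000, 9000, 1000, true);

   where 9000, 9000 and 1000 are THEN_PROB, THEN_SCALE and ELSE_SCALE in
   units of REG_BR_PROB_BASE (10000).  */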

/* The structure of loops might have changed.  Some loops might get removed
   (and their headers and latches were set to NULL), loop exits might get
   removed (thus the loop nesting may be wrong), and some blocks and edges
   were changed (so the information about the bb --> loop mapping does not
   have to be correct).  But still, for the remaining loops the header
   dominates the latch, and loops did not get new subloops (new loops might
   possibly get created, but we are not interested in them).  Fix up the mess.

   If CHANGED_BBS is not NULL, basic blocks whose loop has changed are
   marked in it.  */

void
fix_loop_structure (bitmap changed_bbs)
{
  basic_block bb;
  struct loop *loop, *ploop;
  loop_iterator li;
  bool record_exits = false;
  struct loop **superloop = XNEWVEC (struct loop *, number_of_loops ());

  gcc_assert (current_loops->state & LOOPS_HAVE_SIMPLE_LATCHES);

  /* Remove the old bb -> loop mapping.  Remember the depth of the blocks in
     the loop hierarchy, so that we can recognize blocks whose loop nesting
     relationship has changed.  */
  FOR_EACH_BB (bb)
    {
      if (changed_bbs)
	bb->aux = (void *) (size_t) bb->loop_father->depth;
      bb->loop_father = current_loops->tree_root;
    }

  if (current_loops->state & LOOPS_HAVE_RECORDED_EXITS)
    {
      release_recorded_exits ();
      record_exits = true;
    }

  /* Remove the dead loops from structures.  We start from the innermost
     loops, so that when we remove the loops, we know that the loops inside
     are preserved, and do not waste time relinking loops that will be
     removed later.  */
  FOR_EACH_LOOP (li, loop, LI_FROM_INNERMOST)
    {
      if (loop->header)
	continue;

      while (loop->inner)
	{
	  ploop = loop->inner;
	  flow_loop_tree_node_remove (ploop);
	  flow_loop_tree_node_add (loop->outer, ploop);
	}

      /* Remove the loop and free its data.  */
      delete_loop (loop);
    }

  /* Rescan the bodies of loops, starting from the outermost ones.  We assume
     that no optimization interchanges the order of the loops, i.e., it cannot
     happen that L1 was superloop of L2 before and it is subloop of L2 now
     (without explicitly updating loop information).  At the same time, we also
     determine the new loop structure.  */
  current_loops->tree_root->num_nodes = n_basic_blocks;
  FOR_EACH_LOOP (li, loop, 0)
    {
      superloop[loop->num] = loop->header->loop_father;
      loop->num_nodes = flow_loop_nodes_find (loop->header, loop);
    }

  /* Now fix the loop nesting.  */
  FOR_EACH_LOOP (li, loop, 0)
    {
      ploop = superloop[loop->num];
      if (ploop != loop->outer)
	{
	  flow_loop_tree_node_remove (loop);
	  flow_loop_tree_node_add (ploop, loop);
	}
    }
  free (superloop);

  /* Mark the blocks whose loop has changed.  */
  if (changed_bbs)
    {
      FOR_EACH_BB (bb)
	{
	  if ((void *) (size_t) bb->loop_father->depth != bb->aux)
	    bitmap_set_bit (changed_bbs, bb->index);

    	  bb->aux = NULL;
	}
    }

  if (current_loops->state & LOOPS_HAVE_PREHEADERS)
    create_preheaders (CP_SIMPLE_PREHEADERS);

  if (current_loops->state & LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS)
    mark_irreducible_loops ();

  if (record_exits)
    record_loop_exits ();

#ifdef ENABLE_CHECKING
  verify_loop_structure ();
#endif
}