1 /* Natural loop discovery code for GNU compiler.
2    Copyright (C) 2000-2014 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 #include "config.h"
21 #include "system.h"
22 #include "coretypes.h"
23 #include "tm.h"
24 #include "rtl.h"
25 #include "function.h"
26 #include "basic-block.h"
27 #include "cfgloop.h"
28 #include "diagnostic-core.h"
29 #include "flags.h"
30 #include "tree.h"
31 #include "pointer-set.h"
32 #include "tree-ssa-alias.h"
33 #include "internal-fn.h"
34 #include "gimple-expr.h"
35 #include "is-a.h"
36 #include "gimple.h"
37 #include "gimple-iterator.h"
38 #include "gimple-ssa.h"
39 #include "dumpfile.h"
40 
41 static void flow_loops_cfg_dump (FILE *);
42 
43 /* Dump loop related CFG information.  */
44 
45 static void
46 flow_loops_cfg_dump (FILE *file)
47 {
48   basic_block bb;
49 
50   if (!file)
51     return;
52 
53   FOR_EACH_BB_FN (bb, cfun)
54     {
55       edge succ;
56       edge_iterator ei;
57 
58       fprintf (file, ";; %d succs { ", bb->index);
59       FOR_EACH_EDGE (succ, ei, bb->succs)
60 	fprintf (file, "%d ", succ->dest->index);
61       fprintf (file, "}\n");
62     }
63 }
64 
65 /* Return nonzero if the nodes of LOOP are a subset of OUTER.  */
66 
67 bool
68 flow_loop_nested_p (const struct loop *outer, const struct loop *loop)
69 {
70   unsigned odepth = loop_depth (outer);
71 
72   return (loop_depth (loop) > odepth
73 	  && (*loop->superloops)[odepth] == outer);
74 }
75 
76 /* Returns the superloop of LOOP at depth DEPTH in the loop hierarchy
77    (indexed from zero, the root of the loop tree being at depth zero).  */
78 
79 struct loop *
80 superloop_at_depth (struct loop *loop, unsigned depth)
81 {
82   unsigned ldepth = loop_depth (loop);
83 
84   gcc_assert (depth <= ldepth);
85 
86   if (depth == ldepth)
87     return loop;
88 
89   return (*loop->superloops)[depth];
90 }
91 
92 /* Returns the list of the latch edges of LOOP.  */
93 
94 static vec<edge>
95 get_loop_latch_edges (const struct loop *loop)
96 {
97   edge_iterator ei;
98   edge e;
99   vec<edge> ret = vNULL;
100 
101   FOR_EACH_EDGE (e, ei, loop->header->preds)
102     {
103       if (dominated_by_p (CDI_DOMINATORS, e->src, loop->header))
104 	ret.safe_push (e);
105     }
106 
107   return ret;
108 }
109 
110 /* Dump the loop information specified by LOOP to the stream FILE
111    using the auxiliary dump callback function LOOP_DUMP_AUX if non-null.  */
112 
113 void
114 flow_loop_dump (const struct loop *loop, FILE *file,
115 		void (*loop_dump_aux) (const struct loop *, FILE *, int),
116 		int verbose)
117 {
118   basic_block *bbs;
119   unsigned i;
120   vec<edge> latches;
121   edge e;
122 
123   if (! loop || ! loop->header)
124     return;
125 
126   fprintf (file, ";;\n;; Loop %d\n", loop->num);
127 
128   fprintf (file, ";;  header %d, ", loop->header->index);
129   if (loop->latch)
130     fprintf (file, "latch %d\n", loop->latch->index);
131   else
132     {
133       fprintf (file, "multiple latches:");
134       latches = get_loop_latch_edges (loop);
135       FOR_EACH_VEC_ELT (latches, i, e)
136 	fprintf (file, " %d", e->src->index);
137       latches.release ();
138       fprintf (file, "\n");
139     }
140 
141   fprintf (file, ";;  depth %d, outer %ld\n",
142 	   loop_depth (loop), (long) (loop_outer (loop)
143 				      ? loop_outer (loop)->num : -1));
144 
145   fprintf (file, ";;  nodes:");
146   bbs = get_loop_body (loop);
147   for (i = 0; i < loop->num_nodes; i++)
148     fprintf (file, " %d", bbs[i]->index);
149   free (bbs);
150   fprintf (file, "\n");
151 
152   if (loop_dump_aux)
153     loop_dump_aux (loop, file, verbose);
154 }
155 
156 /* Dump information about all loops to the stream FILE,
157    using the auxiliary dump callback function LOOP_DUMP_AUX if non-null.  */
158 
159 void
160 flow_loops_dump (FILE *file, void (*loop_dump_aux) (const struct loop *, FILE *, int), int verbose)
161 {
162   struct loop *loop;
163 
164   if (!current_loops || ! file)
165     return;
166 
167   fprintf (file, ";; %d loops found\n", number_of_loops (cfun));
168 
169   FOR_EACH_LOOP (loop, LI_INCLUDE_ROOT)
170     {
171       flow_loop_dump (loop, file, loop_dump_aux, verbose);
172     }
173 
174   if (verbose)
175     flow_loops_cfg_dump (file);
176 }
177 
178 /* Free data allocated for LOOP.  */
179 
180 void
181 flow_loop_free (struct loop *loop)
182 {
183   struct loop_exit *exit, *next;
184 
185   vec_free (loop->superloops);
186 
187   /* Break the list of the loop exit records.  They will be freed when the
188      corresponding edge is rescanned or removed, and this avoids
189      accessing the (already released) head of the list stored in the
190      loop structure.  */
191   for (exit = loop->exits->next; exit != loop->exits; exit = next)
192     {
193       next = exit->next;
194       exit->next = exit;
195       exit->prev = exit;
196     }
197 
198   ggc_free (loop->exits);
199   ggc_free (loop);
200 }
201 
202 /* Free all the memory allocated for LOOPS.  */
203 
204 void
205 flow_loops_free (struct loops *loops)
206 {
207   if (loops->larray)
208     {
209       unsigned i;
210       loop_p loop;
211 
212       /* Free the loop descriptors.  */
213       FOR_EACH_VEC_SAFE_ELT (loops->larray, i, loop)
214 	{
215 	  if (!loop)
216 	    continue;
217 
218 	  flow_loop_free (loop);
219 	}
220 
221       vec_free (loops->larray);
222     }
223 }
224 
225 /* Find the nodes contained within the LOOP with header HEADER.
226    Return the number of nodes within the loop.  */
227 
228 int
229 flow_loop_nodes_find (basic_block header, struct loop *loop)
230 {
231   vec<basic_block> stack = vNULL;
232   int num_nodes = 1;
233   edge latch;
234   edge_iterator latch_ei;
235 
236   header->loop_father = loop;
237 
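  /* Walk the CFG backwards from each latch edge (a predecessor of the header
     that the header dominates).  Every block reached this way belongs to the
     natural loop; the walk stops at the header because its loop_father has
     already been set to LOOP above.  */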
238   FOR_EACH_EDGE (latch, latch_ei, loop->header->preds)
239     {
240       if (latch->src->loop_father == loop
241 	  || !dominated_by_p (CDI_DOMINATORS, latch->src, loop->header))
242 	continue;
243 
244       num_nodes++;
245       stack.safe_push (latch->src);
246       latch->src->loop_father = loop;
247 
248       while (!stack.is_empty ())
249 	{
250 	  basic_block node;
251 	  edge e;
252 	  edge_iterator ei;
253 
254 	  node = stack.pop ();
255 
256 	  FOR_EACH_EDGE (e, ei, node->preds)
257 	    {
258 	      basic_block ancestor = e->src;
259 
260 	      if (ancestor->loop_father != loop)
261 		{
262 		  ancestor->loop_father = loop;
263 		  num_nodes++;
264 		  stack.safe_push (ancestor);
265 		}
266 	    }
267 	}
268     }
269   stack.release ();
270 
271   return num_nodes;
272 }
273 
274 /* Records the vector of superloops of the loop LOOP, whose immediate
275    superloop is FATHER.  */
276 
277 static void
278 establish_preds (struct loop *loop, struct loop *father)
279 {
280   loop_p ploop;
281   unsigned depth = loop_depth (father) + 1;
282   unsigned i;
283 
284   loop->superloops = 0;
285   vec_alloc (loop->superloops, depth);
286   FOR_EACH_VEC_SAFE_ELT (father->superloops, i, ploop)
287     loop->superloops->quick_push (ploop);
288   loop->superloops->quick_push (father);
289 
290   for (ploop = loop->inner; ploop; ploop = ploop->next)
291     establish_preds (ploop, loop);
292 }
293 
294 /* Add LOOP to the loop hierarchy tree where FATHER is the father of the
295    added loop.  If LOOP has some children, take care that their
296    superloop information is initialized correctly.  */
297 
298 void
299 flow_loop_tree_node_add (struct loop *father, struct loop *loop)
300 {
301   loop->next = father->inner;
302   father->inner = loop;
303 
304   establish_preds (loop, father);
305 }
306 
307 /* Remove LOOP from the loop hierarchy tree.  */
308 
309 void
310 flow_loop_tree_node_remove (struct loop *loop)
311 {
312   struct loop *prev, *father;
313 
314   father = loop_outer (loop);
315 
316   /* Remove loop from the list of sons.  */
317   if (father->inner == loop)
318     father->inner = loop->next;
319   else
320     {
321       for (prev = father->inner; prev->next != loop; prev = prev->next)
322 	continue;
323       prev->next = loop->next;
324     }
325 
326   loop->superloops = NULL;
327 }
328 
329 /* Allocates and returns new loop structure.  */
330 
331 struct loop *
332 alloc_loop (void)
333 {
334   struct loop *loop = ggc_alloc_cleared_loop ();
335 
336   loop->exits = ggc_alloc_cleared_loop_exit ();
337   loop->exits->next = loop->exits->prev = loop->exits;
338   loop->can_be_parallel = false;
339 
340   return loop;
341 }
342 
343 /* Initializes the loops structure LOOPS, reserving room for NUM_LOOPS loops
344    (including the root of the loop tree).  */
345 
346 void
347 init_loops_structure (struct function *fn,
348 		      struct loops *loops, unsigned num_loops)
349 {
350   struct loop *root;
351 
352   memset (loops, 0, sizeof *loops);
353   vec_alloc (loops->larray, num_loops);
354 
355   /* Dummy loop containing whole function.  */
356   root = alloc_loop ();
357   root->num_nodes = n_basic_blocks_for_fn (fn);
358   root->latch = EXIT_BLOCK_PTR_FOR_FN (fn);
359   root->header = ENTRY_BLOCK_PTR_FOR_FN (fn);
360   ENTRY_BLOCK_PTR_FOR_FN (fn)->loop_father = root;
361   EXIT_BLOCK_PTR_FOR_FN (fn)->loop_father = root;
362 
363   loops->larray->quick_push (root);
364   loops->tree_root = root;
365 }
366 
367 /* Returns whether HEADER is a loop header.  */
368 
369 bool
370 bb_loop_header_p (basic_block header)
371 {
372   edge_iterator ei;
373   edge e;
374 
375   /* If we have an abnormal predecessor, do not consider the
376      loop (not worth the problems).  */
377   if (bb_has_abnormal_pred (header))
378     return false;
379 
380   /* Look for back edges where a predecessor is dominated
381      by this block.  A natural loop has a single entry
382      node (header) that dominates all the nodes in the
383      loop.  It also has a single back edge to the header
384      from a latch node.  */
385   FOR_EACH_EDGE (e, ei, header->preds)
386     {
387       basic_block latch = e->src;
388       if (latch != ENTRY_BLOCK_PTR_FOR_FN (cfun)
389 	  && dominated_by_p (CDI_DOMINATORS, latch, header))
390 	return true;
391     }
392 
393   return false;
394 }
395 
396 /* Find all the natural loops in the function and save in LOOPS structure and
397    recalculate loop_father information in basic block structures.
398    If LOOPS is non-NULL then the loop structures for already recorded loops
399    will be re-used and their number will not change.  We assume that no
400    stale loops exist in LOOPS.
401    When LOOPS is NULL it is allocated and re-built from scratch.
402    Return the built LOOPS structure.  */
403 
404 struct loops *
405 flow_loops_find (struct loops *loops)
406 {
407   bool from_scratch = (loops == NULL);
408   int *rc_order;
409   int b;
410   unsigned i;
411 
412   /* Ensure that the dominators are computed.  */
413   calculate_dominance_info (CDI_DOMINATORS);
414 
415   if (!loops)
416     {
417       loops = ggc_alloc_cleared_loops ();
418       init_loops_structure (cfun, loops, 1);
419     }
420 
421   /* Ensure that loop exits were released.  */
422   gcc_assert (loops->exits == NULL);
423 
424   /* Taking care of this degenerate case makes the rest of
425      this code simpler.  */
426   if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
427     return loops;
428 
429   /* The root loop node contains all basic-blocks.  */
430   loops->tree_root->num_nodes = n_basic_blocks_for_fn (cfun);
431 
432   /* Compute depth first search order of the CFG so that outer
433      natural loops will be found before inner natural loops.  */
434   rc_order = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
435   pre_and_rev_post_order_compute (NULL, rc_order, false);
436 
437   /* Gather all loop headers in reverse completion order and allocate
438      loop structures for loops that are not already present.  */
439   auto_vec<loop_p> larray (loops->larray->length ());
440   for (b = 0; b < n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS; b++)
441     {
442       basic_block header = BASIC_BLOCK_FOR_FN (cfun, rc_order[b]);
443       if (bb_loop_header_p (header))
444 	{
445 	  struct loop *loop;
446 
447 	  /* The current active loop tree has valid loop-fathers for
448 	     header blocks.  */
449 	  if (!from_scratch
450 	      && header->loop_father->header == header)
451 	    {
452 	      loop = header->loop_father;
453 	      /* If we found an existing loop remove it from the
454 		 loop tree.  It is going to be inserted again
455 		 below.  */
456 	      flow_loop_tree_node_remove (loop);
457 	    }
458 	  else
459 	    {
460 	      /* Otherwise allocate a new loop structure for the loop.  */
461 	      loop = alloc_loop ();
462 	      /* ???  We could re-use unused loop slots here.  */
463 	      loop->num = loops->larray->length ();
464 	      vec_safe_push (loops->larray, loop);
465 	      loop->header = header;
466 
467 	      if (!from_scratch
468 		  && dump_file && (dump_flags & TDF_DETAILS))
469 		fprintf (dump_file, "flow_loops_find: discovered new "
470 			 "loop %d with header %d\n",
471 			 loop->num, header->index);
472 	    }
473 	  /* Reset latch, we recompute it below.  */
474 	  loop->latch = NULL;
475 	  larray.safe_push (loop);
476 	}
477 
478       /* Make blocks part of the loop root node at start.  */
479       header->loop_father = loops->tree_root;
480     }
481 
482   free (rc_order);
483 
484   /* Now iterate over the loops found, insert them into the loop tree
485      and assign basic-block ownership.  */
486   for (i = 0; i < larray.length (); ++i)
487     {
488       struct loop *loop = larray[i];
489       basic_block header = loop->header;
490       edge_iterator ei;
491       edge e;
492 
493       flow_loop_tree_node_add (header->loop_father, loop);
494       loop->num_nodes = flow_loop_nodes_find (loop->header, loop);
495 
496       /* Look for the latch for this header block, if it has just a
497 	 single one.  */
498       FOR_EACH_EDGE (e, ei, header->preds)
499 	{
500 	  basic_block latch = e->src;
501 
502 	  if (flow_bb_inside_loop_p (loop, latch))
503 	    {
504 	      if (loop->latch != NULL)
505 		{
506 		  /* More than one latch edge.  */
507 		  loop->latch = NULL;
508 		  break;
509 		}
510 	      loop->latch = latch;
511 	    }
512 	}
513     }
514 
515   return loops;
516 }
517 
518 /* Ratio of the edge frequencies at which one of several latch edges is
519    considered to belong to an inner loop with the same header.  */
520 #define HEAVY_EDGE_RATIO 8
521 
522 /* Minimum number of samples for which we apply the
523    find_subloop_latch_edge_by_profile heuristics.  */
524 #define HEAVY_EDGE_MIN_SAMPLES 10
525 
526 /* If the profile info is available, finds an edge in LATCHES that is much more
527    frequent than the remaining edges.  Returns such an edge, or NULL if we do
528    not find one.
529 
530    We do not use guessed profile here, only the measured one.  The guessed
531    profile is usually too flat and unreliable for this (and it is mostly based
532    on the loop structure of the program, so it does not make much sense to
533    derive the loop structure from it).  */
534 
535 static edge
536 find_subloop_latch_edge_by_profile (vec<edge> latches)
537 {
538   unsigned i;
539   edge e, me = NULL;
540   gcov_type mcount = 0, tcount = 0;
541 
542   FOR_EACH_VEC_ELT (latches, i, e)
543     {
544       if (e->count > mcount)
545 	{
546 	  me = e;
547 	  mcount = e->count;
548 	}
549       tcount += e->count;
550     }
551 
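  /* Apply the heuristics only when we have enough samples, and only when the
     most frequent latch edge ME carries all but at most 1/HEAVY_EDGE_RATIO
     of the total count of the latch edges.  */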
552   if (tcount < HEAVY_EDGE_MIN_SAMPLES
553       || (tcount - mcount) * HEAVY_EDGE_RATIO > tcount)
554     return NULL;
555 
556   if (dump_file)
557     fprintf (dump_file,
558 	     "Found latch edge %d -> %d using profile information.\n",
559 	     me->src->index, me->dest->index);
560   return me;
561 }
562 
563 /* Among LATCHES, guesses a latch edge of LOOP corresponding to a subloop, based
564    on the structure of induction variables.  Returns this edge, or NULL if we
565    do not find any.
566 
567    We are quite conservative, and look just for an obvious simple innermost
568    loop (which is the case where we would lose the most performance by not
569    disambiguating the loop).  More precisely, we look for the following
570    situation: The source of the chosen latch edge dominates sources of all
571    the other latch edges.  Additionally, the header does not contain a phi node
572    such that the argument from the chosen edge is equal to the argument from
573    another edge.  */
574 
575 static edge
576 find_subloop_latch_edge_by_ivs (struct loop *loop ATTRIBUTE_UNUSED, vec<edge> latches)
577 {
578   edge e, latch = latches[0];
579   unsigned i;
580   gimple phi;
581   gimple_stmt_iterator psi;
582   tree lop;
583   basic_block bb;
584 
585   /* Find the candidate for the latch edge.  */
586   for (i = 1; latches.iterate (i, &e); i++)
587     if (dominated_by_p (CDI_DOMINATORS, latch->src, e->src))
588       latch = e;
589 
590   /* Verify that it dominates all the latch edges.  */
591   FOR_EACH_VEC_ELT (latches, i, e)
592     if (!dominated_by_p (CDI_DOMINATORS, e->src, latch->src))
593       return NULL;
594 
595   /* Check for a phi node that would deny that this is a latch edge of
596      a subloop.  */
597   for (psi = gsi_start_phis (loop->header); !gsi_end_p (psi); gsi_next (&psi))
598     {
599       phi = gsi_stmt (psi);
600       lop = PHI_ARG_DEF_FROM_EDGE (phi, latch);
601 
602       /* Ignore the values that are not changed inside the subloop.  */
603       if (TREE_CODE (lop) != SSA_NAME
604 	  || SSA_NAME_DEF_STMT (lop) == phi)
605 	continue;
606       bb = gimple_bb (SSA_NAME_DEF_STMT (lop));
607       if (!bb || !flow_bb_inside_loop_p (loop, bb))
608 	continue;
609 
610       FOR_EACH_VEC_ELT (latches, i, e)
611 	if (e != latch
612 	    && PHI_ARG_DEF_FROM_EDGE (phi, e) == lop)
613 	  return NULL;
614     }
615 
616   if (dump_file)
617     fprintf (dump_file,
618 	     "Found latch edge %d -> %d using iv structure.\n",
619 	     latch->src->index, latch->dest->index);
620   return latch;
621 }
622 
623 /* If we can determine that one of the several latch edges of LOOP behaves
624    as a latch edge of a separate subloop, returns this edge.  Otherwise
625    returns NULL.  */
626 
627 static edge
628 find_subloop_latch_edge (struct loop *loop)
629 {
630   vec<edge> latches = get_loop_latch_edges (loop);
631   edge latch = NULL;
632 
633   if (latches.length () > 1)
634     {
635       latch = find_subloop_latch_edge_by_profile (latches);
636 
637       if (!latch
638 	  /* We consider ivs to guess the latch edge only in SSA.  Perhaps we
639 	     should use cfghook for this, but it is hard to imagine it would
640 	     be useful elsewhere.  */
641 	  && current_ir_type () == IR_GIMPLE)
642 	latch = find_subloop_latch_edge_by_ivs (loop, latches);
643     }
644 
645   latches.release ();
646   return latch;
647 }
648 
649 /* Callback for make_forwarder_block.  Returns true if the edge E is marked
650    in the set MFB_REIS_SET.  */
651 
652 static struct pointer_set_t *mfb_reis_set;
653 static bool
654 mfb_redirect_edges_in_set (edge e)
655 {
656   return pointer_set_contains (mfb_reis_set, e);
657 }
658 
659 /* Creates a subloop of LOOP with latch edge LATCH.  */
660 
661 static void
662 form_subloop (struct loop *loop, edge latch)
663 {
664   edge_iterator ei;
665   edge e, new_entry;
666   struct loop *new_loop;
667 
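  /* Split the header into a forwarder block F and the remaining part R (see
     the comment in disambiguate_multiple_latches): the edges collected in
     MFB_REIS_SET (all header predecessors except LATCH) end up entering F,
     while LATCH is redirected to R.  F becomes the header of LOOP and R the
     header of the new subloop.  */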
668   mfb_reis_set = pointer_set_create ();
669   FOR_EACH_EDGE (e, ei, loop->header->preds)
670     {
671       if (e != latch)
672 	pointer_set_insert (mfb_reis_set, e);
673     }
674   new_entry = make_forwarder_block (loop->header, mfb_redirect_edges_in_set,
675 				    NULL);
676   pointer_set_destroy (mfb_reis_set);
677 
678   loop->header = new_entry->src;
679 
680   /* Find the blocks and subloops that belong to the new loop, and add it to
681      the appropriate place in the loop tree.  */
682   new_loop = alloc_loop ();
683   new_loop->header = new_entry->dest;
684   new_loop->latch = latch->src;
685   add_loop (new_loop, loop);
686 }
687 
688 /* Make all the latch edges of LOOP go to a single forwarder block --
689    a new latch of LOOP.  */
690 
691 static void
692 merge_latch_edges (struct loop *loop)
693 {
694   vec<edge> latches = get_loop_latch_edges (loop);
695   edge latch, e;
696   unsigned i;
697 
698   gcc_assert (latches.length () > 0);
699 
700   if (latches.length () == 1)
701     loop->latch = latches[0]->src;
702   else
703     {
704       if (dump_file)
705 	fprintf (dump_file, "Merged latch edges of loop %d\n", loop->num);
706 
707       mfb_reis_set = pointer_set_create ();
708       FOR_EACH_VEC_ELT (latches, i, e)
709 	pointer_set_insert (mfb_reis_set, e);
710       latch = make_forwarder_block (loop->header, mfb_redirect_edges_in_set,
711 				    NULL);
712       pointer_set_destroy (mfb_reis_set);
713 
714       loop->header = latch->dest;
715       loop->latch = latch->src;
716     }
717 
718   latches.release ();
719 }
720 
721 /* LOOP may have several latch edges.  Transform it into (possibly several)
722    loops with single latch edge.  */
723 
724 static void
725 disambiguate_multiple_latches (struct loop *loop)
726 {
727   edge e;
728 
729   /* We eliminate the multiple latches by splitting the header to the forwarder
730      block F and the rest R, and redirecting the edges.  There are two cases:
731 
732      1) If there is a latch edge E that corresponds to a subloop (we guess
733         that based on profile -- if it is taken much more often than the
734 	remaining edges; and on trees, using the information about induction
735 	variables of the loops), we redirect E to R, all the remaining edges to
736 	F, then rescan the loops and try again for the outer loop.
737      2) If there is no such edge, we redirect all latch edges to F, and the
738         entry edges to R, thus making F the single latch of the loop.  */
739 
740   if (dump_file)
741     fprintf (dump_file, "Disambiguating loop %d with multiple latches\n",
742 	     loop->num);
743 
744   /* During latch merging, we may need to redirect the entry edges to a new
745      block.  This would cause problems if the entry edge was the one from the
746      entry block.  To avoid having to handle this case specially, split
747      such entry edge.  */
748   e = find_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), loop->header);
749   if (e)
750     split_edge (e);
751 
752   while (1)
753     {
754       e = find_subloop_latch_edge (loop);
755       if (!e)
756 	break;
757 
758       form_subloop (loop, e);
759     }
760 
761   merge_latch_edges (loop);
762 }
763 
764 /* Split loops with multiple latch edges.  */
765 
766 void
767 disambiguate_loops_with_multiple_latches (void)
768 {
769   struct loop *loop;
770 
771   FOR_EACH_LOOP (loop, 0)
772     {
773       if (!loop->latch)
774 	disambiguate_multiple_latches (loop);
775     }
776 }
777 
778 /* Return nonzero if basic block BB belongs to LOOP.  */
779 bool
780 flow_bb_inside_loop_p (const struct loop *loop, const_basic_block bb)
781 {
782   struct loop *source_loop;
783 
784   if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
785       || bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
786     return 0;
787 
788   source_loop = bb->loop_father;
789   return loop == source_loop || flow_loop_nested_p (loop, source_loop);
790 }
791 
792 /* Enumeration predicate for get_loop_body_with_size.  */
793 static bool
794 glb_enum_p (const_basic_block bb, const void *glb_loop)
795 {
796   const struct loop *const loop = (const struct loop *) glb_loop;
797   return (bb != loop->header
798 	  && dominated_by_p (CDI_DOMINATORS, bb, loop->header));
799 }
800 
801 /* Gets the basic blocks of LOOP.  The header is the 0-th block, the rest is in
802    DFS order against the direction of edges from the latch.  In particular, if
803    header != latch, the latch is the 1-st block.  LOOP cannot be the fake
804    loop tree root, and its size must be at most MAX_SIZE.  The blocks
805    in the LOOP body are stored to BODY, and the size of the LOOP is
806    returned.  */
807 
808 unsigned
809 get_loop_body_with_size (const struct loop *loop, basic_block *body,
810 			 unsigned max_size)
811 {
812   return dfs_enumerate_from (loop->header, 1, glb_enum_p,
813 			     body, max_size, loop);
814 }
815 
816 /* Gets the basic blocks of LOOP.  The header is the 0-th block, the rest is in
817    DFS order against the direction of edges from the latch.  In particular, if
818    header != latch, the latch is the 1-st block.  */
819 
820 basic_block *
821 get_loop_body (const struct loop *loop)
822 {
823   basic_block *body, bb;
824   unsigned tv = 0;
825 
826   gcc_assert (loop->num_nodes);
827 
828   body = XNEWVEC (basic_block, loop->num_nodes);
829 
830   if (loop->latch == EXIT_BLOCK_PTR_FOR_FN (cfun))
831     {
832       /* There may be blocks unreachable from EXIT_BLOCK, hence we need to
833 	 special-case the fake loop that contains the whole function.  */
834       gcc_assert (loop->num_nodes == (unsigned) n_basic_blocks_for_fn (cfun));
835       body[tv++] = loop->header;
836       body[tv++] = EXIT_BLOCK_PTR_FOR_FN (cfun);
837       FOR_EACH_BB_FN (bb, cfun)
838 	body[tv++] = bb;
839     }
840   else
841     tv = get_loop_body_with_size (loop, body, loop->num_nodes);
842 
843   gcc_assert (tv == loop->num_nodes);
844   return body;
845 }
846 
847 /* Fills dominance descendants inside LOOP of the basic block BB into
848    array TOVISIT from index *TV.  */
849 
850 static void
851 fill_sons_in_loop (const struct loop *loop, basic_block bb,
852 		   basic_block *tovisit, int *tv)
853 {
854   basic_block son, postpone = NULL;
855 
856   tovisit[(*tv)++] = bb;
857   for (son = first_dom_son (CDI_DOMINATORS, bb);
858        son;
859        son = next_dom_son (CDI_DOMINATORS, son))
860     {
861       if (!flow_bb_inside_loop_p (loop, son))
862 	continue;
863 
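      /* Postpone the son that dominates the latch, so that everything stored
         after it in TOVISIT is dominated by it.  */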
864       if (dominated_by_p (CDI_DOMINATORS, loop->latch, son))
865 	{
866 	  postpone = son;
867 	  continue;
868 	}
869       fill_sons_in_loop (loop, son, tovisit, tv);
870     }
871 
872   if (postpone)
873     fill_sons_in_loop (loop, postpone, tovisit, tv);
874 }
875 
876 /* Gets the body of LOOP (which must be different from the outermost loop),
877    sorted by the dominance relation.  Additionally, if a basic block s dominates
878    the latch, then only blocks dominated by s are after it.  */
879 
880 basic_block *
881 get_loop_body_in_dom_order (const struct loop *loop)
882 {
883   basic_block *tovisit;
884   int tv;
885 
886   gcc_assert (loop->num_nodes);
887 
888   tovisit = XNEWVEC (basic_block, loop->num_nodes);
889 
890   gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
891 
892   tv = 0;
893   fill_sons_in_loop (loop, loop->header, tovisit, &tv);
894 
895   gcc_assert (tv == (int) loop->num_nodes);
896 
897   return tovisit;
898 }
899 
900 /* Gets body of a LOOP sorted via provided BB_COMPARATOR.  */
901 
902 basic_block *
903 get_loop_body_in_custom_order (const struct loop *loop,
904 			       int (*bb_comparator) (const void *, const void *))
905 {
906   basic_block *bbs = get_loop_body (loop);
907 
908   qsort (bbs, loop->num_nodes, sizeof (basic_block), bb_comparator);
909 
910   return bbs;
911 }
912 
913 /* Gets the body of LOOP in breadth-first search order.  */
914 
915 basic_block *
916 get_loop_body_in_bfs_order (const struct loop *loop)
917 {
918   basic_block *blocks;
919   basic_block bb;
920   bitmap visited;
921   unsigned int i = 0;
922   unsigned int vc = 1;
923 
924   gcc_assert (loop->num_nodes);
925   gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
926 
927   blocks = XNEWVEC (basic_block, loop->num_nodes);
928   visited = BITMAP_ALLOC (NULL);
929 
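  /* BLOCKS doubles as the BFS queue: I is the index at which newly discovered
     blocks are stored, VC the index of the next block whose successors are
     scanned.  */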
930   bb = loop->header;
931   while (i < loop->num_nodes)
932     {
933       edge e;
934       edge_iterator ei;
935 
936       if (bitmap_set_bit (visited, bb->index))
937 	/* This basic block is now visited */
938 	blocks[i++] = bb;
939 
940       FOR_EACH_EDGE (e, ei, bb->succs)
941 	{
942 	  if (flow_bb_inside_loop_p (loop, e->dest))
943 	    {
944 	      if (bitmap_set_bit (visited, e->dest->index))
945 		blocks[i++] = e->dest;
946 	    }
947 	}
948 
949       gcc_assert (i >= vc);
950 
951       bb = blocks[vc++];
952     }
953 
954   BITMAP_FREE (visited);
955   return blocks;
956 }
957 
958 /* Hash function for struct loop_exit.  */
959 
960 static hashval_t
961 loop_exit_hash (const void *ex)
962 {
963   const struct loop_exit *const exit = (const struct loop_exit *) ex;
964 
965   return htab_hash_pointer (exit->e);
966 }
967 
968 /* Equality function for struct loop_exit.  Compares with edge.  */
969 
970 static int
971 loop_exit_eq (const void *ex, const void *e)
972 {
973   const struct loop_exit *const exit = (const struct loop_exit *) ex;
974 
975   return exit->e == e;
976 }
977 
978 /* Frees the list of loop exit descriptions EX.  */
979 
980 static void
981 loop_exit_free (void *ex)
982 {
983   struct loop_exit *exit = (struct loop_exit *) ex, *next;
984 
985   for (; exit; exit = next)
986     {
987       next = exit->next_e;
988 
989       exit->next->prev = exit->prev;
990       exit->prev->next = exit->next;
991 
992       ggc_free (exit);
993     }
994 }
995 
996 /* Returns the list of records for E as an exit of a loop.  */
997 
998 static struct loop_exit *
999 get_exit_descriptions (edge e)
1000 {
1001   return (struct loop_exit *) htab_find_with_hash (current_loops->exits, e,
1002 			                           htab_hash_pointer (e));
1003 }
1004 
1005 /* Updates the lists of loop exits in which E appears.
1006    If REMOVED is true, E is being removed, and we
1007    just remove it from the lists of exits.
1008    If NEW_EDGE is true and E is not a loop exit, we
1009    do not try to remove it from loop exit lists.  */
1010 
1011 void
1012 rescan_loop_exit (edge e, bool new_edge, bool removed)
1013 {
1014   void **slot;
1015   struct loop_exit *exits = NULL, *exit;
1016   struct loop *aloop, *cloop;
1017 
1018   if (!loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1019     return;
1020 
1021   if (!removed
1022       && e->src->loop_father != NULL
1023       && e->dest->loop_father != NULL
1024       && !flow_bb_inside_loop_p (e->src->loop_father, e->dest))
1025     {
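      /* E exits every loop from E->SRC's loop up to (but not including) the
         nearest common ancestor of the two loops; link a new exit record into
         the exit list of each of them.  */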
1026       cloop = find_common_loop (e->src->loop_father, e->dest->loop_father);
1027       for (aloop = e->src->loop_father;
1028 	   aloop != cloop;
1029 	   aloop = loop_outer (aloop))
1030 	{
1031 	  exit = ggc_alloc_loop_exit ();
1032 	  exit->e = e;
1033 
1034 	  exit->next = aloop->exits->next;
1035 	  exit->prev = aloop->exits;
1036 	  exit->next->prev = exit;
1037 	  exit->prev->next = exit;
1038 
1039 	  exit->next_e = exits;
1040 	  exits = exit;
1041 	}
1042     }
1043 
1044   if (!exits && new_edge)
1045     return;
1046 
1047   slot = htab_find_slot_with_hash (current_loops->exits, e,
1048 				   htab_hash_pointer (e),
1049 				   exits ? INSERT : NO_INSERT);
1050   if (!slot)
1051     return;
1052 
1053   if (exits)
1054     {
1055       if (*slot)
1056 	loop_exit_free (*slot);
1057       *slot = exits;
1058     }
1059   else
1060     htab_clear_slot (current_loops->exits, slot);
1061 }
1062 
1063 /* For each loop, record list of exit edges, and start maintaining these
1064    lists.  */
1065 
1066 void
1067 record_loop_exits (void)
1068 {
1069   basic_block bb;
1070   edge_iterator ei;
1071   edge e;
1072 
1073   if (!current_loops)
1074     return;
1075 
1076   if (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1077     return;
1078   loops_state_set (LOOPS_HAVE_RECORDED_EXITS);
1079 
1080   gcc_assert (current_loops->exits == NULL);
1081   current_loops->exits = htab_create_ggc (2 * number_of_loops (cfun),
1082 					  loop_exit_hash, loop_exit_eq,
1083 					  loop_exit_free);
1084 
1085   FOR_EACH_BB_FN (bb, cfun)
1086     {
1087       FOR_EACH_EDGE (e, ei, bb->succs)
1088 	{
1089 	  rescan_loop_exit (e, true, false);
1090 	}
1091     }
1092 }
1093 
1094 /* Dumps information about the exit in *SLOT to FILE.
1095    Callback for htab_traverse.  */
1096 
1097 static int
1098 dump_recorded_exit (void **slot, void *file)
1099 {
1100   struct loop_exit *exit = (struct loop_exit *) *slot;
1101   unsigned n = 0;
1102   edge e = exit->e;
1103 
1104   for (; exit != NULL; exit = exit->next_e)
1105     n++;
1106 
1107   fprintf ((FILE*) file, "Edge %d->%d exits %u loops\n",
1108 	   e->src->index, e->dest->index, n);
1109 
1110   return 1;
1111 }
1112 
1113 /* Dumps the recorded exits of loops to FILE.  */
1114 
1115 extern void dump_recorded_exits (FILE *);
1116 void
1117 dump_recorded_exits (FILE *file)
1118 {
1119   if (!current_loops->exits)
1120     return;
1121   htab_traverse (current_loops->exits, dump_recorded_exit, file);
1122 }
1123 
1124 /* Releases lists of loop exits.  */
1125 
1126 void
1127 release_recorded_exits (void)
1128 {
1129   gcc_assert (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS));
1130   htab_delete (current_loops->exits);
1131   current_loops->exits = NULL;
1132   loops_state_clear (LOOPS_HAVE_RECORDED_EXITS);
1133 }
1134 
1135 /* Returns the list of the exit edges of a LOOP.  */
1136 
1137 vec<edge>
1138 get_loop_exit_edges (const struct loop *loop)
1139 {
1140   vec<edge> edges = vNULL;
1141   edge e;
1142   unsigned i;
1143   basic_block *body;
1144   edge_iterator ei;
1145   struct loop_exit *exit;
1146 
1147   gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
1148 
1149   /* If we maintain the lists of exits, use them.  Otherwise we must
1150      scan the body of the loop.  */
1151   if (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1152     {
1153       for (exit = loop->exits->next; exit->e; exit = exit->next)
1154 	edges.safe_push (exit->e);
1155     }
1156   else
1157     {
1158       body = get_loop_body (loop);
1159       for (i = 0; i < loop->num_nodes; i++)
1160 	FOR_EACH_EDGE (e, ei, body[i]->succs)
1161 	  {
1162 	    if (!flow_bb_inside_loop_p (loop, e->dest))
1163 	      edges.safe_push (e);
1164 	  }
1165       free (body);
1166     }
1167 
1168   return edges;
1169 }
1170 
1171 /* Counts the number of conditional branches inside LOOP.  */
1172 
1173 unsigned
1174 num_loop_branches (const struct loop *loop)
1175 {
1176   unsigned i, n;
1177   basic_block * body;
1178 
1179   gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));
1180 
1181   body = get_loop_body (loop);
1182   n = 0;
1183   for (i = 0; i < loop->num_nodes; i++)
1184     if (EDGE_COUNT (body[i]->succs) >= 2)
1185       n++;
1186   free (body);
1187 
1188   return n;
1189 }
1190 
1191 /* Adds basic block BB to LOOP.  */
1192 void
1193 add_bb_to_loop (basic_block bb, struct loop *loop)
1194 {
1195   unsigned i;
1196   loop_p ploop;
1197   edge_iterator ei;
1198   edge e;
1199 
1200   gcc_assert (bb->loop_father == NULL);
1201   bb->loop_father = loop;
1202   loop->num_nodes++;
1203   FOR_EACH_VEC_SAFE_ELT (loop->superloops, i, ploop)
1204     ploop->num_nodes++;
1205 
1206   FOR_EACH_EDGE (e, ei, bb->succs)
1207     {
1208       rescan_loop_exit (e, true, false);
1209     }
1210   FOR_EACH_EDGE (e, ei, bb->preds)
1211     {
1212       rescan_loop_exit (e, true, false);
1213     }
1214 }
1215 
1216 /* Remove basic block BB from loops.  */
1217 void
1218 remove_bb_from_loops (basic_block bb)
1219 {
1220   unsigned i;
1221   struct loop *loop = bb->loop_father;
1222   loop_p ploop;
1223   edge_iterator ei;
1224   edge e;
1225 
1226   gcc_assert (loop != NULL);
1227   loop->num_nodes--;
1228   FOR_EACH_VEC_SAFE_ELT (loop->superloops, i, ploop)
1229     ploop->num_nodes--;
1230   bb->loop_father = NULL;
1231 
1232   FOR_EACH_EDGE (e, ei, bb->succs)
1233     {
1234       rescan_loop_exit (e, false, true);
1235     }
1236   FOR_EACH_EDGE (e, ei, bb->preds)
1237     {
1238       rescan_loop_exit (e, false, true);
1239     }
1240 }
1241 
1242 /* Finds nearest common ancestor in loop tree for given loops.  */
1243 struct loop *
1244 find_common_loop (struct loop *loop_s, struct loop *loop_d)
1245 {
1246   unsigned sdepth, ddepth;
1247 
1248   if (!loop_s) return loop_d;
1249   if (!loop_d) return loop_s;
1250 
1251   sdepth = loop_depth (loop_s);
1252   ddepth = loop_depth (loop_d);
1253 
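  /* First bring both loops to the same depth, then ascend the loop tree in
     lockstep until the loops meet.  */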
1254   if (sdepth < ddepth)
1255     loop_d = (*loop_d->superloops)[sdepth];
1256   else if (sdepth > ddepth)
1257     loop_s = (*loop_s->superloops)[ddepth];
1258 
1259   while (loop_s != loop_d)
1260     {
1261       loop_s = loop_outer (loop_s);
1262       loop_d = loop_outer (loop_d);
1263     }
1264   return loop_s;
1265 }
1266 
1267 /* Removes LOOP from structures and frees its data.  */
1268 
1269 void
1270 delete_loop (struct loop *loop)
1271 {
1272   /* Remove the loop from structure.  */
1273   flow_loop_tree_node_remove (loop);
1274 
1275   /* Remove loop from loops array.  */
1276   (*current_loops->larray)[loop->num] = NULL;
1277 
1278   /* Free loop data.  */
1279   flow_loop_free (loop);
1280 }
1281 
1282 /* Cancels the LOOP; it must be an innermost one.  */
1283 
1284 static void
1285 cancel_loop (struct loop *loop)
1286 {
1287   basic_block *bbs;
1288   unsigned i;
1289   struct loop *outer = loop_outer (loop);
1290 
1291   gcc_assert (!loop->inner);
1292 
1293   /* Move blocks up one level (they should be removed as soon as possible).  */
1294   bbs = get_loop_body (loop);
1295   for (i = 0; i < loop->num_nodes; i++)
1296     bbs[i]->loop_father = outer;
1297 
1298   free (bbs);
1299   delete_loop (loop);
1300 }
1301 
1302 /* Cancels LOOP and all its subloops.  */
1303 void
1304 cancel_loop_tree (struct loop *loop)
1305 {
1306   while (loop->inner)
1307     cancel_loop_tree (loop->inner);
1308   cancel_loop (loop);
1309 }
1310 
1311 /* Checks that information about loops is correct
1312      -- sizes of loops are all right
1313      -- results of get_loop_body really belong to the loop
1314      -- loop headers have just a single entry edge and a single latch edge
1315      -- loop latches have only a single successor that is the header of their loop
1316      -- irreducible loops are correctly marked
1317      -- the cached loop depth and loop father of each bb is correct
1318   */
1319 DEBUG_FUNCTION void
1320 verify_loop_structure (void)
1321 {
1322   unsigned *sizes, i, j;
1323   sbitmap irreds;
1324   basic_block bb, *bbs;
1325   struct loop *loop;
1326   int err = 0;
1327   edge e;
1328   unsigned num = number_of_loops (cfun);
1329   struct loop_exit *exit, *mexit;
1330   bool dom_available = dom_info_available_p (CDI_DOMINATORS);
1331   sbitmap visited;
1332 
1333   if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
1334     {
1335       error ("loop verification on loop tree that needs fixup");
1336       err = 1;
1337     }
1338 
1339   /* We need up-to-date dominators, compute or verify them.  */
1340   if (!dom_available)
1341     calculate_dominance_info (CDI_DOMINATORS);
1342   else
1343     verify_dominators (CDI_DOMINATORS);
1344 
1345   /* Check the headers.  */
1346   FOR_EACH_BB_FN (bb, cfun)
1347     if (bb_loop_header_p (bb))
1348       {
1349 	if (bb->loop_father->header == NULL)
1350 	  {
1351 	    error ("loop with header %d marked for removal", bb->index);
1352 	    err = 1;
1353 	  }
1354 	else if (bb->loop_father->header != bb)
1355 	  {
1356 	    error ("loop with header %d not in loop tree", bb->index);
1357 	    err = 1;
1358 	  }
1359       }
1360     else if (bb->loop_father->header == bb)
1361       {
1362 	error ("non-loop with header %d not marked for removal", bb->index);
1363 	err = 1;
1364       }
1365 
1366   /* Check the recorded loop father and sizes of loops.  */
1367   visited = sbitmap_alloc (last_basic_block_for_fn (cfun));
1368   bitmap_clear (visited);
1369   bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
1370   FOR_EACH_LOOP (loop, LI_FROM_INNERMOST)
1371     {
1372       unsigned n;
1373 
1374       if (loop->header == NULL)
1375 	{
1376 	  error ("removed loop %d in loop tree", loop->num);
1377 	  err = 1;
1378 	  continue;
1379 	}
1380 
1381       n = get_loop_body_with_size (loop, bbs, n_basic_blocks_for_fn (cfun));
1382       if (loop->num_nodes != n)
1383 	{
1384 	  error ("size of loop %d should be %d, not %d",
1385 		 loop->num, n, loop->num_nodes);
1386 	  err = 1;
1387 	}
1388 
1389       for (j = 0; j < n; j++)
1390 	{
1391 	  bb = bbs[j];
1392 
1393 	  if (!flow_bb_inside_loop_p (loop, bb))
1394 	    {
1395 	      error ("bb %d does not belong to loop %d",
1396 		     bb->index, loop->num);
1397 	      err = 1;
1398 	    }
1399 
1400 	  /* Ignore this block if it is in an inner loop.  */
1401 	  if (bitmap_bit_p (visited, bb->index))
1402 	    continue;
1403 	  bitmap_set_bit (visited, bb->index);
1404 
1405 	  if (bb->loop_father != loop)
1406 	    {
1407 	      error ("bb %d has father loop %d, should be loop %d",
1408 		     bb->index, bb->loop_father->num, loop->num);
1409 	      err = 1;
1410 	    }
1411 	}
1412     }
1413   free (bbs);
1414   sbitmap_free (visited);
1415 
1416   /* Check headers and latches.  */
1417   FOR_EACH_LOOP (loop, 0)
1418     {
1419       i = loop->num;
1420       if (loop->header == NULL)
1421 	continue;
1422       if (!bb_loop_header_p (loop->header))
1423 	{
1424 	  error ("loop %d%'s header is not a loop header", i);
1425 	  err = 1;
1426 	}
1427       if (loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS)
1428 	  && EDGE_COUNT (loop->header->preds) != 2)
1429 	{
1430 	  error ("loop %d%'s header does not have exactly 2 entries", i);
1431 	  err = 1;
1432 	}
1433       if (loop->latch)
1434 	{
1435 	  if (!find_edge (loop->latch, loop->header))
1436 	    {
1437 	      error ("loop %d%'s latch does not have an edge to its header", i);
1438 	      err = 1;
1439 	    }
1440 	  if (!dominated_by_p (CDI_DOMINATORS, loop->latch, loop->header))
1441 	    {
1442 	      error ("loop %d%'s latch is not dominated by its header", i);
1443 	      err = 1;
1444 	    }
1445 	}
1446       if (loops_state_satisfies_p (LOOPS_HAVE_SIMPLE_LATCHES))
1447 	{
1448 	  if (!single_succ_p (loop->latch))
1449 	    {
1450 	      error ("loop %d%'s latch does not have exactly 1 successor", i);
1451 	      err = 1;
1452 	    }
1453 	  if (single_succ (loop->latch) != loop->header)
1454 	    {
1455 	      error ("loop %d%'s latch does not have header as successor", i);
1456 	      err = 1;
1457 	    }
1458 	  if (loop->latch->loop_father != loop)
1459 	    {
1460 	      error ("loop %d%'s latch does not belong directly to it", i);
1461 	      err = 1;
1462 	    }
1463 	}
1464       if (loop->header->loop_father != loop)
1465 	{
1466 	  error ("loop %d%'s header does not belong directly to it", i);
1467 	  err = 1;
1468 	}
1469       if (loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS)
1470 	  && (loop_latch_edge (loop)->flags & EDGE_IRREDUCIBLE_LOOP))
1471 	{
1472 	  error ("loop %d%'s latch is marked as part of irreducible region", i);
1473 	  err = 1;
1474 	}
1475     }
1476 
1477   /* Check irreducible loops.  */
1478   if (loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
1479     {
1480       /* Record old info.  */
1481       irreds = sbitmap_alloc (last_basic_block_for_fn (cfun));
1482       FOR_EACH_BB_FN (bb, cfun)
1483 	{
1484 	  edge_iterator ei;
1485 	  if (bb->flags & BB_IRREDUCIBLE_LOOP)
1486 	    bitmap_set_bit (irreds, bb->index);
1487 	  else
1488 	    bitmap_clear_bit (irreds, bb->index);
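	  /* Remember the old edge marking in a scratch flag bit above
	     EDGE_ALL_FLAGS.  */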
1489 	  FOR_EACH_EDGE (e, ei, bb->succs)
1490 	    if (e->flags & EDGE_IRREDUCIBLE_LOOP)
1491 	      e->flags |= EDGE_ALL_FLAGS + 1;
1492 	}
1493 
1494       /* Recount it.  */
1495       mark_irreducible_loops ();
1496 
1497       /* Compare.  */
1498       FOR_EACH_BB_FN (bb, cfun)
1499 	{
1500 	  edge_iterator ei;
1501 
1502 	  if ((bb->flags & BB_IRREDUCIBLE_LOOP)
1503 	      && !bitmap_bit_p (irreds, bb->index))
1504 	    {
1505 	      error ("basic block %d should be marked irreducible", bb->index);
1506 	      err = 1;
1507 	    }
1508 	  else if (!(bb->flags & BB_IRREDUCIBLE_LOOP)
1509 	      && bitmap_bit_p (irreds, bb->index))
1510 	    {
1511 	      error ("basic block %d should not be marked irreducible", bb->index);
1512 	      err = 1;
1513 	    }
1514 	  FOR_EACH_EDGE (e, ei, bb->succs)
1515 	    {
1516 	      if ((e->flags & EDGE_IRREDUCIBLE_LOOP)
1517 		  && !(e->flags & (EDGE_ALL_FLAGS + 1)))
1518 		{
1519 		  error ("edge from %d to %d should be marked irreducible",
1520 			 e->src->index, e->dest->index);
1521 		  err = 1;
1522 		}
1523 	      else if (!(e->flags & EDGE_IRREDUCIBLE_LOOP)
1524 		       && (e->flags & (EDGE_ALL_FLAGS + 1)))
1525 		{
1526 		  error ("edge from %d to %d should not be marked irreducible",
1527 			 e->src->index, e->dest->index);
1528 		  err = 1;
1529 		}
1530 	      e->flags &= ~(EDGE_ALL_FLAGS + 1);
1531 	    }
1532 	}
1533       free (irreds);
1534     }
1535 
1536   /* Check the recorded loop exits.  */
1537   FOR_EACH_LOOP (loop, 0)
1538     {
1539       if (!loop->exits || loop->exits->e != NULL)
1540 	{
1541 	  error ("corrupted head of the exits list of loop %d",
1542 		 loop->num);
1543 	  err = 1;
1544 	}
1545       else
1546 	{
1547 	  /* Check that the list forms a cycle, and all elements except
1548 	     for the head are nonnull.  */
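	  /* MEXIT advances at half the speed of EXIT, so the walk terminates
	     even if the list is corrupted into a cycle that does not contain
	     the head.  */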
1549 	  for (mexit = loop->exits, exit = mexit->next, i = 0;
1550 	       exit->e && exit != mexit;
1551 	       exit = exit->next)
1552 	    {
1553 	      if (i++ & 1)
1554 		mexit = mexit->next;
1555 	    }
1556 
1557 	  if (exit != loop->exits)
1558 	    {
1559 	      error ("corrupted exits list of loop %d", loop->num);
1560 	      err = 1;
1561 	    }
1562 	}
1563 
1564       if (!loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1565 	{
1566 	  if (loop->exits->next != loop->exits)
1567 	    {
1568 	      error ("nonempty exits list of loop %d, but exits are not recorded",
1569 		     loop->num);
1570 	      err = 1;
1571 	    }
1572 	}
1573     }
1574 
1575   if (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1576     {
1577       unsigned n_exits = 0, eloops;
1578 
1579       sizes = XCNEWVEC (unsigned, num);
1580       memset (sizes, 0, sizeof (unsigned) * num);
1581       FOR_EACH_BB_FN (bb, cfun)
1582 	{
1583 	  edge_iterator ei;
1584 	  if (bb->loop_father == current_loops->tree_root)
1585 	    continue;
1586 	  FOR_EACH_EDGE (e, ei, bb->succs)
1587 	    {
1588 	      if (flow_bb_inside_loop_p (bb->loop_father, e->dest))
1589 		continue;
1590 
1591 	      n_exits++;
1592 	      exit = get_exit_descriptions (e);
1593 	      if (!exit)
1594 		{
1595 		  error ("exit %d->%d not recorded",
1596 			 e->src->index, e->dest->index);
1597 		  err = 1;
1598 		}
1599 	      eloops = 0;
1600 	      for (; exit; exit = exit->next_e)
1601 		eloops++;
1602 
1603 	      for (loop = bb->loop_father;
1604 		   loop != e->dest->loop_father
1605 		   /* When a loop exit is also an entry edge which
1606 		      can happen when avoiding CFG manipulations
1607 		      then the last loop exited is the outer loop
1608 		      of the loop entered.  */
1609 		   && loop != loop_outer (e->dest->loop_father);
1610 		   loop = loop_outer (loop))
1611 		{
1612 		  eloops--;
1613 		  sizes[loop->num]++;
1614 		}
1615 
1616 	      if (eloops != 0)
1617 		{
1618 		  error ("wrong list of exited loops for edge %d->%d",
1619 			 e->src->index, e->dest->index);
1620 		  err = 1;
1621 		}
1622 	    }
1623 	}
1624 
1625       if (n_exits != htab_elements (current_loops->exits))
1626 	{
1627 	  error ("too many loop exits recorded");
1628 	  err = 1;
1629 	}
1630 
1631       FOR_EACH_LOOP (loop, 0)
1632 	{
1633 	  eloops = 0;
1634 	  for (exit = loop->exits->next; exit->e; exit = exit->next)
1635 	    eloops++;
1636 	  if (eloops != sizes[loop->num])
1637 	    {
1638 	      error ("%d exits recorded for loop %d (having %d exits)",
1639 		     eloops, loop->num, sizes[loop->num]);
1640 	      err = 1;
1641 	    }
1642 	}
1643 
1644       free (sizes);
1645     }
1646 
1647   gcc_assert (!err);
1648 
1649   if (!dom_available)
1650     free_dominance_info (CDI_DOMINATORS);
1651 }
1652 
1653 /* Returns latch edge of LOOP.  */
1654 edge
1655 loop_latch_edge (const struct loop *loop)
1656 {
1657   return find_edge (loop->latch, loop->header);
1658 }
1659 
1660 /* Returns preheader edge of LOOP.  */
1661 edge
1662 loop_preheader_edge (const struct loop *loop)
1663 {
1664   edge e;
1665   edge_iterator ei;
1666 
1667   gcc_assert (loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS));
1668 
1669   FOR_EACH_EDGE (e, ei, loop->header->preds)
1670     if (e->src != loop->latch)
1671       break;
1672 
1673   return e;
1674 }
1675 
1676 /* Returns true if E is an exit of LOOP.  */
1677 
1678 bool
1679 loop_exit_edge_p (const struct loop *loop, const_edge e)
1680 {
1681   return (flow_bb_inside_loop_p (loop, e->src)
1682 	  && !flow_bb_inside_loop_p (loop, e->dest));
1683 }
1684 
1685 /* Returns the single exit edge of LOOP, or NULL if LOOP has either no exit
1686    or more than one exit.  If loops do not have the exits recorded, NULL
1687    is always returned.  */
1688 
1689 edge
1690 single_exit (const struct loop *loop)
1691 {
1692   struct loop_exit *exit = loop->exits->next;
1693 
1694   if (!loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1695     return NULL;
1696 
1697   if (exit->e && exit->next == loop->exits)
1698     return exit->e;
1699   else
1700     return NULL;
1701 }
1702 
1703 /* Returns true when BB has an incoming edge exiting LOOP.  */
1704 
1705 bool
1706 loop_exits_to_bb_p (struct loop *loop, basic_block bb)
1707 {
1708   edge e;
1709   edge_iterator ei;
1710 
1711   FOR_EACH_EDGE (e, ei, bb->preds)
1712     if (loop_exit_edge_p (loop, e))
1713       return true;
1714 
1715   return false;
1716 }
1717 
1718 /* Returns true when BB has an outgoing edge exiting LOOP.  */
1719 
1720 bool
1721 loop_exits_from_bb_p (struct loop *loop, basic_block bb)
1722 {
1723   edge e;
1724   edge_iterator ei;
1725 
1726   FOR_EACH_EDGE (e, ei, bb->succs)
1727     if (loop_exit_edge_p (loop, e))
1728       return true;
1729 
1730   return false;
1731 }
1732 
1733 /* Return location corresponding to the loop control condition if possible.  */
1734 
1735 location_t
1736 get_loop_location (struct loop *loop)
1737 {
1738   rtx insn = NULL;
1739   struct niter_desc *desc = NULL;
1740   edge exit;
1741 
1742   /* For a for or while loop, we would like to return the location
1743      of the for or while statement, if possible.  To do this, look
1744      for the branch guarding the loop back-edge.  */
1745 
1746   /* If this is a simple loop with an in_edge, then the loop control
1747      branch is typically at the end of its source.  */
1748   desc = get_simple_loop_desc (loop);
1749   if (desc->in_edge)
1750     {
1751       FOR_BB_INSNS_REVERSE (desc->in_edge->src, insn)
1752         {
1753           if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
1754             return INSN_LOCATION (insn);
1755         }
1756     }
1757   /* If loop has a single exit, then the loop control branch
1758      must be at the end of its source.  */
1759   if ((exit = single_exit (loop)))
1760     {
1761       FOR_BB_INSNS_REVERSE (exit->src, insn)
1762         {
1763           if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
1764             return INSN_LOCATION (insn);
1765         }
1766     }
1767   /* Next check the latch, to see if it is non-empty.  */
1768   FOR_BB_INSNS_REVERSE (loop->latch, insn)
1769     {
1770       if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
1771         return INSN_LOCATION (insn);
1772     }
1773   /* Finally, if none of the above identifies the loop control branch,
1774      return the first location in the loop header.  */
1775   FOR_BB_INSNS (loop->header, insn)
1776     {
1777       if (INSN_P (insn) && INSN_HAS_LOCATION (insn))
1778         return INSN_LOCATION (insn);
1779     }
1780   /* If all else fails, simply return the current function location.  */
1781   return DECL_SOURCE_LOCATION (current_function_decl);
1782 }
1783 
1784 /* Records that every statement in LOOP is executed I_BOUND times.
1785    REALISTIC is true if I_BOUND is expected to be close to the real number
1786    of iterations.  UPPER is true if we are sure the loop iterates at most
1787    I_BOUND times.  */
1788 
1789 void
1790 record_niter_bound (struct loop *loop, double_int i_bound, bool realistic,
1791 		    bool upper)
1792 {
1793   /* Update the bounds only when there is no previous estimation, or when the
1794      current estimation is smaller.  */
1795   if (upper
1796       && (!loop->any_upper_bound
1797 	  || i_bound.ult (loop->nb_iterations_upper_bound)))
1798     {
1799       loop->any_upper_bound = true;
1800       loop->nb_iterations_upper_bound = i_bound;
1801     }
1802   if (realistic
1803       && (!loop->any_estimate
1804 	  || i_bound.ult (loop->nb_iterations_estimate)))
1805     {
1806       loop->any_estimate = true;
1807       loop->nb_iterations_estimate = i_bound;
1808     }
1809 
1810   /* If an upper bound is smaller than the realistic estimate of the
1811      number of iterations, use the upper bound instead.  */
1812   if (loop->any_upper_bound
1813       && loop->any_estimate
1814       && loop->nb_iterations_upper_bound.ult (loop->nb_iterations_estimate))
1815     loop->nb_iterations_estimate = loop->nb_iterations_upper_bound;
1816 }
1817 
1818 /* Similar to get_estimated_loop_iterations, but returns the estimate only
1819    if it fits into a HOST_WIDE_INT.  If this is not the case, or the estimate
1820    on the number of iterations of LOOP could not be derived, returns -1.  */
1821 
1822 HOST_WIDE_INT
1823 get_estimated_loop_iterations_int (struct loop *loop)
1824 {
1825   double_int nit;
1826   HOST_WIDE_INT hwi_nit;
1827 
1828   if (!get_estimated_loop_iterations (loop, &nit))
1829     return -1;
1830 
1831   if (!nit.fits_shwi ())
1832     return -1;
1833   hwi_nit = nit.to_shwi ();
1834 
1835   return hwi_nit < 0 ? -1 : hwi_nit;
1836 }
1837 
1838 /* Returns an upper bound on the number of executions of statements
1839    in the LOOP.  For statements before the loop exit, this exceeds
1840    the number of executions of the latch by one.  */
1841 
1842 HOST_WIDE_INT
1843 max_stmt_executions_int (struct loop *loop)
1844 {
1845   HOST_WIDE_INT nit = get_max_loop_iterations_int (loop);
1846   HOST_WIDE_INT snit;
1847 
1848   if (nit == -1)
1849     return -1;
1850 
1851   snit = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) nit + 1);
1852 
1853   /* If the computation overflows, return -1.  */
1854   return snit < 0 ? -1 : snit;
1855 }
1856 
1857 /* Sets NIT to the estimated number of executions of the latch of the
1858    LOOP.  If we have no reliable estimate, the function returns false, otherwise
1859    returns true.  */
1860 
1861 bool
1862 get_estimated_loop_iterations (struct loop *loop, double_int *nit)
1863 {
1864   /* Even if the bound is not recorded, possibly we can derive one from
1865      the profile.  */
1866   if (!loop->any_estimate)
1867     {
1868       if (loop->header->count)
1869 	{
1870           *nit = gcov_type_to_double_int
1871 		   (expected_loop_iterations_unbounded (loop) + 1);
1872 	  return true;
1873 	}
1874       return false;
1875     }
1876 
1877   *nit = loop->nb_iterations_estimate;
1878   return true;
1879 }
1880 
1881 /* Sets NIT to an upper bound for the maximum number of executions of the
1882    latch of the LOOP.  If we have no reliable estimate, the function returns
1883    false, otherwise returns true.  */
1884 
1885 bool
1886 get_max_loop_iterations (struct loop *loop, double_int *nit)
1887 {
1888   if (!loop->any_upper_bound)
1889     return false;
1890 
1891   *nit = loop->nb_iterations_upper_bound;
1892   return true;
1893 }
1894 
1895 /* Similar to get_max_loop_iterations, but returns the estimate only
1896    if it fits into a HOST_WIDE_INT.  If this is not the case, or the estimate
1897    on the number of iterations of LOOP could not be derived, returns -1.  */
1898 
1899 HOST_WIDE_INT
1900 get_max_loop_iterations_int (struct loop *loop)
1901 {
1902   double_int nit;
1903   HOST_WIDE_INT hwi_nit;
1904 
1905   if (!get_max_loop_iterations (loop, &nit))
1906     return -1;
1907 
1908   if (!nit.fits_shwi ())
1909     return -1;
1910   hwi_nit = nit.to_shwi ();
1911 
1912   return hwi_nit < 0 ? -1 : hwi_nit;
1913 }
1914 
1915 /* Returns the loop depth of the loop BB belongs to.  */
1916 
1917 int
1918 bb_loop_depth (const_basic_block bb)
1919 {
1920   return bb->loop_father ? loop_depth (bb->loop_father) : 0;
1921 }
1922