/* Control flow graph manipulation code for GNU compiler.
   Copyright (C) 1987-2018 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This file contains low level functions to manipulate the CFG and
   analyze it.  Other modules should not transform the data structures
   directly; they should use the abstractions provided here instead.  The
   file is supposed to be ordered bottom-up and should not contain any
   code dependent on a particular intermediate language (RTL or trees).

   Available functionality:
     - Initialization/deallocation
	 init_flow, clear_edges
     - Low level basic block manipulation
	 alloc_block, expunge_block
     - Edge manipulation
	 make_edge, make_single_succ_edge, cached_make_edge, remove_edge
	 - Low level edge redirection (without updating instruction chain)
	     redirect_edge_succ, redirect_edge_succ_nodup, redirect_edge_pred
     - Dumping and debugging
	 dump_flow_info, debug_flow_info, dump_edge_info
     - Allocation of AUX fields for basic blocks
	 alloc_aux_for_blocks, free_aux_for_blocks, alloc_aux_for_block
     - clear_bb_flags
     - Consistency checking
	 verify_flow_info
     - Dumping and debugging
	 print_rtl_with_bb, dump_bb, debug_bb, debug_bb_n

   TODO: Document these "Available functionality" functions in the files
   that implement them.
 */

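/* Illustrative sketch only (not part of GCC itself): a pass that wants
   to reroute control flow from a block BB to NEW_DEST, where BB, OLD_DEST
   and NEW_DEST are hypothetical placeholders, would typically combine the
   primitives listed above along these lines:

       edge e = find_edge (bb, old_dest);
       if (e)
         redirect_edge_succ (e, new_dest);
       else
         e = make_edge (bb, new_dest, EDGE_FALLTHRU);

   Note that make_edge returns NULL when the edge already exists (it only
   ORs in the extra flags), and that the low level redirection routines do
   not update the instruction chain; keeping the IL in sync is the
   caller's (or cfghooks') responsibility.  */
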
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "hard-reg-set.h"
#include "tree.h"
#include "cfghooks.h"
#include "df.h"
#include "cfganal.h"
#include "cfgloop.h" /* FIXME: For struct loop.  */
#include "dumpfile.h"



/* Called once at initialization time.  */

void
init_flow (struct function *the_fun)
{
  if (!the_fun->cfg)
    the_fun->cfg = ggc_cleared_alloc<control_flow_graph> ();
  n_edges_for_fn (the_fun) = 0;
  the_fun->cfg->count_max = profile_count::uninitialized ();
  ENTRY_BLOCK_PTR_FOR_FN (the_fun)
    = alloc_block ();
  ENTRY_BLOCK_PTR_FOR_FN (the_fun)->index = ENTRY_BLOCK;
  EXIT_BLOCK_PTR_FOR_FN (the_fun)
    = alloc_block ();
  EXIT_BLOCK_PTR_FOR_FN (the_fun)->index = EXIT_BLOCK;
  ENTRY_BLOCK_PTR_FOR_FN (the_fun)->next_bb
    = EXIT_BLOCK_PTR_FOR_FN (the_fun);
  EXIT_BLOCK_PTR_FOR_FN (the_fun)->prev_bb
    = ENTRY_BLOCK_PTR_FOR_FN (the_fun);
}

/* Helper function for remove_edge and clear_edges.  Frees edge structure
   without actually removing it from the pred/succ arrays.  */

static void
free_edge (function *fn, edge e)
{
  n_edges_for_fn (fn)--;
  ggc_free (e);
}

/* Free the memory associated with the edge structures.  */

void
clear_edges (struct function *fn)
{
  basic_block bb;
  edge e;
  edge_iterator ei;

  FOR_EACH_BB_FN (bb, fn)
    {
      FOR_EACH_EDGE (e, ei, bb->succs)
	free_edge (fn, e);
      vec_safe_truncate (bb->succs, 0);
      vec_safe_truncate (bb->preds, 0);
    }

  FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (fn)->succs)
    free_edge (fn, e);
  vec_safe_truncate (EXIT_BLOCK_PTR_FOR_FN (fn)->preds, 0);
  vec_safe_truncate (ENTRY_BLOCK_PTR_FOR_FN (fn)->succs, 0);

  gcc_assert (!n_edges_for_fn (fn));
}

/* Allocate memory for basic_block.  */

basic_block
alloc_block (void)
{
  basic_block bb;
  bb = ggc_cleared_alloc<basic_block_def> ();
  bb->count = profile_count::uninitialized ();
  return bb;
}

/* Link block B to chain after AFTER.  */
void
link_block (basic_block b, basic_block after)
{
  b->next_bb = after->next_bb;
  b->prev_bb = after;
  after->next_bb = b;
  b->next_bb->prev_bb = b;
}

/* Unlink block B from chain.  */
void
unlink_block (basic_block b)
{
  b->next_bb->prev_bb = b->prev_bb;
  b->prev_bb->next_bb = b->next_bb;
  b->prev_bb = NULL;
  b->next_bb = NULL;
}

/* Sequentially order blocks and compact the arrays.  */
void
compact_blocks (void)
{
  int i;

  SET_BASIC_BLOCK_FOR_FN (cfun, ENTRY_BLOCK, ENTRY_BLOCK_PTR_FOR_FN (cfun));
  SET_BASIC_BLOCK_FOR_FN (cfun, EXIT_BLOCK, EXIT_BLOCK_PTR_FOR_FN (cfun));

  if (df)
    df_compact_blocks ();
  else
    {
      basic_block bb;

      i = NUM_FIXED_BLOCKS;
      FOR_EACH_BB_FN (bb, cfun)
	{
	  SET_BASIC_BLOCK_FOR_FN (cfun, i, bb);
	  bb->index = i;
	  i++;
	}
      gcc_assert (i == n_basic_blocks_for_fn (cfun));

      for (; i < last_basic_block_for_fn (cfun); i++)
	SET_BASIC_BLOCK_FOR_FN (cfun, i, NULL);
    }
  last_basic_block_for_fn (cfun) = n_basic_blocks_for_fn (cfun);
}

/* Remove block B from the basic block array.  */

void
expunge_block (basic_block b)
{
  unlink_block (b);
  SET_BASIC_BLOCK_FOR_FN (cfun, b->index, NULL);
  n_basic_blocks_for_fn (cfun)--;
  /* We should be able to ggc_free here, but we are not.
     The dead SSA_NAMEs are left pointing to dead statements that in turn
     point to dead basic blocks, which makes the garbage collector crash.
     We should be able to release all dead SSA_NAMEs and at the same time
     consistently clear the BB pointer of dead statements.  */
}

/* Connect E to E->src.  */

static inline void
connect_src (edge e)
{
  vec_safe_push (e->src->succs, e);
  df_mark_solutions_dirty ();
}

/* Connect E to E->dest.  */

static inline void
connect_dest (edge e)
{
  basic_block dest = e->dest;
  vec_safe_push (dest->preds, e);
  e->dest_idx = EDGE_COUNT (dest->preds) - 1;
  df_mark_solutions_dirty ();
}

/* Disconnect edge E from E->src.  */

static inline void
disconnect_src (edge e)
{
  basic_block src = e->src;
  edge_iterator ei;
  edge tmp;

  for (ei = ei_start (src->succs); (tmp = ei_safe_edge (ei)); )
    {
      if (tmp == e)
	{
	  src->succs->unordered_remove (ei.index);
	  df_mark_solutions_dirty ();
	  return;
	}
      else
	ei_next (&ei);
    }

  gcc_unreachable ();
}

/* Disconnect edge E from E->dest.  */

static inline void
disconnect_dest (edge e)
{
  basic_block dest = e->dest;
  unsigned int dest_idx = e->dest_idx;

  dest->preds->unordered_remove (dest_idx);

  /* If we removed an edge in the middle of the edge vector, we need
     to update dest_idx of the edge that moved into the "hole".  */
  if (dest_idx < EDGE_COUNT (dest->preds))
    EDGE_PRED (dest, dest_idx)->dest_idx = dest_idx;
  df_mark_solutions_dirty ();
}

/* Create an edge connecting SRC and DEST with flags FLAGS.  Return newly
   created edge.  Use this only if you are sure that this edge can't
   possibly already exist.  */

edge
unchecked_make_edge (basic_block src, basic_block dst, int flags)
{
  edge e;
  e = ggc_cleared_alloc<edge_def> ();
  n_edges_for_fn (cfun)++;

  e->probability = profile_probability::uninitialized ();
  e->src = src;
  e->dest = dst;
  e->flags = flags;

  connect_src (e);
  connect_dest (e);

  execute_on_growing_pred (e);
  return e;
}

/* Create an edge connecting SRC and DST with FLAGS, optionally using
   edge cache EDGE_CACHE.  Return the new edge, or NULL if it already
   exists.  */

edge
cached_make_edge (sbitmap edge_cache, basic_block src, basic_block dst, int flags)
{
  if (edge_cache == NULL
      || src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
      || dst == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return make_edge (src, dst, flags);

  /* Does the requested edge already exist?  */
  if (! bitmap_bit_p (edge_cache, dst->index))
    {
      /* The edge does not exist.  Create one and update the
	 cache.  */
      bitmap_set_bit (edge_cache, dst->index);
      return unchecked_make_edge (src, dst, flags);
    }

  /* At this point, we know that the requested edge exists.  Adjust
     flags if necessary.  */
  if (flags)
    {
      edge e = find_edge (src, dst);
      e->flags |= flags;
    }

  return NULL;
}

/* Create an edge connecting SRC and DEST with flags FLAGS.  Return the
   newly created edge, or NULL if it already exists.  */

edge
make_edge (basic_block src, basic_block dest, int flags)
{
  edge e = find_edge (src, dest);

  /* Make sure we don't add duplicate edges.  */
  if (e)
    {
      e->flags |= flags;
      return NULL;
    }

  return unchecked_make_edge (src, dest, flags);
}

/* Create an edge connecting SRC to DEST and set its probability to
   "always", since it is the single edge leaving SRC.  */

edge
make_single_succ_edge (basic_block src, basic_block dest, int flags)
{
  edge e = make_edge (src, dest, flags);

  e->probability = profile_probability::always ();
  return e;
}

/* This function will remove an edge from the flow graph.  */

void
remove_edge_raw (edge e)
{
  remove_predictions_associated_with_edge (e);
  execute_on_shrinking_pred (e);

  disconnect_src (e);
  disconnect_dest (e);

  free_edge (cfun, e);
}

/* Redirect an edge's successor from one block to another.  */

void
redirect_edge_succ (edge e, basic_block new_succ)
{
  execute_on_shrinking_pred (e);

  disconnect_dest (e);

  e->dest = new_succ;

  /* Reconnect the edge to the new successor block.  */
  connect_dest (e);

  execute_on_growing_pred (e);
}

/* Redirect an edge's predecessor from one block to another.  */

void
redirect_edge_pred (edge e, basic_block new_pred)
{
  disconnect_src (e);

  e->src = new_pred;

  /* Reconnect the edge to the new predecessor block.  */
  connect_src (e);
}

/* Clear all basic block flags that do not have to be preserved.  */
void
clear_bb_flags (void)
{
  basic_block bb;
  int flags_to_preserve = BB_FLAGS_TO_PRESERVE;
  if (current_loops
      && loops_state_satisfies_p (cfun, LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
    flags_to_preserve |= BB_IRREDUCIBLE_LOOP;

  FOR_ALL_BB_FN (bb, cfun)
    bb->flags &= flags_to_preserve;
}

/* Check the consistency of profile information.  We can't do that
   in verify_flow_info, as the counts may become invalid for incompletely
   solved graphs, for later elimination of conditionals, or because of
   roundoff errors.  It is still practical to have them reported for
   debugging of simple testcases.  */
static void
check_bb_profile (basic_block bb, FILE * file, int indent)
{
  edge e;
  edge_iterator ei;
  struct function *fun = DECL_STRUCT_FUNCTION (current_function_decl);
  char *s_indent = (char *) alloca ((size_t) indent + 1);
  memset ((void *) s_indent, ' ', (size_t) indent);
  s_indent[indent] = '\0';

  if (profile_status_for_fn (fun) == PROFILE_ABSENT)
    return;

  if (bb != EXIT_BLOCK_PTR_FOR_FN (fun))
    {
      bool found = false;
      profile_probability sum = profile_probability::never ();
      int isum = 0;

      FOR_EACH_EDGE (e, ei, bb->succs)
	{
	  if (!(e->flags & (EDGE_EH | EDGE_FAKE)))
	    found = true;
	  sum += e->probability;
	  if (e->probability.initialized_p ())
	    isum += e->probability.to_reg_br_prob_base ();
	}
      /* Only report mismatches for non-EH control flow.  If there are only
	 EH edges, the BB ends with a noreturn call and the control flow may
	 simply terminate there.  */
      if (found)
	{
	  if (sum.differs_from_p (profile_probability::always ()))
	    {
	      fprintf (file,
		       ";; %sInvalid sum of outgoing probabilities ",
		       s_indent);
	      sum.dump (file);
	      fprintf (file, "\n");
	    }
	  /* Probabilities are capped at 100%, so the previous test will
	     never fire when the sum of the probabilities is too large.  */
	  else if (isum > REG_BR_PROB_BASE + 100)
	    {
	      fprintf (file,
		       ";; %sInvalid sum of outgoing probabilities %.1f%%\n",
		       s_indent, isum * 100.0 / REG_BR_PROB_BASE);
	    }
	}
    }
  if (bb != ENTRY_BLOCK_PTR_FOR_FN (fun))
    {
      profile_count sum = profile_count::zero ();
      FOR_EACH_EDGE (e, ei, bb->preds)
	sum += e->count ();
      if (sum.differs_from_p (bb->count))
	{
	  fprintf (file, ";; %sInvalid sum of incoming counts ",
		   s_indent);
	  sum.dump (file);
	  fprintf (file, ", should be ");
	  bb->count.dump (file);
	  fprintf (file, "\n");
	}
    }
  if (BB_PARTITION (bb) == BB_COLD_PARTITION)
    {
      /* Warn about inconsistencies in the partitioning that are
         currently caused by profile insanities created via optimization.  */
      if (!probably_never_executed_bb_p (fun, bb))
	fprintf (file, ";; %sBlock in cold partition with hot count\n",
		 s_indent);
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          if (!probably_never_executed_edge_p (fun, e))
            fprintf (file,
		     ";; %sBlock in cold partition with incoming hot edge\n",
		     s_indent);
        }
    }
}

void
dump_edge_info (FILE *file, edge e, dump_flags_t flags, int do_succ)
{
  basic_block side = (do_succ ? e->dest : e->src);
  bool do_details = false;

  if ((flags & TDF_DETAILS) != 0
      && (flags & TDF_SLIM) == 0)
    do_details = true;

  if (side->index == ENTRY_BLOCK)
    fputs (" ENTRY", file);
  else if (side->index == EXIT_BLOCK)
    fputs (" EXIT", file);
  else
    fprintf (file, " %d", side->index);

  if (e->probability.initialized_p () && do_details)
    {
      fprintf (file, " [");
      e->probability.dump (file);
      fprintf (file, "] ");
    }

  if (e->count ().initialized_p () && do_details)
    {
      fputs (" count:", file);
      e->count ().dump (file);
    }

  if (e->flags && do_details)
    {
      static const char * const bitnames[] =
	{
#define DEF_EDGE_FLAG(NAME,IDX) #NAME ,
#include "cfg-flags.def"
	  NULL
#undef DEF_EDGE_FLAG
	};
      bool comma = false;
      int i, flags = e->flags;

      gcc_assert (e->flags <= EDGE_ALL_FLAGS);
      fputs (" (", file);
      for (i = 0; flags; i++)
	if (flags & (1 << i))
	  {
	    flags &= ~(1 << i);

	    if (comma)
	      fputc (',', file);
	    fputs (bitnames[i], file);
	    comma = true;
	  }

      fputc (')', file);
    }
}

DEBUG_FUNCTION void
debug (edge_def &ref)
{
  /* FIXME (crowl): Is this desirable?  */
  dump_edge_info (stderr, &ref, 0, false);
  dump_edge_info (stderr, &ref, 0, true);
}

DEBUG_FUNCTION void
debug (edge_def *ptr)
{
  if (ptr)
    debug (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}

static void
debug_slim (edge e)
{
  fprintf (stderr, "<edge 0x%p (%d -> %d)>", (void *) e,
	   e->src->index, e->dest->index);
}

DEFINE_DEBUG_VEC (edge)
DEFINE_DEBUG_HASH_SET (edge)

/* Simple routines to easily allocate AUX fields of basic blocks.  */
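
/* Illustrative sketch only (not part of GCC itself): a pass that wants a
   scratch integer attached to every basic block would typically wrap the
   helpers below as follows, where MARK is a hypothetical placeholder:

       alloc_aux_for_blocks (sizeof (int));
       FOR_EACH_BB_FN (bb, cfun)
         *(int *) bb->aux = mark;
       ...
       free_aux_for_blocks ();

   alloc_aux_for_blocks zeroes the newly allocated storage, and
   free_aux_for_blocks releases the obstack data and clears the AUX
   pointers again.  */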

static struct obstack block_aux_obstack;
static void *first_block_aux_obj = 0;
static struct obstack edge_aux_obstack;
static void *first_edge_aux_obj = 0;

/* Allocate a memory block of SIZE as BB->aux.  The obstack must
   be first initialized by alloc_aux_for_blocks.  */

static void
alloc_aux_for_block (basic_block bb, int size)
{
  /* Verify that aux field is clear.  */
  gcc_assert (!bb->aux && first_block_aux_obj);
  bb->aux = obstack_alloc (&block_aux_obstack, size);
  memset (bb->aux, 0, size);
}

/* Initialize the block_aux_obstack and if SIZE is nonzero, call
   alloc_aux_for_block for each basic block.  */

void
alloc_aux_for_blocks (int size)
{
  static int initialized;

  if (!initialized)
    {
      gcc_obstack_init (&block_aux_obstack);
      initialized = 1;
    }
  else
    /* Check whether AUX data are still allocated.  */
    gcc_assert (!first_block_aux_obj);

  first_block_aux_obj = obstack_alloc (&block_aux_obstack, 0);
  if (size)
    {
      basic_block bb;

      FOR_ALL_BB_FN (bb, cfun)
	alloc_aux_for_block (bb, size);
    }
}

/* Clear AUX pointers of all blocks.  */

void
clear_aux_for_blocks (void)
{
  basic_block bb;

  FOR_ALL_BB_FN (bb, cfun)
    bb->aux = NULL;
}

/* Free data allocated in block_aux_obstack and clear AUX pointers
   of all blocks.  */

void
free_aux_for_blocks (void)
{
  gcc_assert (first_block_aux_obj);
  obstack_free (&block_aux_obstack, first_block_aux_obj);
  first_block_aux_obj = NULL;

  clear_aux_for_blocks ();
}

/* Allocate a memory block of SIZE as E->aux.  The obstack must
   be first initialized by alloc_aux_for_edges.  */

void
alloc_aux_for_edge (edge e, int size)
{
  /* Verify that aux field is clear.  */
  gcc_assert (!e->aux && first_edge_aux_obj);
  e->aux = obstack_alloc (&edge_aux_obstack, size);
  memset (e->aux, 0, size);
}

/* Initialize the edge_aux_obstack and if SIZE is nonzero, call
   alloc_aux_for_edge for each edge.  */

void
alloc_aux_for_edges (int size)
{
  static int initialized;

  if (!initialized)
    {
      gcc_obstack_init (&edge_aux_obstack);
      initialized = 1;
    }
  else
    /* Check whether AUX data are still allocated.  */
    gcc_assert (!first_edge_aux_obj);

  first_edge_aux_obj = obstack_alloc (&edge_aux_obstack, 0);
  if (size)
    {
      basic_block bb;

      FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
		      EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
	{
	  edge e;
	  edge_iterator ei;

	  FOR_EACH_EDGE (e, ei, bb->succs)
	    alloc_aux_for_edge (e, size);
	}
    }
}

/* Clear AUX pointers of all edges.  */

void
clear_aux_for_edges (void)
{
  basic_block bb;
  edge e;

  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
		  EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
    {
      edge_iterator ei;
      FOR_EACH_EDGE (e, ei, bb->succs)
	e->aux = NULL;
    }
}

/* Free data allocated in edge_aux_obstack and clear AUX pointers
   of all edges.  */

void
free_aux_for_edges (void)
{
  gcc_assert (first_edge_aux_obj);
  obstack_free (&edge_aux_obstack, first_edge_aux_obj);
  first_edge_aux_obj = NULL;

  clear_aux_for_edges ();
}

DEBUG_FUNCTION void
debug_bb (basic_block bb)
{
  dump_bb (stderr, bb, 0, dump_flags);
}

DEBUG_FUNCTION basic_block
debug_bb_n (int n)
{
  basic_block bb = BASIC_BLOCK_FOR_FN (cfun, n);
  debug_bb (bb);
  return bb;
}

/* Dumps cfg related information about basic block BB to OUTF.
   If HEADER is true, dump things that appear before the instructions
   contained in BB.  If FOOTER is true, dump things that appear after.
   Flags are the TDF_* masks as documented in dumpfile.h.
   NB: With TDF_DETAILS, it is assumed that cfun is available, so
   that maybe_hot_bb_p and probably_never_executed_bb_p don't ICE.  */

void
dump_bb_info (FILE *outf, basic_block bb, int indent, dump_flags_t flags,
	      bool do_header, bool do_footer)
{
  edge_iterator ei;
  edge e;
  static const char * const bb_bitnames[] =
    {
#define DEF_BASIC_BLOCK_FLAG(NAME,IDX) #NAME ,
#include "cfg-flags.def"
      NULL
#undef DEF_BASIC_BLOCK_FLAG
    };
  const unsigned n_bitnames = sizeof (bb_bitnames) / sizeof (char *);
  bool first;
  char *s_indent = (char *) alloca ((size_t) indent + 1);
  memset ((void *) s_indent, ' ', (size_t) indent);
  s_indent[indent] = '\0';

  gcc_assert (bb->flags <= BB_ALL_FLAGS);

  if (do_header)
    {
      unsigned i;

      fputs (";; ", outf);
      fprintf (outf, "%sbasic block %d, loop depth %d",
	       s_indent, bb->index, bb_loop_depth (bb));
      if (flags & TDF_DETAILS)
	{
	  struct function *fun = DECL_STRUCT_FUNCTION (current_function_decl);
	  if (bb->count.initialized_p ())
	    {
	      fputs (", count ", outf);
	      bb->count.dump (outf);
	    }
	  if (maybe_hot_bb_p (fun, bb))
	    fputs (", maybe hot", outf);
	  if (probably_never_executed_bb_p (fun, bb))
	    fputs (", probably never executed", outf);
	}
      fputc ('\n', outf);

      if (flags & TDF_DETAILS)
	{
	  check_bb_profile (bb, outf, indent);
	  fputs (";; ", outf);
	  fprintf (outf, "%s prev block ", s_indent);
	  if (bb->prev_bb)
	    fprintf (outf, "%d", bb->prev_bb->index);
	  else
	    fprintf (outf, "(nil)");
	  fprintf (outf, ", next block ");
	  if (bb->next_bb)
	    fprintf (outf, "%d", bb->next_bb->index);
	  else
	    fprintf (outf, "(nil)");

	  fputs (", flags:", outf);
	  first = true;
	  for (i = 0; i < n_bitnames; i++)
	    if (bb->flags & (1 << i))
	      {
		if (first)
		  fputs (" (", outf);
		else
		  fputs (", ", outf);
		first = false;
		fputs (bb_bitnames[i], outf);
	      }
	  if (!first)
	    fputc (')', outf);
	  fputc ('\n', outf);
	}

      fputs (";; ", outf);
      fprintf (outf, "%s pred:      ", s_indent);
      first = true;
      FOR_EACH_EDGE (e, ei, bb->preds)
	{
	  if (! first)
	    {
	      fputs (";; ", outf);
	      fprintf (outf, "%s            ", s_indent);
	    }
	  first = false;
	  dump_edge_info (outf, e, flags, 0);
	  fputc ('\n', outf);
	}
      if (first)
	fputc ('\n', outf);
    }

  if (do_footer)
    {
      fputs (";; ", outf);
      fprintf (outf, "%s succ:      ", s_indent);
      first = true;
      FOR_EACH_EDGE (e, ei, bb->succs)
        {
	  if (! first)
	    {
	      fputs (";; ", outf);
	      fprintf (outf, "%s            ", s_indent);
	    }
	  first = false;
	  dump_edge_info (outf, e, flags, 1);
	  fputc ('\n', outf);
	}
      if (first)
	fputc ('\n', outf);
    }
}

/* Dumps a brief description of cfg to FILE.  */

void
brief_dump_cfg (FILE *file, dump_flags_t flags)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      dump_bb_info (file, bb, 0, flags & TDF_DETAILS, true, true);
    }
}

/* An edge with count COUNT, originally destined for BB, has been proved
   to leave the block via TAKEN_EDGE.  Update the profile of BB so that
   the edge being threaded can be redirected to the destination of
   TAKEN_EDGE.

   This function may leave the profile inconsistent when the frequency
   or count of TAKEN_EDGE is believed to be lower than COUNT.  */
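/* Illustrative example with made-up numbers (not taken from GCC
   documentation): suppose BB has count 100 and two successors,
   TAKEN_EDGE with probability 50% and another edge with probability
   50%, and COUNT is 25.  Then BB's count drops to 75, the threaded
   probability becomes 25/75 (roughly 33%), TAKEN_EDGE's probability is
   reduced to about 17%, and dividing every successor probability by the
   inverted value (about 67%) renormalizes them to roughly 25% and 75%,
   so that they again sum to 100%.  */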
void
update_bb_profile_for_threading (basic_block bb,
				 profile_count count, edge taken_edge)
{
  edge c;
  profile_probability prob;
  edge_iterator ei;

  if (bb->count < count)
    {
      if (dump_file)
	fprintf (dump_file, "bb %i count became negative after threading",
		 bb->index);
    }
  bb->count -= count;

  /* Compute the probability of TAKEN_EDGE being reached via threaded edge.
     Watch for overflows.  */
  if (bb->count.nonzero_p ())
    prob = count.probability_in (bb->count);
  else
    prob = profile_probability::never ();
  if (prob > taken_edge->probability)
    {
      if (dump_file)
	{
	  fprintf (dump_file, "Jump threading proved probability of edge "
		   "%i->%i too small (it is ",
		   taken_edge->src->index, taken_edge->dest->index);
	  taken_edge->probability.dump (dump_file);
	  fprintf (dump_file, " should be ");
	  prob.dump (dump_file);
	  fprintf (dump_file, ")\n");
	}
      prob = taken_edge->probability.apply_scale (6, 8);
    }

  /* Now rescale the probabilities.  */
  taken_edge->probability -= prob;
  prob = prob.invert ();
  if (prob == profile_probability::never ())
    {
      if (dump_file)
	fprintf (dump_file, "Edge probabilities of bb %i has been reset, "
		 "count of block should end up being 0, it is non-zero\n",
		 bb->index);
      EDGE_SUCC (bb, 0)->probability = profile_probability::guessed_always ();
      ei = ei_start (bb->succs);
      ei_next (&ei);
      for (; (c = ei_safe_edge (ei)); ei_next (&ei))
	c->probability = profile_probability::guessed_never ();
    }
  else if (!(prob == profile_probability::always ()))
    {
      FOR_EACH_EDGE (c, ei, bb->succs)
	c->probability /= prob;
    }

  gcc_assert (bb == taken_edge->src);
}

/* Multiply all frequencies of basic blocks in array BBS of length NBBS
   by NUM/DEN, in profile_count arithmetic.  More accurate than previous
   function but considerably slower.  */
void
scale_bbs_frequencies_profile_count (basic_block *bbs, int nbbs,
				     profile_count num, profile_count den)
{
  int i;
  if (num == profile_count::zero () || den.nonzero_p ())
    for (i = 0; i < nbbs; i++)
      bbs[i]->count = bbs[i]->count.apply_scale (num, den);
}

/* Multiply all frequencies of basic blocks in array BBS of length NBBS
   by probability P.  */
void
scale_bbs_frequencies (basic_block *bbs, int nbbs,
		       profile_probability p)
{
  int i;

  for (i = 0; i < nbbs; i++)
    bbs[i]->count = bbs[i]->count.apply_probability (p);
}

/* Helper types for hash tables.  */

struct htab_bb_copy_original_entry
{
  /* Block we are attaching info to.  */
  int index1;
  /* Index of original or copy (depending on the hashtable) */
  int index2;
};

struct bb_copy_hasher : nofree_ptr_hash <htab_bb_copy_original_entry>
{
  static inline hashval_t hash (const htab_bb_copy_original_entry *);
  static inline bool equal (const htab_bb_copy_original_entry *existing,
			    const htab_bb_copy_original_entry * candidate);
};

inline hashval_t
bb_copy_hasher::hash (const htab_bb_copy_original_entry *data)
{
  return data->index1;
}

inline bool
bb_copy_hasher::equal (const htab_bb_copy_original_entry *data,
		       const htab_bb_copy_original_entry *data2)
{
  return data->index1 == data2->index1;
}

/* Data structures used to maintain mapping between basic blocks and
   copies.  */
static hash_table<bb_copy_hasher> *bb_original;
static hash_table<bb_copy_hasher> *bb_copy;

/* And between loops and copies.  */
static hash_table<bb_copy_hasher> *loop_copy;
static object_allocator<htab_bb_copy_original_entry> *original_copy_bb_pool;

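/* Illustrative sketch only (not part of GCC itself): a transformation
   that duplicates blocks typically brackets its work with these tables,
   where OLD_BB and NEW_BB are hypothetical placeholders:

       initialize_original_copy_tables ();
       ...
       set_bb_original (new_bb, old_bb);
       set_bb_copy (old_bb, new_bb);
       ...
       basic_block copy = get_bb_copy (old_bb);
       ...
       free_original_copy_tables ();

   The set_* routines are deliberately no-ops when the tables have not
   been initialized, so passes that do not need the mapping can simply
   ignore it.  */
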
/* Initialize the data structures to maintain mapping between blocks
   and their copies.  */
void
initialize_original_copy_tables (void)
{
  original_copy_bb_pool = new object_allocator<htab_bb_copy_original_entry>
    ("original_copy");
  bb_original = new hash_table<bb_copy_hasher> (10);
  bb_copy = new hash_table<bb_copy_hasher> (10);
  loop_copy = new hash_table<bb_copy_hasher> (10);
}

/* Reset the data structures to maintain mapping between blocks and
   their copies.  */

void
reset_original_copy_tables (void)
{
  gcc_assert (original_copy_bb_pool);
  bb_original->empty ();
  bb_copy->empty ();
  loop_copy->empty ();
}

/* Free the data structures to maintain mapping between blocks and
   their copies.  */
void
free_original_copy_tables (void)
{
  gcc_assert (original_copy_bb_pool);
  delete bb_copy;
  bb_copy = NULL;
  delete bb_original;
  bb_original = NULL;
  delete loop_copy;
  loop_copy = NULL;
  delete original_copy_bb_pool;
  original_copy_bb_pool = NULL;
}

/* Return true iff we have had a call to initialize_original_copy_tables
   without a corresponding call to free_original_copy_tables.  */

bool
original_copy_tables_initialized_p (void)
{
  return original_copy_bb_pool != NULL;
}

/* Removes the value associated with OBJ from table TAB.  */

static void
copy_original_table_clear (hash_table<bb_copy_hasher> *tab, unsigned obj)
{
  htab_bb_copy_original_entry **slot;
  struct htab_bb_copy_original_entry key, *elt;

  if (!original_copy_bb_pool)
    return;

  key.index1 = obj;
  slot = tab->find_slot (&key, NO_INSERT);
  if (!slot)
    return;

  elt = *slot;
  tab->clear_slot (slot);
  original_copy_bb_pool->remove (elt);
}

/* Sets the value associated with OBJ in table TAB to VAL.
   Do nothing when data structures are not initialized.  */

static void
copy_original_table_set (hash_table<bb_copy_hasher> *tab,
			 unsigned obj, unsigned val)
{
  struct htab_bb_copy_original_entry **slot;
  struct htab_bb_copy_original_entry key;

  if (!original_copy_bb_pool)
    return;

  key.index1 = obj;
  slot = tab->find_slot (&key, INSERT);
  if (!*slot)
    {
      *slot = original_copy_bb_pool->allocate ();
      (*slot)->index1 = obj;
    }
  (*slot)->index2 = val;
}

/* Set original for basic block.  Do nothing when data structures are not
   initialized so passes not needing this don't need to care.  */
void
set_bb_original (basic_block bb, basic_block original)
{
  copy_original_table_set (bb_original, bb->index, original->index);
}

/* Get the original basic block.  */
basic_block
get_bb_original (basic_block bb)
{
  struct htab_bb_copy_original_entry *entry;
  struct htab_bb_copy_original_entry key;

  gcc_assert (original_copy_bb_pool);

  key.index1 = bb->index;
  entry = bb_original->find (&key);
  if (entry)
    return BASIC_BLOCK_FOR_FN (cfun, entry->index2);
  else
    return NULL;
}

/* Set copy for basic block.  Do nothing when data structures are not
   initialized so passes not needing this don't need to care.  */
void
set_bb_copy (basic_block bb, basic_block copy)
{
  copy_original_table_set (bb_copy, bb->index, copy->index);
}

/* Get the copy of basic block.  */
basic_block
get_bb_copy (basic_block bb)
{
  struct htab_bb_copy_original_entry *entry;
  struct htab_bb_copy_original_entry key;

  gcc_assert (original_copy_bb_pool);

  key.index1 = bb->index;
  entry = bb_copy->find (&key);
  if (entry)
    return BASIC_BLOCK_FOR_FN (cfun, entry->index2);
  else
    return NULL;
}

/* Set copy for LOOP to COPY.  Do nothing when data structures are not
   initialized so passes not needing this don't need to care.  */

void
set_loop_copy (struct loop *loop, struct loop *copy)
{
  if (!copy)
    copy_original_table_clear (loop_copy, loop->num);
  else
    copy_original_table_set (loop_copy, loop->num, copy->num);
}

/* Get the copy of LOOP.  */

struct loop *
get_loop_copy (struct loop *loop)
{
  struct htab_bb_copy_original_entry *entry;
  struct htab_bb_copy_original_entry key;

  gcc_assert (original_copy_bb_pool);

  key.index1 = loop->num;
  entry = loop_copy->find (&key);
  if (entry)
    return get_loop (cfun, entry->index2);
  else
    return NULL;
}