/* Utilities for ipa analysis.
   Copyright (C) 2005-2018 Free Software Foundation, Inc.
   Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "predict.h"
#include "alloc-pool.h"
#include "cgraph.h"
#include "lto-streamer.h"
#include "dumpfile.h"
#include "splay-tree.h"
#include "ipa-utils.h"
#include "symbol-summary.h"
#include "tree-vrp.h"
#include "ipa-prop.h"
#include "ipa-fnsummary.h"

/* Debugging function for postorder and inorder code.  NOTE is a string
   that is printed before the nodes are printed.  ORDER is an array of
   cgraph_nodes that has COUNT useful nodes in it.  */

void
ipa_print_order (FILE* out,
		 const char * note,
		 struct cgraph_node** order,
		 int count)
{
  int i;
  fprintf (out, "\n\n ordered call graph: %s\n", note);

  for (i = count - 1; i >= 0; i--)
    order[i]->dump (out);
  fprintf (out, "\n");
  fflush (out);
}
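
/* Illustrative sketch (not part of GCC itself): a pass that has filled an
   ORDER array, e.g. via ipa_reverse_postorder below, might dump it to its
   dump file like so:

     if (dump_file)
       ipa_print_order (dump_file, "reduced", order, order_pos);

   The names `order' and `order_pos' are assumed locals of the caller.  */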

struct searchc_env {
  struct cgraph_node **stack;
  struct cgraph_node **result;
  int stack_size;
  int order_pos;
  splay_tree nodes_marked_new;
  bool reduce;
  int count;
};

/* This is an implementation of Tarjan's strongly connected component
   finder, as reprinted in Aho, Hopcroft and Ullman's The Design and
   Analysis of Computer Algorithms (1974), pages 192-193.  This version
   has been customized for cgraph_nodes.  The function is recursive and
   should only be called from itself or from ipa_reduced_postorder.
   ENV holds the traversal state and would be unnecessary if C had
   nested functions.  V is the node to start searching from.  */

static void
searchc (struct searchc_env* env, struct cgraph_node *v,
	 bool (*ignore_edge) (struct cgraph_edge *))
{
  struct cgraph_edge *edge;
  struct ipa_dfs_info *v_info = (struct ipa_dfs_info *) v->aux;

  /* Mark node as old.  */
  v_info->new_node = false;
  splay_tree_remove (env->nodes_marked_new, v->uid);

  v_info->dfn_number = env->count;
  v_info->low_link = env->count;
  env->count++;
  env->stack[(env->stack_size)++] = v;
  v_info->on_stack = true;

  for (edge = v->callees; edge; edge = edge->next_callee)
    {
      struct ipa_dfs_info * w_info;
      enum availability avail;
      struct cgraph_node *w = edge->callee->ultimate_alias_target (&avail);

      if (!w || (ignore_edge && ignore_edge (edge)))
        continue;

      if (w->aux
	  && avail >= AVAIL_INTERPOSABLE)
	{
	  w_info = (struct ipa_dfs_info *) w->aux;
	  if (w_info->new_node)
	    {
	      searchc (env, w, ignore_edge);
	      v_info->low_link =
		(v_info->low_link < w_info->low_link) ?
		v_info->low_link : w_info->low_link;
	    }
	  else
	    if ((w_info->dfn_number < v_info->dfn_number)
		&& (w_info->on_stack))
	      v_info->low_link =
		(w_info->dfn_number < v_info->low_link) ?
		w_info->dfn_number : v_info->low_link;
	}
    }


  if (v_info->low_link == v_info->dfn_number)
    {
      struct cgraph_node *last = NULL;
      struct cgraph_node *x;
      struct ipa_dfs_info *x_info;
      do {
	x = env->stack[--(env->stack_size)];
	x_info = (struct ipa_dfs_info *) x->aux;
	x_info->on_stack = false;
	x_info->scc_no = v_info->dfn_number;

	if (env->reduce)
	  {
	    x_info->next_cycle = last;
	    last = x;
	  }
	else
	  env->result[env->order_pos++] = x;
      }
      while (v != x);
      if (env->reduce)
	env->result[env->order_pos++] = v;
    }
}
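
/* Worked example (illustrative only): for a call graph with edges
   A->B, B->C and C->A, searchc started at A assigns dfn_number 1, 2 and 3
   to A, B and C.  C's call back to A lowers C's low_link to 1, which
   propagates back to B and A; since A finishes with low_link equal to its
   dfn_number, the whole stack {C, B, A} is popped as one strongly
   connected component with scc_no 1.  */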

/* Topsort the call graph by caller relation.  Put the result in ORDER.

   The REDUCE flag is true if you want the cycles reduced to single nodes.
   You can use ipa_get_nodes_in_cycle to obtain a vector containing all real
   call graph nodes in a reduced node.

   Nodes with availability of at least AVAIL_INTERPOSABLE are included.
   IGNORE_EDGE, if non-NULL, is a hook that may make some edges insignificant
   for the topological sort.  */

int
ipa_reduced_postorder (struct cgraph_node **order,
		       bool reduce,
		       bool (*ignore_edge) (struct cgraph_edge *))
{
  struct cgraph_node *node;
  struct searchc_env env;
  splay_tree_node result;
  env.stack = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
  env.stack_size = 0;
  env.result = order;
  env.order_pos = 0;
  env.nodes_marked_new = splay_tree_new (splay_tree_compare_ints, 0, 0);
  env.count = 1;
  env.reduce = reduce;

  FOR_EACH_DEFINED_FUNCTION (node)
    {
      enum availability avail = node->get_availability ();

      if (avail >= AVAIL_INTERPOSABLE)
	{
	  /* Reuse the info if it is already there.  */
	  struct ipa_dfs_info *info = (struct ipa_dfs_info *) node->aux;
	  if (!info)
	    info = XCNEW (struct ipa_dfs_info);
	  info->new_node = true;
	  info->on_stack = false;
	  info->next_cycle = NULL;
	  node->aux = info;

	  splay_tree_insert (env.nodes_marked_new,
			     (splay_tree_key)node->uid,
			     (splay_tree_value)node);
	}
      else
	node->aux = NULL;
    }
  result = splay_tree_min (env.nodes_marked_new);
  while (result)
    {
      node = (struct cgraph_node *)result->value;
      searchc (&env, node, ignore_edge);
      result = splay_tree_min (env.nodes_marked_new);
    }
  splay_tree_delete (env.nodes_marked_new);
  free (env.stack);

  return env.order_pos;
}
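
/* Usage sketch (illustrative only, not called from this file): an IPA pass
   wanting to walk strongly connected components bottom-up might do roughly

     struct cgraph_node **order
       = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
     int n = ipa_reduced_postorder (order, true, NULL);
     for (int i = 0; i < n; i++)
       ...process order[i], using ipa_get_nodes_in_cycle for members...
     ipa_free_postorder_info ();
     free (order);

   mirroring the pattern used by several existing IPA passes.  */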

/* Deallocate all ipa_dfs_info structures pointed to by the aux pointer of call
   graph nodes.  */

void
ipa_free_postorder_info (void)
{
  struct cgraph_node *node;
  FOR_EACH_DEFINED_FUNCTION (node)
    {
      /* Get rid of the aux information.  */
      if (node->aux)
	{
	  free (node->aux);
	  node->aux = NULL;
	}
    }
}

/* Get the set of nodes for the cycle in the reduced call graph starting
   from NODE.  */

vec<cgraph_node *>
ipa_get_nodes_in_cycle (struct cgraph_node *node)
{
  vec<cgraph_node *> v = vNULL;
  struct ipa_dfs_info *node_dfs_info;
  while (node)
    {
      v.safe_push (node);
      node_dfs_info = (struct ipa_dfs_info *) node->aux;
      node = node_dfs_info->next_cycle;
    }
  return v;
}
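
/* Illustrative sketch: after ipa_reduced_postorder with REDUCE set, the
   members of the component represented by a node N can be visited as

     vec<cgraph_node *> cycle = ipa_get_nodes_in_cycle (N);
     unsigned ix;
     cgraph_node *member;
     FOR_EACH_VEC_ELT (cycle, ix, member)
       ...;
     cycle.release ();

   The caller owns the returned vector and must release it.  */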

/* Return true iff the CS is an edge within a strongly connected component as
   computed by ipa_reduced_postorder.  */

bool
ipa_edge_within_scc (struct cgraph_edge *cs)
{
  struct ipa_dfs_info *caller_dfs = (struct ipa_dfs_info *) cs->caller->aux;
  struct ipa_dfs_info *callee_dfs;
  struct cgraph_node *callee = cs->callee->function_symbol ();

  callee_dfs = (struct ipa_dfs_info *) callee->aux;
  return (caller_dfs
	  && callee_dfs
	  && caller_dfs->scc_no == callee_dfs->scc_no);
}

struct postorder_stack
{
  struct cgraph_node *node;
  struct cgraph_edge *edge;
  int ref;
};

/* Fill array ORDER with all nodes with output flag set in the reverse
   topological order.  Return the number of elements in the array.
   FIXME: While walking, consider aliases, too.  */

int
ipa_reverse_postorder (struct cgraph_node **order)
{
  struct cgraph_node *node, *node2;
  int stack_size = 0;
  int order_pos = 0;
  struct cgraph_edge *edge;
  int pass;
  struct ipa_ref *ref = NULL;

  struct postorder_stack *stack =
    XCNEWVEC (struct postorder_stack, symtab->cgraph_count);

  /* We have to deal with cycles nicely, so use a depth first traversal
     output algorithm.  Ignore the fact that some functions won't need
     to be output and put them into order as well, so we get dependencies
     right through inline functions.  */
  FOR_EACH_FUNCTION (node)
    node->aux = NULL;
  for (pass = 0; pass < 2; pass++)
    FOR_EACH_FUNCTION (node)
      if (!node->aux
	  && (pass
	      || (!node->address_taken
		  && !node->global.inlined_to
		  && !node->alias && !node->thunk.thunk_p
		  && !node->only_called_directly_p ())))
	{
	  stack_size = 0;
          stack[stack_size].node = node;
	  stack[stack_size].edge = node->callers;
	  stack[stack_size].ref = 0;
	  node->aux = (void *)(size_t)1;
	  while (stack_size >= 0)
	    {
	      while (true)
		{
		  node2 = NULL;
		  while (stack[stack_size].edge && !node2)
		    {
		      edge = stack[stack_size].edge;
		      node2 = edge->caller;
		      stack[stack_size].edge = edge->next_caller;
		      /* Break possible cycles involving always-inline
			 functions by ignoring edges from always-inline
			 functions to non-always-inline functions.  */
		      if (DECL_DISREGARD_INLINE_LIMITS (edge->caller->decl)
			  && !DECL_DISREGARD_INLINE_LIMITS
			    (edge->callee->function_symbol ()->decl))
			node2 = NULL;
		    }
		  for (; stack[stack_size].node->iterate_referring (
						       stack[stack_size].ref,
						       ref) && !node2;
		       stack[stack_size].ref++)
		    {
		      if (ref->use == IPA_REF_ALIAS)
			node2 = dyn_cast <cgraph_node *> (ref->referring);
		    }
		  if (!node2)
		    break;
		  if (!node2->aux)
		    {
		      stack[++stack_size].node = node2;
		      stack[stack_size].edge = node2->callers;
		      stack[stack_size].ref = 0;
		      node2->aux = (void *)(size_t)1;
		    }
		}
	      order[order_pos++] = stack[stack_size--].node;
	    }
	}
  free (stack);
  FOR_EACH_FUNCTION (node)
    node->aux = NULL;
  return order_pos;
}
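
/* Usage sketch (illustrative only): callers allocate ORDER themselves, e.g.

     struct cgraph_node **order
       = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
     int n = ipa_reverse_postorder (order);
     for (int i = 0; i < n; i++)
       ...;
     free (order);

   Unlike ipa_reduced_postorder, this leaves no per-node aux data behind,
   so there is nothing to free besides the array itself.  */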



/* Given a memory reference T, return the variable at the bottom
   of the access.  Unlike get_base_address, this will recurse through
   INDIRECT_REFS.  */

tree
get_base_var (tree t)
{
  while (!SSA_VAR_P (t)
	 && (!CONSTANT_CLASS_P (t))
	 && TREE_CODE (t) != LABEL_DECL
	 && TREE_CODE (t) != FUNCTION_DECL
	 && TREE_CODE (t) != CONST_DECL
	 && TREE_CODE (t) != CONSTRUCTOR)
    {
      t = TREE_OPERAND (t, 0);
    }
  return t;
}
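
/* For example (illustrative only): for an access like (*p_1).a the loop
   keeps stripping operand 0 through the COMPONENT_REF and the dereference
   and returns the SSA name p_1, whereas for x.a[i].b it strips down to and
   returns the declaration of x.  */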

/* Scale the counts of the calls in NODE by the ratio NODE->count / ORIG_COUNT.  */

void
scale_ipa_profile_for_fn (struct cgraph_node *node, profile_count orig_count)
{
  profile_count to = node->count;
  profile_count::adjust_for_ipa_scaling (&to, &orig_count);
  struct cgraph_edge *e;

  for (e = node->callees; e; e = e->next_callee)
    e->count = e->count.apply_scale (to, orig_count);
  for (e = node->indirect_calls; e; e = e->next_callee)
    e->count = e->count.apply_scale (to, orig_count);
}
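
/* Illustrative example: if NODE's count has just been changed from an
   ORIG_COUNT of 1000 to a new count of 500, every outgoing call edge count
   is multiplied by 500/1000, i.e. halved, so that the edge counts stay
   consistent with the new function count.  */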

/* SRC and DST are going to be merged.  Take SRC's profile and merge it into
   DST so it is not going to be lost.  Possibly destroy SRC's body on the way
   unless PRESERVE_BODY is set.  */

void
ipa_merge_profiles (struct cgraph_node *dst,
		    struct cgraph_node *src,
		    bool preserve_body)
{
  tree oldsrcdecl = src->decl;
  struct function *srccfun, *dstcfun;
  bool match = true;

  if (!src->definition
      || !dst->definition)
    return;

  if (src->frequency < dst->frequency)
    src->frequency = dst->frequency;

  /* Time profiles are merged.  */
  if (dst->tp_first_run > src->tp_first_run && src->tp_first_run)
    dst->tp_first_run = src->tp_first_run;

  if (src->profile_id && !dst->profile_id)
    dst->profile_id = src->profile_id;

  /* Merging a zero profile into DST is a no-op.  */
  if (src->count.ipa () == profile_count::zero ())
    return;

  /* FIXME: when we merge in an unknown profile, we ought to set the counts
     as unsafe.  */
  if (!src->count.initialized_p ()
      || !(src->count.ipa () == src->count))
    return;
  if (symtab->dump_file)
    {
      fprintf (symtab->dump_file, "Merging profiles of %s to %s\n",
	       src->dump_name (), dst->dump_name ());
    }
  profile_count orig_count = dst->count;

  if (dst->count.initialized_p () && dst->count.ipa () == dst->count)
    dst->count += src->count.ipa ();
  else
    dst->count = src->count.ipa ();

  /* First handle functions with no gimple body.  */
  if (dst->thunk.thunk_p || dst->alias
      || src->thunk.thunk_p || src->alias)
    {
      scale_ipa_profile_for_fn (dst, orig_count);
      return;
    }

  /* This is ugly.  We need to get both function bodies into memory.
     If the declaration is merged, we need to duplicate it to be able
     to load the body that is being replaced.  This makes the symbol table
     temporarily inconsistent.  */
  if (src->decl == dst->decl)
    {
      struct lto_in_decl_state temp;
      struct lto_in_decl_state *state;

      /* We are going to move the decl, so we want to remove its file decl
	 data and link these with the new decl.  */
      temp.fn_decl = src->decl;
      lto_in_decl_state **slot
	= src->lto_file_data->function_decl_states->find_slot (&temp,
							       NO_INSERT);
      state = *slot;
      src->lto_file_data->function_decl_states->clear_slot (slot);
      gcc_assert (state);

      /* Duplicate the decl and be sure it does not link into body of DST.  */
      src->decl = copy_node (src->decl);
      DECL_STRUCT_FUNCTION (src->decl) = NULL;
      DECL_ARGUMENTS (src->decl) = NULL;
      DECL_INITIAL (src->decl) = NULL;
      DECL_RESULT (src->decl) = NULL;

      /* Associate the decl state with the new declaration, so the LTO
	 streamer can look it up.  */
      state->fn_decl = src->decl;
      slot
	= src->lto_file_data->function_decl_states->find_slot (state, INSERT);
      gcc_assert (!*slot);
      *slot = state;
    }
  src->get_untransformed_body ();
  dst->get_untransformed_body ();
  srccfun = DECL_STRUCT_FUNCTION (src->decl);
  dstcfun = DECL_STRUCT_FUNCTION (dst->decl);
  if (n_basic_blocks_for_fn (srccfun)
      != n_basic_blocks_for_fn (dstcfun))
    {
      if (symtab->dump_file)
	fprintf (symtab->dump_file,
		 "Giving up; number of basic blocks mismatch.\n");
      match = false;
    }
  else if (last_basic_block_for_fn (srccfun)
	   != last_basic_block_for_fn (dstcfun))
    {
      if (symtab->dump_file)
	fprintf (symtab->dump_file,
		 "Giving up; last block mismatch.\n");
      match = false;
    }
  else
    {
      basic_block srcbb, dstbb;

      FOR_ALL_BB_FN (srcbb, srccfun)
	{
	  unsigned int i;

	  dstbb = BASIC_BLOCK_FOR_FN (dstcfun, srcbb->index);
	  if (dstbb == NULL)
	    {
	      if (symtab->dump_file)
		fprintf (symtab->dump_file,
			 "No matching block for bb %i.\n",
			 srcbb->index);
	      match = false;
	      break;
	    }
	  if (EDGE_COUNT (srcbb->succs) != EDGE_COUNT (dstbb->succs))
	    {
	      if (symtab->dump_file)
		fprintf (symtab->dump_file,
			 "Edge count mismatch for bb %i.\n",
			 srcbb->index);
	      match = false;
	      break;
	    }
	  for (i = 0; i < EDGE_COUNT (srcbb->succs); i++)
	    {
	      edge srce = EDGE_SUCC (srcbb, i);
	      edge dste = EDGE_SUCC (dstbb, i);
	      if (srce->dest->index != dste->dest->index)
		{
		  if (symtab->dump_file)
		    fprintf (symtab->dump_file,
			     "Succ edge mismatch for bb %i.\n",
			     srce->dest->index);
		  match = false;
		  break;
		}
	    }
	}
    }
  if (match)
    {
      struct cgraph_edge *e, *e2;
      basic_block srcbb, dstbb;

      /* TODO: merge also statement histograms.  */
      FOR_ALL_BB_FN (srcbb, srccfun)
	{
	  unsigned int i;

	  dstbb = BASIC_BLOCK_FOR_FN (dstcfun, srcbb->index);

	  /* Either sum the profiles if both are IPA and not global0, or
	     pick the more informative one (that is, nonzero IPA if the other
	     is uninitialized, guessed or global0).  */
	  if (!dstbb->count.ipa ().initialized_p ()
	      || (dstbb->count.ipa () == profile_count::zero ()
		  && (srcbb->count.ipa ().initialized_p ()
		      && !(srcbb->count.ipa () == profile_count::zero ()))))
	    {
	      dstbb->count = srcbb->count;
	      for (i = 0; i < EDGE_COUNT (srcbb->succs); i++)
		{
		  edge srce = EDGE_SUCC (srcbb, i);
		  edge dste = EDGE_SUCC (dstbb, i);
		  if (srce->probability.initialized_p ())
		    dste->probability = srce->probability;
		}
	    }
	  else if (srcbb->count.ipa ().initialized_p ()
		   && !(srcbb->count.ipa () == profile_count::zero ()))
	    {
	      for (i = 0; i < EDGE_COUNT (srcbb->succs); i++)
		{
		  edge srce = EDGE_SUCC (srcbb, i);
		  edge dste = EDGE_SUCC (dstbb, i);
		  dste->probability =
		    dste->probability * dstbb->count.probability_in (dstbb->count + srcbb->count)
		    + srce->probability * srcbb->count.probability_in (dstbb->count + srcbb->count);
		}
	      dstbb->count += srcbb->count;
	    }
	}
      push_cfun (dstcfun);
      update_max_bb_count ();
      compute_function_frequency ();
      pop_cfun ();
      for (e = dst->callees; e; e = e->next_callee)
	{
	  if (e->speculative)
	    continue;
	  e->count = gimple_bb (e->call_stmt)->count;
	}
      for (e = dst->indirect_calls, e2 = src->indirect_calls; e;
	   e2 = (e2 ? e2->next_callee : NULL), e = e->next_callee)
	{
	  profile_count count = gimple_bb (e->call_stmt)->count;
	  /* When the call is speculative, we need to re-distribute the
	     probabilities the same way as they were.  This is not really
	     correct because in the other copy the speculation may differ;
	     but probably it is not really worth the effort.  */
	  if (e->speculative)
	    {
	      cgraph_edge *direct, *indirect;
	      cgraph_edge *direct2 = NULL, *indirect2 = NULL;
	      ipa_ref *ref;

	      e->speculative_call_info (direct, indirect, ref);
	      gcc_assert (e == indirect);
	      if (e2 && e2->speculative)
	        e2->speculative_call_info (direct2, indirect2, ref);
	      if (indirect->count > profile_count::zero ()
		  || direct->count > profile_count::zero ())
		{
		  /* We should mismatch earlier if there is no matching
		     indirect edge.  */
		  if (!e2)
		    {
		      if (dump_file)
		        fprintf (dump_file,
				 "Mismatch in merging indirect edges\n");
		    }
		  else if (!e2->speculative)
		    indirect->count += e2->count;
		  else if (e2->speculative)
		    {
		      if (DECL_ASSEMBLER_NAME (direct2->callee->decl)
			  != DECL_ASSEMBLER_NAME (direct->callee->decl))
			{
			  if (direct2->count >= direct->count)
			    {
			      direct->redirect_callee (direct2->callee);
			      indirect->count += indirect2->count
						 + direct->count;
			      direct->count = direct2->count;
			    }
			  else
			    indirect->count += indirect2->count + direct2->count;
			}
		      else
			{
			   direct->count += direct2->count;
			   indirect->count += indirect2->count;
			}
		    }
		}
	      else
		/* At the moment we should have only profile feedback based
		   speculations when merging.  */
		gcc_unreachable ();
	    }
	  else if (e2 && e2->speculative)
	    {
	      cgraph_edge *direct, *indirect;
	      ipa_ref *ref;

	      e2->speculative_call_info (direct, indirect, ref);
	      e->count = count;
	      e->make_speculative (direct->callee, direct->count);
	    }
	  else
	    e->count = count;
	}
      if (!preserve_body)
        src->release_body ();
      ipa_update_overall_fn_summary (dst);
    }
  /* We can't update the CFG profile, but we can scale the IPA profile.
     The CFG will be scaled according to dst->count after IPA passes.  */
  else
    scale_ipa_profile_for_fn (dst, orig_count);
  src->decl = oldsrcdecl;
}
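
/* Usage sketch (illustrative only): when two cgraph nodes are about to be
   unified, for instance during LTO symbol merging or identical code
   folding, the caller typically does something like

     ipa_merge_profiles (kept_node, discarded_node, false);

   before removing the discarded node, so its profile survives in the kept
   one.  The names `kept_node' and `discarded_node' are placeholders;
   passing true as PRESERVE_BODY keeps SRC's body around when the caller
   still needs it.  */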

/* Return true if a call to DEST is known to be a self-recursive call
   within FUNC.  */

bool
recursive_call_p (tree func, tree dest)
{
  struct cgraph_node *dest_node = cgraph_node::get_create (dest);
  struct cgraph_node *cnode = cgraph_node::get_create (func);
  ipa_ref *alias;
  enum availability avail;

  gcc_assert (!cnode->alias);
  if (cnode != dest_node->ultimate_alias_target (&avail))
    return false;
  if (avail >= AVAIL_AVAILABLE)
    return true;
  if (!dest_node->semantically_equivalent_p (cnode))
    return false;
  /* If there is only one way to call the function or we know all of them
     are semantically equivalent, we can still consider the call recursive.  */
  FOR_EACH_ALIAS (cnode, alias)
    if (!dest_node->semantically_equivalent_p (alias->referring))
      return false;
  return true;
}
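
/* Usage sketch (illustrative only): a GIMPLE pass looking at a call
   statement STMT in the current function might check

     tree callee = gimple_call_fndecl (stmt);
     if (callee && recursive_call_p (current_function_decl, callee))
       ...treat the call as self-recursion...

   where STMT is a hypothetical gcall in this sketch.  */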