/* Inlining decision heuristics.
   Copyright (C) 2003-2013 Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/*  Inlining decision heuristics

    The implementation of the inliner is organized as follows:

    inlining heuristics limits

      can_inline_edge_p allows checking that a particular inlining is allowed
      by the limits specified by the user (allowed function growth, unit
      growth and so on).

      Functions are inlined when it is obvious the result is profitable (such
      as functions called once or when inlining reduces code size).
      In addition to that we perform inlining of small functions and recursive
      inlining.

    inlining heuristics

       The inliner itself is split into two passes:

       pass_early_inlining

	 A simple local inlining pass that inlines callees into the current
	 function.  This pass makes no use of whole unit analysis and thus it
	 can make only very simple decisions based on local properties.

	 The strength of the pass is that it is run in topological order
	 (reverse postorder) on the callgraph. Functions are converted into SSA
	 form just before this pass and optimized subsequently. As a result, the
	 callees of the function seen by the early inliner have already been
	 optimized, and the results of early inlining add a lot of optimization
	 opportunities for the local optimization.

	 The pass handles the obvious inlining decisions within the compilation
	 unit - inlining auto inline functions, inlining for size and
	 flattening.

	 The main strength of the pass is the ability to eliminate abstraction
	 penalty in C++ code (via a combination of inlining and early
	 optimization) and thus improve the quality of analysis done by real IPA
	 optimizers.

	 Because of the lack of whole unit knowledge, the pass cannot really
	 make good code size/performance tradeoffs.  It however does very simple
	 speculative inlining allowing code size to grow by
	 EARLY_INLINING_INSNS when the callee is a leaf function.  In this case
	 the optimizations performed later are very likely to eliminate the
	 cost.

       pass_ipa_inline

	 This is the real inliner able to handle inlining with whole program
	 knowledge. It performs the following steps:

	 1) inlining of small functions.  This is implemented by a greedy
	 algorithm ordering all inlinable cgraph edges by their badness and
	 inlining them in this order as long as the inline limits allow doing
	 so.

	 This heuristic is not very good at inlining recursive calls. Recursive
	 calls can be inlined with results similar to loop unrolling. To do so,
	 a special purpose recursive inliner is executed on the function when a
	 recursive edge is met as a viable candidate.

	 2) Unreachable functions are removed from the callgraph.  Inlining
	 leads to devirtualization and other modifications of the callgraph, so
	 functions may become unreachable during the process. Also functions
	 declared as extern inline or virtual functions are removed, since
	 after inlining we no longer need the offline bodies.

	 3) Functions called once and not exported from the unit are inlined.
	 This should almost always lead to a reduction of code size by
	 eliminating the need for an offline copy of the function.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "flags.h"
#include "cgraph.h"
#include "diagnostic.h"
#include "gimple-pretty-print.h"
#include "params.h"
#include "fibheap.h"
#include "intl.h"
#include "tree-pass.h"
#include "coverage.h"
#include "ggc.h"
#include "rtl.h"
#include "tree-flow.h"
#include "ipa-prop.h"
#include "except.h"
#include "target.h"
#include "ipa-inline.h"
#include "ipa-utils.h"

/* Statistics we collect about the inlining algorithm.  */
static int overall_size;
static gcov_type max_count;

/* Return false when inlining edge E would lead to violating
   limits on function unit growth or stack usage growth.

   The relative function body growth limit is present generally
   to avoid problems with non-linear behavior of the compiler.
   To allow inlining huge functions into tiny wrappers, the limit
   is always based on the bigger of the two functions considered.

   For stack growth limits we always base the growth on the stack usage
   of the callers.  We want to prevent applications from segfaulting
   on stack overflow when functions with huge stack frames get
   inlined.  */

static bool
caller_growth_limits (struct cgraph_edge *e)
{
  struct cgraph_node *to = e->caller;
  struct cgraph_node *what = cgraph_function_or_thunk_node (e->callee, NULL);
  int newsize;
  int limit = 0;
  HOST_WIDE_INT stack_size_limit = 0, inlined_stack;
  struct inline_summary *info, *what_info, *outer_info = inline_summary (to);

  /* Find the function e->caller is inlined into.  While doing
     so, work out the largest function body on the way.  As
     described above, we want to base our function growth
     limits on that - not on the self size of the outer
     function, nor on the self size of the inline code
     we immediately inline into.  This is the most relaxed
     interpretation of the rule "do not grow large functions
     too much in order to prevent the compiler from exploding".  */
  while (true)
    {
      info = inline_summary (to);
      if (limit < info->self_size)
	limit = info->self_size;
      if (stack_size_limit < info->estimated_self_stack_size)
	stack_size_limit = info->estimated_self_stack_size;
      if (to->global.inlined_to)
        to = to->callers->caller;
      else
	break;
    }

  what_info = inline_summary (what);

  if (limit < what_info->self_size)
    limit = what_info->self_size;

  limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
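
  /* Illustrative arithmetic (assuming --param large-function-growth=100;
     defaults vary by release): with a largest relevant body of 400
     instructions on the path, the limit becomes 400 + 400 * 100 / 100
     = 800 instructions after inlining.  */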

  /* Check the size after inlining against the function limits.  But allow
     the function to shrink if it went over the limits by forced inlining.  */
  newsize = estimate_size_after_inlining (to, e);
  if (newsize >= info->size
      && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
      && newsize > limit)
    {
      e->inline_failed = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
      return false;
    }

  if (!what_info->estimated_stack_size)
    return true;

  /* FIXME: The stack size limit often prevents inlining in Fortran programs
     due to large I/O data structures used by the Fortran front end.
     We ought to ignore this limit when we know that the edge is executed
     on every invocation of the caller (i.e. its call statement dominates
     the exit block).  We do not track this information, yet.  */
  stack_size_limit += ((gcov_type)stack_size_limit
		       * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100);
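
  /* For example, assuming --param large-stack-frame-growth=1000 (check
     your release for the actual default): a limit of 256 bytes grows to
     256 + 256 * 1000 / 100 = 2816 bytes of allowed combined stack.  */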

  inlined_stack = (outer_info->stack_frame_offset
		   + outer_info->estimated_self_stack_size
		   + what_info->estimated_stack_size);
  /* Compare the new stack consumption with the stack consumption at the
     place the stack is used.  */
  if (inlined_stack > stack_size_limit
      /* If the function already has large stack usage from a sibling
	 inline call, we can inline, too.
	 This bit overoptimistically assumes that we are good at stack
	 packing.  */
      && inlined_stack > info->estimated_stack_size
      && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
    {
      e->inline_failed = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
      return false;
    }
  return true;
}

/* Dump info about why inlining has failed.  */

static void
report_inline_failed_reason (struct cgraph_edge *e)
{
  if (dump_file)
    {
      fprintf (dump_file, "  not inlinable: %s/%i -> %s/%i, %s\n",
	       xstrdup (cgraph_node_name (e->caller)), e->caller->uid,
	       xstrdup (cgraph_node_name (e->callee)), e->callee->uid,
	       cgraph_inline_failed_string (e->inline_failed));
    }
}

/* Decide if we can inline the edge and possibly update
   the inline_failed reason.
   We check whether inlining is possible at all and whether
   caller growth limits allow doing so.

   If REPORT is true, output the reason to the dump file.  */

static bool
can_inline_edge_p (struct cgraph_edge *e, bool report)
{
  bool inlinable = true;
  enum availability avail;
  struct cgraph_node *callee
    = cgraph_function_or_thunk_node (e->callee, &avail);
  tree caller_tree = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (e->caller->symbol.decl);
  tree callee_tree
    = callee ? DECL_FUNCTION_SPECIFIC_OPTIMIZATION (callee->symbol.decl) : NULL;
  struct function *caller_cfun = DECL_STRUCT_FUNCTION (e->caller->symbol.decl);
  struct function *callee_cfun
    = callee ? DECL_STRUCT_FUNCTION (callee->symbol.decl) : NULL;

  if (!caller_cfun && e->caller->clone_of)
    caller_cfun = DECL_STRUCT_FUNCTION (e->caller->clone_of->symbol.decl);

  if (!callee_cfun && callee && callee->clone_of)
    callee_cfun = DECL_STRUCT_FUNCTION (callee->clone_of->symbol.decl);

  gcc_assert (e->inline_failed);

  if (!callee || !callee->analyzed)
    {
      e->inline_failed = CIF_BODY_NOT_AVAILABLE;
      inlinable = false;
    }
  else if (!inline_summary (callee)->inlinable)
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINABLE;
      inlinable = false;
    }
  else if (avail <= AVAIL_OVERWRITABLE)
    {
      e->inline_failed = CIF_OVERWRITABLE;
      return false;
    }
  else if (e->call_stmt_cannot_inline_p)
    {
      e->inline_failed = CIF_MISMATCHED_ARGUMENTS;
      inlinable = false;
    }
  /* Don't inline if the functions have different EH personalities.  */
  else if (DECL_FUNCTION_PERSONALITY (e->caller->symbol.decl)
	   && DECL_FUNCTION_PERSONALITY (callee->symbol.decl)
	   && (DECL_FUNCTION_PERSONALITY (e->caller->symbol.decl)
	       != DECL_FUNCTION_PERSONALITY (callee->symbol.decl)))
    {
      e->inline_failed = CIF_EH_PERSONALITY;
      inlinable = false;
    }
  /* TM pure functions should not be inlined into non-TM_pure
     functions.  */
  else if (is_tm_pure (callee->symbol.decl)
	   && !is_tm_pure (e->caller->symbol.decl))
    {
      e->inline_failed = CIF_UNSPECIFIED;
      inlinable = false;
    }
  /* Don't inline if the callee can throw non-call exceptions but the
     caller cannot.
     FIXME: this is obviously wrong for LTO where STRUCT_FUNCTION is missing.
     Move the flag into cgraph node or mirror it in the inline summary.  */
  else if (callee_cfun && callee_cfun->can_throw_non_call_exceptions
	   && !(caller_cfun && caller_cfun->can_throw_non_call_exceptions))
    {
      e->inline_failed = CIF_NON_CALL_EXCEPTIONS;
      inlinable = false;
    }
  /* Check compatibility of target optimization options.  */
  else if (!targetm.target_option.can_inline_p (e->caller->symbol.decl,
						callee->symbol.decl))
    {
      e->inline_failed = CIF_TARGET_OPTION_MISMATCH;
      inlinable = false;
    }
  /* Check if caller growth allows the inlining.  */
  else if (!DECL_DISREGARD_INLINE_LIMITS (callee->symbol.decl)
	   && !lookup_attribute ("flatten",
				 DECL_ATTRIBUTES
				   (e->caller->global.inlined_to
				    ? e->caller->global.inlined_to->symbol.decl
				    : e->caller->symbol.decl))
           && !caller_growth_limits (e))
    inlinable = false;
  /* Don't inline a function with a higher optimization level than the
     caller.  FIXME: this is really just the tip of the iceberg of handling
     the optimization attribute.  */
  else if (caller_tree != callee_tree)
    {
      struct cl_optimization *caller_opt
	= TREE_OPTIMIZATION ((caller_tree)
			     ? caller_tree
			     : optimization_default_node);

      struct cl_optimization *callee_opt
	= TREE_OPTIMIZATION ((callee_tree)
			     ? callee_tree
			     : optimization_default_node);

      if (((caller_opt->x_optimize > callee_opt->x_optimize)
	   || (caller_opt->x_optimize_size != callee_opt->x_optimize_size))
	  /* gcc.dg/pr43564.c.  Look at forced inline even in -O0.  */
	  && !DECL_DISREGARD_INLINE_LIMITS (e->callee->symbol.decl))
	{
	  e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
	  inlinable = false;
	}
    }

  if (!inlinable && report)
    report_inline_failed_reason (e);
  return inlinable;
}


/* Return true if the edge E is inlinable during early inlining.  */

static bool
can_early_inline_edge_p (struct cgraph_edge *e)
{
  struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee,
							      NULL);
  /* The early inliner might get called at WPA stage when an IPA pass adds a
     new function.  In this case we cannot really do any early inlining
     because the function bodies are missing.  */
  if (!gimple_has_body_p (callee->symbol.decl))
    {
      e->inline_failed = CIF_BODY_NOT_AVAILABLE;
      return false;
    }
  /* In the early inliner some of the callees may not be in SSA form yet
     (i.e. the callgraph is cyclic and we did not process
     the callee with the early inliner yet).  We don't have a CIF code for
     this case; later we will re-do the decision in the real inliner.  */
  if (!gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->caller->symbol.decl))
      || !gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->symbol.decl)))
    {
      if (dump_file)
	fprintf (dump_file, "  edge not inlinable: not in SSA form\n");
      return false;
    }
  if (!can_inline_edge_p (e, true))
    return false;
  return true;
}



/* Return number of calls in N.  Ignore cheap builtins.  */

static int
num_calls (struct cgraph_node *n)
{
  struct cgraph_edge *e;
  int num = 0;

  for (e = n->callees; e; e = e->next_callee)
    if (!is_inexpensive_builtin (e->callee->symbol.decl))
      num++;
  return num;
}


/* Return true if we are interested in inlining the small function.  */

static bool
want_early_inline_function_p (struct cgraph_edge *e)
{
  bool want_inline = true;
  struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);

  if (DECL_DISREGARD_INLINE_LIMITS (callee->symbol.decl))
    ;
  else if (!DECL_DECLARED_INLINE_P (callee->symbol.decl)
	   && !flag_inline_small_functions)
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      report_inline_failed_reason (e);
      want_inline = false;
    }
  else
    {
      int growth = estimate_edge_growth (e);
      int n;

      if (growth <= 0)
	;
      else if (!cgraph_maybe_hot_edge_p (e)
	       && growth > 0)
	{
	  if (dump_file)
	    fprintf (dump_file, "  will not early inline: %s/%i->%s/%i, "
		     "call is cold and code would grow by %i\n",
		     xstrdup (cgraph_node_name (e->caller)), e->caller->uid,
		     xstrdup (cgraph_node_name (callee)), callee->uid,
		     growth);
	  want_inline = false;
	}
      else if (growth > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
	{
	  if (dump_file)
	    fprintf (dump_file, "  will not early inline: %s/%i->%s/%i, "
		     "growth %i exceeds --param early-inlining-insns\n",
		     xstrdup (cgraph_node_name (e->caller)), e->caller->uid,
		     xstrdup (cgraph_node_name (callee)), callee->uid,
		     growth);
	  want_inline = false;
	}
      else if ((n = num_calls (callee)) != 0
	       && growth * (n + 1) > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
	{
	  if (dump_file)
	    fprintf (dump_file, "  will not early inline: %s/%i->%s/%i, "
		     "growth %i exceeds --param early-inlining-insns "
		     "divided by number of calls\n",
		     xstrdup (cgraph_node_name (e->caller)), e->caller->uid,
		     xstrdup (cgraph_node_name (callee)), callee->uid,
		     growth);
	  want_inline = false;
	}
    }
  return want_inline;
}
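
/* A worked illustration of the scaled test above, assuming a hypothetical
   --param early-inlining-insns=11 (the actual default varies by release):
   a callee with growth 4 that itself makes 3 non-builtin calls is rejected
   because 4 * (3 + 1) = 16 > 11, while a leaf callee (num_calls == 0)
   skips the scaled test entirely.  */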

/* Compute the time of the edge->caller + edge->callee execution when
   inlining does not happen.  */

inline gcov_type
compute_uninlined_call_time (struct inline_summary *callee_info,
			     struct cgraph_edge *edge)
{
  gcov_type uninlined_call_time =
    RDIV ((gcov_type)callee_info->time * MAX (edge->frequency, 1),
	  CGRAPH_FREQ_BASE);
  gcov_type caller_time = inline_summary (edge->caller->global.inlined_to
				          ? edge->caller->global.inlined_to
				          : edge->caller)->time;
  return uninlined_call_time + caller_time;
}

/* Same as compute_uninlined_call_time but compute time when inlining
   does happen.  */

inline gcov_type
compute_inlined_call_time (struct cgraph_edge *edge,
			   int edge_time)
{
  gcov_type caller_time = inline_summary (edge->caller->global.inlined_to
					  ? edge->caller->global.inlined_to
					  : edge->caller)->time;
  gcov_type time = (caller_time
		    + RDIV (((gcov_type) edge_time
			     - inline_edge_summary (edge)->call_stmt_time)
		    * MAX (edge->frequency, 1), CGRAPH_FREQ_BASE));
  /* Possibly one roundoff error, but watch for overflows.  */
  gcc_checking_assert (time >= INT_MIN / 2);
  if (time < 0)
    time = 0;
  return time;
}
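
/* Worked example for the two helpers above (illustrative numbers only):
   with caller time 1000, callee time 60, call_stmt_time 10, estimated
   edge_time 40 and edge->frequency equal to CGRAPH_FREQ_BASE, the
   uninlined time is 1000 + 60 = 1060, while the inlined time is
   1000 + (40 - 10) = 1030, a saving of 30 units.  */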

/* Return true if the speedup for inlining E is bigger than
   PARAM_INLINE_MIN_SPEEDUP.  */

static bool
big_speedup_p (struct cgraph_edge *e)
{
  gcov_type time = compute_uninlined_call_time (inline_summary (e->callee),
					  	e);
  gcov_type inlined_time = compute_inlined_call_time (e,
					              estimate_edge_time (e));
  if (time - inlined_time
      > RDIV (time * PARAM_VALUE (PARAM_INLINE_MIN_SPEEDUP), 100))
    return true;
  return false;
}
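
/* Continuing the example above, and assuming --param inline-min-speedup=10
   (check your release for the default): times of 1060 versus 1030 give a
   saving of 30, which fails 30 > 1060 * 10 / 100 = 106, so this edge would
   not count as a big speedup.  */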

/* Return true if we are interested in inlining the small function.
   When REPORT is true, report the reason to the dump file.  */

static bool
want_inline_small_function_p (struct cgraph_edge *e, bool report)
{
  bool want_inline = true;
  struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);

  if (DECL_DISREGARD_INLINE_LIMITS (callee->symbol.decl))
    ;
  else if (!DECL_DECLARED_INLINE_P (callee->symbol.decl)
	   && !flag_inline_small_functions)
    {
      e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
      want_inline = false;
    }
  else
    {
      int growth = estimate_edge_growth (e);
      inline_hints hints = estimate_edge_hints (e);
      bool big_speedup = big_speedup_p (e);

      if (growth <= 0)
	;
      /* Apply MAX_INLINE_INSNS_SINGLE limit.  Do not do so when
	 hints suggest that inlining the given function is very profitable.  */
      else if (DECL_DECLARED_INLINE_P (callee->symbol.decl)
	       && growth >= MAX_INLINE_INSNS_SINGLE
	       && !big_speedup
	       && !(hints & (INLINE_HINT_indirect_call
			     | INLINE_HINT_loop_iterations
			     | INLINE_HINT_array_index
			     | INLINE_HINT_loop_stride)))
	{
          e->inline_failed = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
	  want_inline = false;
	}
      /* Before giving up based on the fact that caller size will grow, allow
         functions that are called a few times and where eliminating the
	 offline copy will lead to overall code size reduction.
	 Not all of these will be handled by subsequent inlining of functions
	 called once: in particular weak functions are not handled, nor are
	 functions that inline into multiple calls but where a lot of the
	 bodies is optimized out.
	 Finally we want to inline earlier to allow inlining of callbacks.

	 This is slightly wrong on the aggressive side: it is entirely
	 possible that a function is called many times with a context where
	 inlining reduces code size and a few times with a context where
	 inlining increases code size.  The resulting growth estimate will be
	 negative even if it would make more sense to keep the offline copy
	 and not inline into the call sites that make the code size grow.

	 When badness orders the calls in a way that code reducing calls come
	 first, this situation is not a problem at all: after inlining all
	 "good" calls, we will realize that keeping the function around is
	 better.  */
      else if (growth <= MAX_INLINE_INSNS_SINGLE
	       /* Unlike for functions called once, we play unsafe with
		  COMDATs.  We can allow that since we know functions
		  in consideration are small (and thus the risk is small) and
		  moreover growth estimates already account for the fact that
		  COMDAT functions may or may not disappear when eliminated
		  from the current unit.  With good probability making the
		  aggressive choice in all units is going to make the overall
		  program smaller.

		  Consequently we ask cgraph_can_remove_if_no_direct_calls_p
		  instead of
		  cgraph_will_be_removed_from_program_if_no_direct_calls  */
	        && !DECL_EXTERNAL (callee->symbol.decl)
		&& cgraph_can_remove_if_no_direct_calls_p (callee)
		&& estimate_growth (callee) <= 0)
	;
      else if (!DECL_DECLARED_INLINE_P (callee->symbol.decl)
	       && !flag_inline_functions)
	{
          e->inline_failed = CIF_NOT_DECLARED_INLINED;
	  want_inline = false;
	}
      /* Apply MAX_INLINE_INSNS_AUTO limit for functions not declared inline.
	 Upgrade it to MAX_INLINE_INSNS_SINGLE when hints suggest that
	 inlining the given function is very profitable.  */
      else if (!DECL_DECLARED_INLINE_P (callee->symbol.decl)
	       && !big_speedup
	       && growth >= ((hints & (INLINE_HINT_indirect_call
				       | INLINE_HINT_loop_iterations
			               | INLINE_HINT_array_index
				       | INLINE_HINT_loop_stride))
			     ? MAX (MAX_INLINE_INSNS_AUTO,
				    MAX_INLINE_INSNS_SINGLE)
			     : MAX_INLINE_INSNS_AUTO))
	{
          e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
	  want_inline = false;
	}
      /* If the call is cold, do not inline when the function body would
	 grow.  */
      else if (!cgraph_maybe_hot_edge_p (e))
	{
          e->inline_failed = CIF_UNLIKELY_CALL;
	  want_inline = false;
	}
    }
  if (!want_inline && report)
    report_inline_failed_reason (e);
  return want_inline;
}

/* EDGE is a self-recursive edge.
   We handle two cases - when function A is inlining into itself
   or when function A is being inlined into another inliner copy of function
   A within function B.

   In the first case OUTER_NODE points to the toplevel copy of A, while
   in the second case OUTER_NODE points to the outermost copy of A in B.

   In both cases we want to be extra selective since
   inlining the call will just introduce new recursive calls.  */

static bool
want_inline_self_recursive_call_p (struct cgraph_edge *edge,
				   struct cgraph_node *outer_node,
				   bool peeling,
				   int depth)
636   char const *reason = NULL;
637   bool want_inline = true;
638   int caller_freq = CGRAPH_FREQ_BASE;
639   int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
640 
641   if (DECL_DECLARED_INLINE_P (edge->caller->symbol.decl))
642     max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
643 
644   if (!cgraph_maybe_hot_edge_p (edge))
645     {
646       reason = "recursive call is cold";
647       want_inline = false;
648     }
649   else if (max_count && !outer_node->count)
650     {
651       reason = "not executed in profile";
652       want_inline = false;
653     }
654   else if (depth > max_depth)
655     {
656       reason = "--param max-inline-recursive-depth exceeded.";
657       want_inline = false;
658     }
659 
660   if (outer_node->global.inlined_to)
661     caller_freq = outer_node->callers->frequency;
662 
663   if (!want_inline)
664     ;
  /* Inlining of a self-recursive function into a copy of itself within
     another function is a transformation similar to loop peeling.

     Peeling is profitable if we can inline enough copies to make the
     probability of an actual call to the self-recursive function very
     small.  Be sure that the probability of recursion is small.

     We ensure that the frequency of recursing is at most 1 - (1/max_depth).
     This way the expected number of recursions is at most max_depth.  */
  else if (peeling)
    {
      int max_prob = CGRAPH_FREQ_BASE - ((CGRAPH_FREQ_BASE + max_depth - 1)
					 / max_depth);
      int i;
      for (i = 1; i < depth; i++)
	max_prob = max_prob * max_prob / CGRAPH_FREQ_BASE;
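      /* Illustration: with CGRAPH_FREQ_BASE == 1000 and max_depth == 8,
	 max_prob starts at 1000 - (1000 + 7) / 8 = 875, i.e. we accept a
	 recursion probability of at most 87.5%; at depth 2 it tightens to
	 875 * 875 / 1000 = 765, and so on geometrically.  */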
      if (max_count
	  && (edge->count * CGRAPH_FREQ_BASE / outer_node->count
	      >= max_prob))
	{
	  reason = "profile of recursive call is too large";
	  want_inline = false;
	}
      if (!max_count
	  && (edge->frequency * CGRAPH_FREQ_BASE / caller_freq
	      >= max_prob))
	{
	  reason = "frequency of recursive call is too large";
	  want_inline = false;
	}
    }
  /* Recursive inlining, i.e. the equivalent of loop unrolling, is profitable
     if the recursion depth is large.  We reduce function call overhead and
     increase the chance that things fit in the hardware return predictor.

     Recursive inlining might however increase the cost of stack frame setup,
     actually slowing down functions whose recursion tree is wide rather than
     deep.

     Deciding reliably when to do recursive inlining without profile feedback
     is tricky.  For now we disable recursive inlining when the probability of
     self recursion is low.

     Recursive inlining of a self-recursive call within a loop also results
     in large loop depths that generally optimize badly.  We may want to
     throttle down inlining in those cases.  In particular this seems to
     happen in one of the libstdc++ rb tree methods.  */
  else
    {
      if (max_count
	  && (edge->count * 100 / outer_node->count
	      <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
	{
	  reason = "profile of recursive call is too small";
	  want_inline = false;
	}
      else if (!max_count
	       && (edge->frequency * 100 / caller_freq
	           <= PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY)))
	{
	  reason = "frequency of recursive call is too small";
	  want_inline = false;
	}
    }
  if (!want_inline && dump_file)
    fprintf (dump_file, "   not inlining recursively: %s\n", reason);
  return want_inline;
}

/* Return true when NODE has a caller other than EDGE.
   Worker for cgraph_for_node_and_aliases.  */

static bool
check_caller_edge (struct cgraph_node *node, void *edge)
{
  return (node->callers
          && node->callers != edge);
}


/* Decide if inlining NODE would reduce unit size by eliminating
   the offline copy of the function.
   When COLD is true, cold calls are considered, too.  */

static bool
want_inline_function_to_all_callers_p (struct cgraph_node *node, bool cold)
{
   struct cgraph_node *function = cgraph_function_or_thunk_node (node, NULL);
   struct cgraph_edge *e;
   bool has_hot_call = false;

   /* Does it have callers?  */
   if (!node->callers)
     return false;
   /* Already inlined?  */
   if (function->global.inlined_to)
     return false;
   if (cgraph_function_or_thunk_node (node, NULL) != node)
     return false;
   /* Would inlining into all callers increase size?  */
   if (estimate_growth (node) > 0)
     return false;
   /* Maybe other aliases have more direct calls.  */
   if (cgraph_for_node_and_aliases (node, check_caller_edge, node->callers, true))
     return false;
   /* All inlines must be possible.  */
   for (e = node->callers; e; e = e->next_caller)
     {
       if (!can_inline_edge_p (e, true))
         return false;
       if (!has_hot_call && cgraph_maybe_hot_edge_p (e))
	 has_hot_call = true;
     }

   if (!cold && !has_hot_call)
     return false;
   return true;
}


#define RELATIVE_TIME_BENEFIT_RANGE (INT_MAX / 64)

/* Return relative time improvement for inlining EDGE in range
   1...RELATIVE_TIME_BENEFIT_RANGE.  */

static inline int
relative_time_benefit (struct inline_summary *callee_info,
		       struct cgraph_edge *edge,
		       int edge_time)
{
  gcov_type relbenefit;
  gcov_type uninlined_call_time = compute_uninlined_call_time (callee_info, edge);
  gcov_type inlined_call_time = compute_inlined_call_time (edge, edge_time);

  /* Inlining into an extern inline function is not a win.  */
  if (DECL_EXTERNAL (edge->caller->global.inlined_to
		     ? edge->caller->global.inlined_to->symbol.decl
		     : edge->caller->symbol.decl))
    return 1;

  /* Watch overflows.  */
  gcc_checking_assert (uninlined_call_time >= 0);
  gcc_checking_assert (inlined_call_time >= 0);
  gcc_checking_assert (uninlined_call_time >= inlined_call_time);

  /* Compute relative time benefit, i.e. how much the call becomes faster.
     ??? Perhaps computing how much the caller+callee together become faster
     would lead to more realistic results.  */
  if (!uninlined_call_time)
    uninlined_call_time = 1;
  relbenefit =
    RDIV (((gcov_type)uninlined_call_time - inlined_call_time) * RELATIVE_TIME_BENEFIT_RANGE,
	  uninlined_call_time);
  relbenefit = MIN (relbenefit, RELATIVE_TIME_BENEFIT_RANGE);
  gcc_checking_assert (relbenefit >= 0);
  relbenefit = MAX (relbenefit, 1);
  return relbenefit;
}
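
/* Worked example (illustrative numbers): with uninlined_call_time 1060 and
   inlined_call_time 1030, the scaled benefit is
   (1060 - 1030) * RELATIVE_TIME_BENEFIT_RANGE / 1060, i.e. roughly 2.8% of
   the representable range, and it is always clamped to at least 1.  */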


/* A cost model driving the inlining heuristics in a way so that edges with
   the smallest badness are inlined first.  After each inlining is performed
   the costs of all caller edges of the nodes affected are recomputed so the
   metrics may accurately depend on values such as the number of inlinable
   callers of the function or the function body size.  */

static int
edge_badness (struct cgraph_edge *edge, bool dump)
{
  gcov_type badness;
  int growth, edge_time;
  struct cgraph_node *callee = cgraph_function_or_thunk_node (edge->callee,
							      NULL);
  struct inline_summary *callee_info = inline_summary (callee);
  inline_hints hints;

  if (DECL_DISREGARD_INLINE_LIMITS (callee->symbol.decl))
    return INT_MIN;

  growth = estimate_edge_growth (edge);
  edge_time = estimate_edge_time (edge);
  hints = estimate_edge_hints (edge);
  gcc_checking_assert (edge_time >= 0);
  gcc_checking_assert (edge_time <= callee_info->time);
  gcc_checking_assert (growth <= callee_info->size);

  if (dump)
    {
      fprintf (dump_file, "    Badness calculation for %s/%i -> %s/%i\n",
	       xstrdup (cgraph_node_name (edge->caller)),
	       edge->caller->uid,
	       xstrdup (cgraph_node_name (callee)),
	       edge->callee->uid);
      fprintf (dump_file, "      size growth %i, time %i ",
	       growth,
	       edge_time);
      dump_inline_hints (dump_file, hints);
      if (big_speedup_p (edge))
	fprintf (dump_file, " big_speedup");
      fprintf (dump_file, "\n");
    }

  /* Always prefer inlining saving code size.  */
  if (growth <= 0)
    {
      badness = INT_MIN / 2 + growth;
      if (dump)
	fprintf (dump_file, "      %i: Growth %i <= 0\n", (int) badness,
		 growth);
    }

  /* When profiling is available, compute badness as:

	        relative_edge_count * relative_time_benefit
     goodness = -------------------------------------------
		growth_f_caller
     badness = -goodness

    The fraction is upside down, because on edge counts and time benefits
    the bounds are known. Edge growth is essentially unlimited.  */


  else if (max_count)
    {
      int relbenefit = relative_time_benefit (callee_info, edge, edge_time);
      badness =
	((int)
	 ((double) edge->count * INT_MIN / 2 / max_count / RELATIVE_TIME_BENEFIT_RANGE) *
	 relbenefit) / growth;
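
      /* Sketch of the scaling (illustrative): an edge whose count equals
	 max_count and whose relative benefit is maximal maps to roughly
	 INT_MIN / 2 / growth, so hotter and more beneficial edges receive
	 more negative (better) badness, moderated by the growth divisor.  */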

      /* Be sure that insanity of the profile won't lead to increasing counts
	 in the scaling and thus to overflow in the computation above.  */
      gcc_assert (max_count >= edge->count);
      if (dump)
	{
	  fprintf (dump_file,
		   "      %i (relative %f): profile info. Relative count %f"
		   " * Relative benefit %f\n",
		   (int) badness, (double) badness / INT_MIN,
		   (double) edge->count / max_count,
		   relbenefit * 100.0 / RELATIVE_TIME_BENEFIT_RANGE);
	}
    }

  /* When the function local profile is available, compute badness as:

                 relative_time_benefit
     goodness =  ---------------------------------
	         growth_of_caller * overall_growth

     badness = - goodness

     compensated by the inline hints.
  */
  else if (flag_guess_branch_prob)
    {
      badness = (relative_time_benefit (callee_info, edge, edge_time)
		 * (INT_MIN / 16 / RELATIVE_TIME_BENEFIT_RANGE));
      badness /= (MIN (65536/2, growth) * MIN (65536/2, MAX (1, callee_info->growth)));
      gcc_checking_assert (badness <= 0 && badness >= INT_MIN / 16);
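
      /* Illustration of the magnitudes involved: a relative benefit of half
	 the range with growth 16 and callee_info->growth 4 yields roughly
	 (INT_MIN / 32) / (16 * 4), a moderately negative badness that the
	 hint-based multipliers below then refine.  */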
      if ((hints & (INLINE_HINT_indirect_call
		    | INLINE_HINT_loop_iterations
	            | INLINE_HINT_array_index
		    | INLINE_HINT_loop_stride))
	  || callee_info->growth <= 0)
	badness *= 8;
      if (hints & (INLINE_HINT_same_scc))
	badness /= 16;
      else if (hints & (INLINE_HINT_in_scc))
	badness /= 8;
      else if (hints & (INLINE_HINT_cross_module))
	badness /= 2;
      gcc_checking_assert (badness <= 0 && badness >= INT_MIN / 2);
      if ((hints & INLINE_HINT_declared_inline) && badness >= INT_MIN / 32)
	badness *= 16;
      if (dump)
	{
	  fprintf (dump_file,
		   "      %i: guessed profile. frequency %f,"
		   " benefit %f%%, time w/o inlining %i, time w inlining %i"
		   " overall growth %i (current) %i (original)\n",
		   (int) badness, (double)edge->frequency / CGRAPH_FREQ_BASE,
		   relative_time_benefit (callee_info, edge, edge_time) * 100.0
		   / RELATIVE_TIME_BENEFIT_RANGE,
		   (int)compute_uninlined_call_time (callee_info, edge),
		   (int)compute_inlined_call_time (edge, edge_time),
		   estimate_growth (callee),
		   callee_info->growth);
	}
    }
  /* When the function local profile is not available or it does not give
     useful information (i.e. frequency is zero), base the cost on
     loop nest and overall size growth, so we optimize for the overall number
     of functions fully inlined in the program.  */
  else
    {
      int nest = MIN (inline_edge_summary (edge)->loop_depth, 8);
      badness = growth * 256;

      /* Decrease badness if call is nested.  */
      if (badness > 0)
	badness >>= nest;
      else
	badness <<= nest;
      if (dump)
	fprintf (dump_file, "      %i: no profile. nest %i\n", (int) badness,
		 nest);
    }

  /* Ensure that we did not overflow in all the fixed point math above.  */
  gcc_assert (badness >= INT_MIN);
  gcc_assert (badness <= INT_MAX - 1);
  /* Make recursive inlining happen always after other inlining is done.  */
  if (cgraph_edge_recursive_p (edge))
    return badness + 1;
  else
    return badness;
}

/* Recompute badness of EDGE and update its key in HEAP if needed.  */
static inline void
update_edge_key (fibheap_t heap, struct cgraph_edge *edge)
{
  int badness = edge_badness (edge, false);
  if (edge->aux)
    {
      fibnode_t n = (fibnode_t) edge->aux;
      gcc_checking_assert (n->data == edge);

      /* fibheap_replace_key only decreases keys.
	 When we increase the key, we do not update the heap
	 and instead re-insert the element once it becomes
	 a minimum of the heap.  */
      if (badness < n->key)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file,
		       "  decreasing badness %s/%i -> %s/%i, %i to %i\n",
		       xstrdup (cgraph_node_name (edge->caller)),
		       edge->caller->uid,
		       xstrdup (cgraph_node_name (edge->callee)),
		       edge->callee->uid,
		       (int)n->key,
		       badness);
	    }
	  fibheap_replace_key (heap, n, badness);
	  gcc_checking_assert (n->key == badness);
	}
    }
  else
    {
       if (dump_file && (dump_flags & TDF_DETAILS))
	 {
	   fprintf (dump_file,
		    "  enqueuing call %s/%i -> %s/%i, badness %i\n",
		    xstrdup (cgraph_node_name (edge->caller)),
		    edge->caller->uid,
		    xstrdup (cgraph_node_name (edge->callee)),
		    edge->callee->uid,
		    badness);
	 }
      edge->aux = fibheap_insert (heap, badness, edge);
    }
}


/* NODE was inlined.
   All caller edges need to be reset because
   size estimates change.  Similarly callees need to be reset
   because a better context may be known.  */

static void
reset_edge_caches (struct cgraph_node *node)
{
  struct cgraph_edge *edge;
  struct cgraph_edge *e = node->callees;
  struct cgraph_node *where = node;
  int i;
  struct ipa_ref *ref;

  if (where->global.inlined_to)
    where = where->global.inlined_to;

  /* WHERE body size has changed, the cached growth is invalid.  */
  reset_node_growth_cache (where);

  for (edge = where->callers; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      reset_edge_growth_cache (edge);
  for (i = 0; ipa_ref_list_referring_iterate (&where->symbol.ref_list,
					      i, ref); i++)
    if (ref->use == IPA_REF_ALIAS)
      reset_edge_caches (ipa_ref_referring_node (ref));

  if (!e)
    return;

  while (true)
    if (!e->inline_failed && e->callee->callees)
      e = e->callee->callees;
    else
      {
	if (e->inline_failed)
	  reset_edge_growth_cache (e);
	if (e->next_callee)
	  e = e->next_callee;
	else
	  {
	    do
	      {
		if (e->caller == node)
		  return;
		e = e->caller->callers;
	      }
	    while (!e->next_callee);
	    e = e->next_callee;
	  }
      }
}

/* Recompute HEAP nodes for each caller of NODE.
   UPDATED_NODES tracks nodes we already visited, to avoid redundant work.
   When CHECK_INLINABLITY_FOR is set, re-check only that edge for
   inlinability; otherwise check all edges.  */

static void
update_caller_keys (fibheap_t heap, struct cgraph_node *node,
		    bitmap updated_nodes,
		    struct cgraph_edge *check_inlinablity_for)
{
  struct cgraph_edge *edge;
  int i;
  struct ipa_ref *ref;

  if ((!node->alias && !inline_summary (node)->inlinable)
      || cgraph_function_body_availability (node) <= AVAIL_OVERWRITABLE
      || node->global.inlined_to)
    return;
  if (!bitmap_set_bit (updated_nodes, node->uid))
    return;

  for (i = 0; ipa_ref_list_referring_iterate (&node->symbol.ref_list,
					      i, ref); i++)
    if (ref->use == IPA_REF_ALIAS)
      {
	struct cgraph_node *alias = ipa_ref_referring_node (ref);
        update_caller_keys (heap, alias, updated_nodes, check_inlinablity_for);
      }

  for (edge = node->callers; edge; edge = edge->next_caller)
    if (edge->inline_failed)
      {
        if (!check_inlinablity_for
	    || check_inlinablity_for == edge)
	  {
	    if (can_inline_edge_p (edge, false)
		&& want_inline_small_function_p (edge, false))
	      update_edge_key (heap, edge);
	    else if (edge->aux)
	      {
		report_inline_failed_reason (edge);
		fibheap_delete_node (heap, (fibnode_t) edge->aux);
		edge->aux = NULL;
	      }
	  }
	else if (edge->aux)
	  update_edge_key (heap, edge);
      }
}

/* Recompute HEAP nodes for each uninlined call in NODE.
   This is used when we know that edge badnesses are going only to increase
   (we introduced a new call site) and thus all we need is to insert the
   newly created edges into the heap.  */

static void
update_callee_keys (fibheap_t heap, struct cgraph_node *node,
		    bitmap updated_nodes)
{
  struct cgraph_edge *e = node->callees;

  if (!e)
    return;
  while (true)
    if (!e->inline_failed && e->callee->callees)
      e = e->callee->callees;
    else
      {
	enum availability avail;
	struct cgraph_node *callee;
	/* We do not reset the callee growth cache here.  Since we added a new
	   call, growth could have only increased and consequently the badness
	   metric doesn't need updating.  */
	if (e->inline_failed
	    && (callee = cgraph_function_or_thunk_node (e->callee, &avail))
	    && inline_summary (callee)->inlinable
	    && cgraph_function_body_availability (callee) >= AVAIL_AVAILABLE
	    && !bitmap_bit_p (updated_nodes, callee->uid))
	  {
	    if (can_inline_edge_p (e, false)
		&& want_inline_small_function_p (e, false))
	      update_edge_key (heap, e);
	    else if (e->aux)
	      {
		report_inline_failed_reason (e);
		fibheap_delete_node (heap, (fibnode_t) e->aux);
		e->aux = NULL;
	      }
	  }
	if (e->next_callee)
	  e = e->next_callee;
	else
	  {
	    do
	      {
		if (e->caller == node)
		  return;
		e = e->caller->callers;
	      }
	    while (!e->next_callee);
	    e = e->next_callee;
	  }
      }
}

/* Enqueue all recursive calls from NODE into the priority queue depending on
   how likely we want to recursively inline the call.  */

static void
lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
			fibheap_t heap)
{
  struct cgraph_edge *e;
  enum availability avail;

  for (e = where->callees; e; e = e->next_callee)
    if (e->callee == node
	|| (cgraph_function_or_thunk_node (e->callee, &avail) == node
	    && avail > AVAIL_OVERWRITABLE))
      {
	/* When profile feedback is available, prioritize by expected number
	   of calls.  */
        fibheap_insert (heap,
			!max_count ? -e->frequency
		        : -(e->count / ((max_count + (1<<24) - 1) / (1<<24))),
		        e);
      }
  for (e = where->callees; e; e = e->next_callee)
    if (!e->inline_failed)
      lookup_recursive_calls (node, e->callee, heap);
}

/* Decide on recursive inlining: in the case the function has recursive
   calls, inline until body size reaches the given argument.  If any new
   indirect edges are discovered in the process, add them to *NEW_EDGES,
   unless NEW_EDGES is NULL.  */

static bool
recursive_inlining (struct cgraph_edge *edge,
		    vec<cgraph_edge_p> *new_edges)
{
  int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
  fibheap_t heap;
  struct cgraph_node *node;
  struct cgraph_edge *e;
  struct cgraph_node *master_clone = NULL, *next;
  int depth = 0;
  int n = 0;

  node = edge->caller;
  if (node->global.inlined_to)
    node = node->global.inlined_to;

  if (DECL_DECLARED_INLINE_P (node->symbol.decl))
    limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);

  /* Make sure that the function is small enough to be considered for
     inlining.  */
  if (estimate_size_after_inlining (node, edge) >= limit)
    return false;
  heap = fibheap_new ();
  lookup_recursive_calls (node, node, heap);
  if (fibheap_empty (heap))
    {
      fibheap_delete (heap);
      return false;
    }

  if (dump_file)
    fprintf (dump_file,
	     "  Performing recursive inlining on %s\n",
	     cgraph_node_name (node));

  /* Do the inlining and update the list of recursive calls during the
     process.  */
  while (!fibheap_empty (heap))
    {
      struct cgraph_edge *curr
	= (struct cgraph_edge *) fibheap_extract_min (heap);
      struct cgraph_node *cnode, *dest = curr->callee;

      if (!can_inline_edge_p (curr, true))
	continue;

      /* MASTER_CLONE is produced in the case we already started modifying
	 the function.  Be sure to redirect the edge to the original body
	 before estimating growths; otherwise we will be seeing growths after
	 inlining the already modified body.  */
      if (master_clone)
	{
          cgraph_redirect_edge_callee (curr, master_clone);
          reset_edge_growth_cache (curr);
	}

      if (estimate_size_after_inlining (node, curr) > limit)
	{
	  cgraph_redirect_edge_callee (curr, dest);
	  reset_edge_growth_cache (curr);
	  break;
	}

      depth = 1;
      for (cnode = curr->caller;
	   cnode->global.inlined_to; cnode = cnode->callers->caller)
	if (node->symbol.decl
	    == cgraph_function_or_thunk_node (curr->callee, NULL)->symbol.decl)
          depth++;

      if (!want_inline_self_recursive_call_p (curr, node, false, depth))
	{
	  cgraph_redirect_edge_callee (curr, dest);
	  reset_edge_growth_cache (curr);
	  continue;
	}

      if (dump_file)
	{
	  fprintf (dump_file,
		   "   Inlining call of depth %i", depth);
	  if (node->count)
	    {
	      fprintf (dump_file, " called approx. %.2f times per call",
		       (double)curr->count / node->count);
	    }
	  fprintf (dump_file, "\n");
	}
      if (!master_clone)
	{
	  /* We need the original clone to copy around.  */
	  master_clone = cgraph_clone_node (node, node->symbol.decl,
					    node->count, CGRAPH_FREQ_BASE,
					    false, vNULL, true);
	  for (e = master_clone->callees; e; e = e->next_callee)
	    if (!e->inline_failed)
	      clone_inlined_nodes (e, true, false, NULL);
          cgraph_redirect_edge_callee (curr, master_clone);
          reset_edge_growth_cache (curr);
	}

      inline_call (curr, false, new_edges, &overall_size, true);
      lookup_recursive_calls (node, curr->callee, heap);
      n++;
    }

  if (!fibheap_empty (heap) && dump_file)
    fprintf (dump_file, "    Recursive inlining growth limit met.\n");
  fibheap_delete (heap);

  if (!master_clone)
    return false;

  if (dump_file)
    fprintf (dump_file,
	     "\n   Inlined %i times, "
	     "body grown from size %i to %i, time %i to %i\n", n,
	     inline_summary (master_clone)->size, inline_summary (node)->size,
	     inline_summary (master_clone)->time, inline_summary (node)->time);

  /* Remove the master clone we used for inlining.  We rely on the fact that
     clones inlined into the master clone get queued just before the master
     clone, so we don't need recursion.  */
  for (node = cgraph_first_function (); node != master_clone;
       node = next)
    {
      next = cgraph_next_function (node);
      if (node->global.inlined_to == master_clone)
	cgraph_remove_node (node);
    }
  cgraph_remove_node (master_clone);
  return true;
}


/* Given whole compilation unit estimate of INSNS, compute how large we can
   allow the unit to grow.  */

static int
compute_max_insns (int insns)
{
  int max_insns = insns;
  if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
    max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);

  return ((HOST_WIDEST_INT) max_insns
	  * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
}
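
/* Worked example, assuming --param inline-unit-growth=30 (defaults vary by
   release): a unit of 10000 insns may grow to 10000 * 130 / 100 = 13000
   insns; tiny units are first rounded up to PARAM_LARGE_UNIT_INSNS so that
   small programs get some headroom.  */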


/* Compute badness of all edges in NEW_EDGES and add them to the HEAP.  */

static void
add_new_edges_to_heap (fibheap_t heap, vec<cgraph_edge_p> new_edges)
{
  while (new_edges.length () > 0)
    {
      struct cgraph_edge *edge = new_edges.pop ();

      gcc_assert (!edge->aux);
      if (edge->inline_failed
	  && can_inline_edge_p (edge, true)
	  && want_inline_small_function_p (edge, true))
        edge->aux = fibheap_insert (heap, edge_badness (edge, false), edge);
    }
}


/* We use a greedy algorithm for inlining of small functions:
   all inline candidates are put into a prioritized heap ordered in
   increasing badness.

   The inlining of small functions is bounded by unit growth parameters.  */

static void
inline_small_functions (void)
{
  struct cgraph_node *node;
  struct cgraph_edge *edge;
  fibheap_t edge_heap = fibheap_new ();
  bitmap updated_nodes = BITMAP_ALLOC (NULL);
  int min_size, max_size;
  vec<cgraph_edge_p> new_indirect_edges = vNULL;
  int initial_size = 0;
  struct cgraph_node **order = XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);

  if (flag_indirect_inlining)
    new_indirect_edges.create (8);

  /* Compute overall unit size and other global parameters used by badness
     metrics.  */

  max_count = 0;
  ipa_reduced_postorder (order, true, true, NULL);
  free (order);

  FOR_EACH_DEFINED_FUNCTION (node)
    if (!node->global.inlined_to)
      {
	if (cgraph_function_with_gimple_body_p (node)
	    || node->thunk.thunk_p)
	  {
	    struct inline_summary *info = inline_summary (node);
	    struct ipa_dfs_info *dfs = (struct ipa_dfs_info *) node->symbol.aux;

	    if (!DECL_EXTERNAL (node->symbol.decl))
	      initial_size += info->size;
	    info->growth = estimate_growth (node);
	    if (dfs && dfs->next_cycle)
	      {
		struct cgraph_node *n2;
		int id = dfs->scc_no + 1;
		for (n2 = node; n2;
		     n2 = ((struct ipa_dfs_info *) node->symbol.aux)->next_cycle)
		  {
		    struct inline_summary *info2 = inline_summary (n2);
		    if (info2->scc_no)
		      break;
		    info2->scc_no = id;
		  }
	      }
	  }

	for (edge = node->callers; edge; edge = edge->next_caller)
	  if (max_count < edge->count)
	    max_count = edge->count;
      }
  ipa_free_postorder_info ();
  initialize_growth_caches ();

  if (dump_file)
    fprintf (dump_file,
	     "\nDeciding on inlining of small functions.  Starting with size %i.\n",
	     initial_size);

  overall_size = initial_size;
  max_size = compute_max_insns (overall_size);
  min_size = overall_size;

  /* Populate the heap with all edges we might inline.  */

  FOR_EACH_DEFINED_FUNCTION (node)
    if (!node->global.inlined_to)
      {
	if (dump_file)
	  fprintf (dump_file, "Enqueueing calls of %s/%i.\n",
		   cgraph_node_name (node), node->uid);

	for (edge = node->callers; edge; edge = edge->next_caller)
	  if (edge->inline_failed
	      && can_inline_edge_p (edge, true)
	      && want_inline_small_function_p (edge, true))
	    {
	      gcc_assert (!edge->aux);
	      update_edge_key (edge_heap, edge);
	    }
      }

  gcc_assert (in_lto_p
	      || !max_count
	      || (profile_info && flag_branch_probabilities));

  while (!fibheap_empty (edge_heap))
    {
      int old_size = overall_size;
      struct cgraph_node *where, *callee;
      int badness = fibheap_min_key (edge_heap);
      int current_badness;
      int cached_badness;
      int growth;

      edge = (struct cgraph_edge *) fibheap_extract_min (edge_heap);
      gcc_assert (edge->aux);
      edge->aux = NULL;
      if (!edge->inline_failed)
	continue;

      /* Be sure that the caches are kept consistent.
	 We cannot make this ENABLE_CHECKING only because it causes
	 different updates of the fibheap queue.  */
      cached_badness = edge_badness (edge, false);
      reset_edge_growth_cache (edge);
      reset_node_growth_cache (edge->callee);

      /* When updating the edge costs, we only decrease badness in the keys.
	 Increases of badness are handled lazily; when we see a key with an
	 out-of-date value on it, we re-insert it now.  */
      current_badness = edge_badness (edge, false);
      gcc_assert (cached_badness == current_badness);
      gcc_assert (current_badness >= badness);
      if (current_badness != badness)
	{
	  edge->aux = fibheap_insert (edge_heap, current_badness, edge);
	  continue;
	}

      if (!can_inline_edge_p (edge, true))
	continue;

      callee = cgraph_function_or_thunk_node (edge->callee, NULL);
      growth = estimate_edge_growth (edge);
      if (dump_file)
	{
	  fprintf (dump_file,
		   "\nConsidering %s with %i size\n",
		   cgraph_node_name (callee),
		   inline_summary (callee)->size);
	  fprintf (dump_file,
		   " to be inlined into %s in %s:%i\n"
		   " Estimated growth after inlining into all callers is %+i insns.\n"
		   " Estimated badness is %i, frequency %.2f.\n",
		   cgraph_node_name (edge->caller),
		   flag_wpa ? "unknown"
		   : gimple_filename ((const_gimple) edge->call_stmt),
		   flag_wpa ? -1
		   : gimple_lineno ((const_gimple) edge->call_stmt),
		   estimate_growth (callee),
		   badness,
		   edge->frequency / (double)CGRAPH_FREQ_BASE);
	  if (edge->count)
	    fprintf (dump_file," Called " HOST_WIDEST_INT_PRINT_DEC "x\n",
		     edge->count);
	  if (dump_flags & TDF_DETAILS)
	    edge_badness (edge, true);
	}

      if (overall_size + growth > max_size
	  && !DECL_DISREGARD_INLINE_LIMITS (callee->symbol.decl))
	{
	  edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
	  report_inline_failed_reason (edge);
	  continue;
	}

      if (!want_inline_small_function_p (edge, true))
	continue;

      /* The heuristics for inlining small functions work poorly for
	 recursive calls, where the effect is similar to loop unrolling.
	 When inlining such an edge seems profitable, leave the decision
	 to the special-purpose recursive inliner.  */
      if (cgraph_edge_recursive_p (edge))
	{
	  where = edge->caller;
	  if (where->global.inlined_to)
	    where = where->global.inlined_to;
	  if (!recursive_inlining (edge,
				   flag_indirect_inlining
				   ? &new_indirect_edges : NULL))
	    {
	      edge->inline_failed = CIF_RECURSIVE_INLINING;
	      continue;
	    }
	  reset_edge_caches (where);
	  /* Recursive inliner inlines all recursive calls of the function
	     at once. Consequently we need to update all callee keys.  */
	  if (flag_indirect_inlining)
	    add_new_edges_to_heap (edge_heap, new_indirect_edges);
	  update_callee_keys (edge_heap, where, updated_nodes);
	}
      else
	{
	  struct cgraph_node *outer_node = NULL;
	  int depth = 0;

	  /* Consider the case where self recursive function A is inlined into B.
	     This is a desirable optimization in some cases, since it leads to an
	     effect similar to loop peeling and we might completely optimize out
	     the recursive call.  However we must be extra selective.  */

	  where = edge->caller;
	  while (where->global.inlined_to)
	    {
	      if (where->symbol.decl == callee->symbol.decl)
		outer_node = where, depth++;
	      where = where->callers->caller;
	    }
	  if (outer_node
	      && !want_inline_self_recursive_call_p (edge, outer_node,
						     true, depth))
	    {
	      edge->inline_failed
		= (DECL_DISREGARD_INLINE_LIMITS (edge->callee->symbol.decl)
		   ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
	      continue;
	    }
	  else if (depth && dump_file)
	    fprintf (dump_file, " Peeling recursion with depth %i\n", depth);

	  gcc_checking_assert (!callee->global.inlined_to);
	  inline_call (edge, true, &new_indirect_edges, &overall_size, true);
	  if (flag_indirect_inlining)
	    add_new_edges_to_heap (edge_heap, new_indirect_edges);

	  reset_edge_caches (edge->callee);
	  reset_node_growth_cache (callee);

	  update_callee_keys (edge_heap, where, updated_nodes);
	}
      where = edge->caller;
      if (where->global.inlined_to)
	where = where->global.inlined_to;

      /* Our profitability metric can depend on local properties
	 such as the number of inlinable calls and the size of the function
	 body.  After inlining these properties might change for the function
	 we inlined into (since its body size changed) and for the functions
	 called by the function we inlined (since the number of their
	 inlinable callers might change).  */
      update_caller_keys (edge_heap, where, updated_nodes, NULL);
      bitmap_clear (updated_nodes);

      if (dump_file)
	{
	  fprintf (dump_file,
		   " Inlined into %s which now has time %i and size %i, "
		   "net change of %+i.\n",
		   cgraph_node_name (edge->caller),
		   inline_summary (edge->caller)->time,
		   inline_summary (edge->caller)->size,
		   overall_size - old_size);
	}
      if (min_size > overall_size)
	{
	  min_size = overall_size;
	  max_size = compute_max_insns (min_size);

	  if (dump_file)
	    fprintf (dump_file, "New minimal size reached: %i\n", min_size);
	}
    }

  free_growth_caches ();
  new_indirect_edges.release ();
  fibheap_delete (edge_heap);
  if (dump_file)
    fprintf (dump_file,
	     "Unit growth for small function inlining: %i->%i (%i%%)\n",
	     initial_size, overall_size,
	     initial_size ? overall_size * 100 / (initial_size) - 100 : 0);
  BITMAP_FREE (updated_nodes);
}
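
/* For illustration only, a standalone sketch (hypothetical code, kept
   under "#if 0" so it does not affect this file; it is not the GCC
   fibheap API) of the lazy-key protocol the loop above relies on:
   queued badness keys may go stale, but only by becoming too small, so
   on extraction we compare the key against the freshly computed badness
   and re-insert the edge when it grew.  */
#if 0
#include <stdio.h>

struct toy_edge { int id; int current_badness; };
struct toy_entry { int key; struct toy_edge *edge; };

/* A tiny array-based min-heap standing in for the fibheap.  */
static struct toy_entry toy_heap[16];
static int toy_len;

static void
toy_push (int key, struct toy_edge *e)
{
  int i = toy_len++;
  toy_heap[i].key = key;
  toy_heap[i].edge = e;
  while (i && toy_heap[(i - 1) / 2].key > toy_heap[i].key)
    {
      struct toy_entry t = toy_heap[i];
      toy_heap[i] = toy_heap[(i - 1) / 2];
      toy_heap[(i - 1) / 2] = t;
      i = (i - 1) / 2;
    }
}

static struct toy_entry
toy_pop (void)
{
  struct toy_entry min = toy_heap[0];
  int i = 0;

  toy_heap[0] = toy_heap[--toy_len];
  for (;;)
    {
      int l = 2 * i + 1, r = l + 1, m = i;
      struct toy_entry t;
      if (l < toy_len && toy_heap[l].key < toy_heap[m].key)
	m = l;
      if (r < toy_len && toy_heap[r].key < toy_heap[m].key)
	m = r;
      if (m == i)
	break;
      t = toy_heap[i];
      toy_heap[i] = toy_heap[m];
      toy_heap[m] = t;
      i = m;
    }
  return min;
}

int
main (void)
{
  struct toy_edge e1 = { 1, 5 }, e2 = { 2, 30 };
  toy_push (5, &e1);
  toy_push (10, &e2);	/* Stale key: the edge's badness is 30 by now.  */
  while (toy_len)
    {
      struct toy_entry top = toy_pop ();
      if (top.edge->current_badness != top.key)
	{
	  /* Out-of-date key; re-queue under the fresh badness, exactly
	     as the loop above re-inserts the cgraph edge.  */
	  toy_push (top.edge->current_badness, top.edge);
	  continue;
	}
      printf ("inline edge %d at badness %d\n", top.edge->id, top.key);
    }
  return 0;
}
#endif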

/* Flatten NODE.  Performed both during early inlining and
   at IPA inlining time.  */

static void
flatten_function (struct cgraph_node *node, bool early)
{
  struct cgraph_edge *e;

  /* We shouldn't be called recursively when we are being processed.  */
  gcc_assert (node->symbol.aux == NULL);

  node->symbol.aux = (void *) node;

  for (e = node->callees; e; e = e->next_callee)
    {
      struct cgraph_node *orig_callee;
      struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);

      /* Have we hit a cycle?  Time to give up.  */
      if (callee->symbol.aux)
	{
	  if (dump_file)
	    fprintf (dump_file,
		     "Not inlining %s into %s to avoid cycle.\n",
		     xstrdup (cgraph_node_name (callee)),
		     xstrdup (cgraph_node_name (e->caller)));
	  e->inline_failed = CIF_RECURSIVE_INLINING;
	  continue;
	}

      /* When the edge is already inlined, we just need to recurse into
	 it in order to fully flatten the leaves.  */
      if (!e->inline_failed)
	{
	  flatten_function (callee, early);
	  continue;
	}

      /* The flatten attribute needs to be processed during late inlining.
	 For extra code quality, however, we do flattening during early
	 optimization too.  */
      if (!early
	  ? !can_inline_edge_p (e, true)
	  : !can_early_inline_edge_p (e))
	continue;

      if (cgraph_edge_recursive_p (e))
	{
	  if (dump_file)
	    fprintf (dump_file, "Not inlining: recursive call.\n");
	  continue;
	}

      if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->symbol.decl))
	  != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->symbol.decl)))
	{
	  if (dump_file)
	    fprintf (dump_file, "Not inlining: SSA form does not match.\n");
	  continue;
	}

      /* Inline the edge and flatten the inline clone.  Avoid
         recursing through the original node if the node was cloned.  */
      if (dump_file)
	fprintf (dump_file, " Inlining %s into %s.\n",
		 xstrdup (cgraph_node_name (callee)),
		 xstrdup (cgraph_node_name (e->caller)));
      orig_callee = callee;
      inline_call (e, true, NULL, NULL, false);
      if (e->callee != orig_callee)
	orig_callee->symbol.aux = (void *) node;
      flatten_function (e->callee, early);
      if (e->callee != orig_callee)
	orig_callee->symbol.aux = NULL;
    }

  node->symbol.aux = NULL;
  if (!node->global.inlined_to)
    inline_update_overall_summary (node);
}
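
/* A minimal user-level illustration (hypothetical code, kept under
   "#if 0") of what flatten_function performs: with the flatten
   attribute on body, the call to twice_incremented is inlined, and the
   recursion into the inline clone then inlines the nested calls to
   add_one as well, so body ends up with no calls at all.  */
#if 0
static int add_one (int x) { return x + 1; }
static int twice_incremented (int x) { return add_one (add_one (x)); }

__attribute__ ((flatten)) int
body (int x)
{
  return twice_incremented (x);
}
#endif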

/* Decide on the inlining.  We do so in topological order to avoid the
   expense of updating data structures.  */

static unsigned int
ipa_inline (void)
{
  struct cgraph_node *node;
  int nnodes;
  struct cgraph_node **order =
    XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
  int i;

  if (in_lto_p && optimize)
    ipa_update_after_lto_read ();

  if (dump_file)
    dump_inline_summaries (dump_file);

  nnodes = ipa_reverse_postorder (order);

  FOR_EACH_FUNCTION (node)
    node->symbol.aux = 0;

  if (dump_file)
    fprintf (dump_file, "\nFlattening functions:\n");

  /* In the first pass handle functions to be flattened.  Do this with
     a priority so none of our later choices will make this impossible.  */
  for (i = nnodes - 1; i >= 0; i--)
    {
      node = order[i];

      /* Handle nodes to be flattened.
	 Ideally when processing callees we would stop inlining at the
	 entry of cycles, possibly cloning that entry point and trying
	 to flatten it, turning it into a self-recursive function.  */
      if (lookup_attribute ("flatten",
			    DECL_ATTRIBUTES (node->symbol.decl)) != NULL)
	{
	  if (dump_file)
	    fprintf (dump_file,
		     "Flattening %s\n", cgraph_node_name (node));
	  flatten_function (node, false);
	}
    }

  inline_small_functions ();
  symtab_remove_unreachable_nodes (false, dump_file);
  free (order);

  /* Inline functions with the property that after inlining into all callers
     the code size will shrink because the out-of-line copy is eliminated.
     We do this regardless of the callee size as long as function growth
     limits are met.  */
  if (flag_inline_functions_called_once)
    {
      int cold;
      if (dump_file)
	fprintf (dump_file,
		 "\nDeciding on functions to be inlined into all callers:\n");

      /* Inlining one function called once has a good chance of preventing
	 inlining of other functions into the same callee.  Ideally we should
	 work in priority order, but probably inlining hot functions first
	 is a good cut without the extra pain of maintaining the queue.

	 ??? This is not really fitting the bill perfectly: inlining a function
	 into a callee often leads to better optimization of the callee due to
	 increased context for optimization.
	 For example if main() calls a function that outputs help
	 and then a function that does the main optimization, we should inline
	 the second with priority even if both calls are cold by themselves.

	 We probably want to implement a new predicate replacing our use of
	 maybe_hot_edge interpreted as maybe_hot_edge || callee is known
	 to be hot.  */
      for (cold = 0; cold <= 1; cold ++)
	{
	  FOR_EACH_DEFINED_FUNCTION (node)
	    {
	      if (want_inline_function_to_all_callers_p (node, cold))
		{
		  int num_calls = 0;
		  struct cgraph_edge *e;
		  for (e = node->callers; e; e = e->next_caller)
		    num_calls++;
		  while (node->callers && !node->global.inlined_to)
		    {
		      struct cgraph_node *caller = node->callers->caller;

		      if (dump_file)
			{
			  fprintf (dump_file,
				   "\nInlining %s size %i.\n",
				   cgraph_node_name (node),
				   inline_summary (node)->size);
			  fprintf (dump_file,
				   " Called once from %s (%i insns).\n",
				   cgraph_node_name (node->callers->caller),
				   inline_summary (node->callers->caller)->size);
			}

		      inline_call (node->callers, true, NULL, NULL, true);
		      if (dump_file)
			fprintf (dump_file,
				 " Inlined into %s which now has size %i\n",
				 cgraph_node_name (caller),
				 inline_summary (caller)->size);
		      if (!num_calls--)
			{
			  if (dump_file)
			    fprintf (dump_file, "New calls found; giving up.\n");
			  break;
			}
		    }
		}
	    }
	}
    }

  /* Free ipa-prop structures if they are no longer needed.  */
  if (optimize)
    ipa_free_all_structures_after_iinln ();

  if (dump_file)
    fprintf (dump_file,
	     "\nInlined %i calls, eliminated %i functions\n\n",
	     ncalls_inlined, nfunctions_inlined);

  if (dump_file)
    dump_inline_summaries (dump_file);
  /* In WPA we use inline summaries for the partitioning process.  */
  if (!flag_wpa)
    inline_free_summary ();
  return 0;
}
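
/* A user-level illustration (hypothetical code, kept under "#if 0") of
   the "inline into all callers" case above: setup is static and has a
   single call site, so inlining it lets the out-of-line copy be removed
   and the unit shrinks even though the callee body is large.  */
#if 0
static int table[1024];

static void
setup (void)
{
  int i;
  for (i = 0; i < 1024; i++)
    table[i] = i;		/* Large body; still profitable once.  */
}

int
entry (void)
{
  setup ();			/* The only call site.  */
  return table[42];
}
#endif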

/* Inline always-inline function calls in NODE.  */

static bool
inline_always_inline_functions (struct cgraph_node *node)
{
  struct cgraph_edge *e;
  bool inlined = false;

  for (e = node->callees; e; e = e->next_callee)
    {
      struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);
      if (!DECL_DISREGARD_INLINE_LIMITS (callee->symbol.decl))
	continue;

      if (cgraph_edge_recursive_p (e))
	{
	  if (dump_file)
	    fprintf (dump_file, "  Not inlining recursive call to %s.\n",
		     cgraph_node_name (e->callee));
	  e->inline_failed = CIF_RECURSIVE_INLINING;
	  continue;
	}

      if (!can_early_inline_edge_p (e))
	continue;

      if (dump_file)
	fprintf (dump_file, "  Inlining %s into %s (always_inline).\n",
		 xstrdup (cgraph_node_name (e->callee)),
		 xstrdup (cgraph_node_name (e->caller)));
      inline_call (e, true, NULL, NULL, false);
      inlined = true;
    }
  if (inlined)
    inline_update_overall_summary (node);

  return inlined;
}
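
/* A user-level sketch (hypothetical code, kept under "#if 0") of the
   edges handled above: an always_inline callee is inlined even when not
   optimizing, while a recursive always_inline call would be rejected
   with CIF_RECURSIVE_INLINING rather than expanded forever.  */
#if 0
static inline int __attribute__ ((always_inline))
clamp_nonnegative (int v)
{
  return v < 0 ? 0 : v;
}

int
use_clamp (int v)
{
  return clamp_nonnegative (v);	/* Inlined unconditionally.  */
}
#endif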

/* Inline calls to small functions in NODE that are viable early-inline
   candidates, deciding from purely local information.  */

static bool
early_inline_small_functions (struct cgraph_node *node)
{
  struct cgraph_edge *e;
  bool inlined = false;

  for (e = node->callees; e; e = e->next_callee)
    {
      struct cgraph_node *callee = cgraph_function_or_thunk_node (e->callee, NULL);
      if (!inline_summary (callee)->inlinable
	  || !e->inline_failed)
	continue;

      /* Do not consider functions not declared inline.  */
      if (!DECL_DECLARED_INLINE_P (callee->symbol.decl)
	  && !flag_inline_small_functions
	  && !flag_inline_functions)
	continue;

      if (dump_file)
	fprintf (dump_file, "Considering inline candidate %s.\n",
		 cgraph_node_name (callee));

      if (!can_early_inline_edge_p (e))
	continue;

      if (cgraph_edge_recursive_p (e))
	{
	  if (dump_file)
	    fprintf (dump_file, "  Not inlining: recursive call.\n");
	  continue;
	}

      if (!want_early_inline_function_p (e))
	continue;

      if (dump_file)
	fprintf (dump_file, " Inlining %s into %s.\n",
		 xstrdup (cgraph_node_name (callee)),
		 xstrdup (cgraph_node_name (e->caller)));
      inline_call (e, true, NULL, NULL, true);
      inlined = true;
    }

  return inlined;
}
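
/* A user-level sketch (hypothetical code, kept under "#if 0") of the
   declared-inline gate above: when neither -finline-small-functions nor
   -finline-functions is enabled, tiny is considered only because it
   carries the inline keyword; whether it is actually inlined is still
   up to want_early_inline_function_p.  */
#if 0
static inline int tiny (int x) { return x * 2; }

int
tiny_caller (int x)
{
  return tiny (x) + 1;
}
#endif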

/* Do inlining of small functions.  Doing so early helps profiling and other
   passes to be somewhat more effective and avoids some code duplication in
   the later real inlining pass for testcases with very many function calls.  */
static unsigned int
early_inliner (void)
{
  struct cgraph_node *node = cgraph_get_node (current_function_decl);
  struct cgraph_edge *edge;
  unsigned int todo = 0;
  int iterations = 0;
  bool inlined = false;

  if (seen_error ())
    return 0;

  /* Do nothing if the data structures for the ipa inliner are already
     computed.  This happens when some pass decides to construct a new
     function and cgraph_add_new_function calls lowering passes and early
     optimization on it.  This may confuse us when the early inliner decides
     to inline a call to a function clone, because function clones don't have
     a parameter list in ipa-prop matching their signature.  */
  if (ipa_node_params_vector.exists ())
    return 0;

#ifdef ENABLE_CHECKING
  verify_cgraph_node (node);
#endif

  /* Even when not optimizing or not inlining, inline always-inline
     functions.  */
  inlined = inline_always_inline_functions (node);

  if (!optimize
      || flag_no_inline
      || !flag_early_inlining
      /* Never inline regular functions into always-inline functions
	 during incremental inlining.  This sucks as functions calling
	 always-inline functions will get less optimized, but at the
	 same time inlining a function that calls an always-inline
	 function into an always-inline function might introduce
	 cycles of edges to be always inlined in the callgraph.

	 We might want to be smarter and just avoid this type of inlining.  */
      || DECL_DISREGARD_INLINE_LIMITS (node->symbol.decl))
    ;
  else if (lookup_attribute ("flatten",
			     DECL_ATTRIBUTES (node->symbol.decl)) != NULL)
    {
      /* When the function is marked to be flattened, recursively inline
	 all calls in it.  */
      if (dump_file)
	fprintf (dump_file,
		 "Flattening %s\n", cgraph_node_name (node));
      flatten_function (node, true);
      inlined = true;
    }
  else
    {
      /* We iterate incremental inlining to get trivial cases of indirect
	 inlining.  */
      while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
	     && early_inline_small_functions (node))
	{
	  timevar_push (TV_INTEGRATION);
	  todo |= optimize_inline_calls (current_function_decl);

	  /* Technically we ought to recompute inline parameters so the new
	     iteration of the early inliner works as expected.  We however
	     have values approximately right and thus we only need to update
	     edge info that might be cleared out for newly discovered edges.  */
	  for (edge = node->callees; edge; edge = edge->next_callee)
	    {
	      struct inline_edge_summary *es = inline_edge_summary (edge);
	      es->call_stmt_size
		= estimate_num_insns (edge->call_stmt, &eni_size_weights);
	      es->call_stmt_time
		= estimate_num_insns (edge->call_stmt, &eni_time_weights);
	      if (edge->callee->symbol.decl
		  && !gimple_check_call_matching_types (edge->call_stmt,
							edge->callee->symbol.decl))
		edge->call_stmt_cannot_inline_p = true;
	    }
	  timevar_pop (TV_INTEGRATION);
	  iterations++;
	  inlined = false;
	}
      if (dump_file)
	fprintf (dump_file, "Iterations: %i\n", iterations);
    }

  if (inlined)
    {
      timevar_push (TV_INTEGRATION);
      todo |= optimize_inline_calls (current_function_decl);
      timevar_pop (TV_INTEGRATION);
    }

  cfun->always_inline_functions_inlined = true;

  return todo;
}
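
/* Hypothetical user code (kept under "#if 0") showing why the loop
   above iterates: the first round inlines apply into dispatch, turning
   the call through fp into a direct call to add_two, which the next
   round can then inline as well.  */
#if 0
static int add_two (int x) { return x + 2; }

static int
apply (int (*fp) (int), int x)
{
  return fp (x);		/* Indirect until apply is inlined.  */
}

int
dispatch (int x)
{
  return apply (add_two, x);	/* Becomes a direct call after inlining.  */
}
#endif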

struct gimple_opt_pass pass_early_inline =
{
 {
  GIMPLE_PASS,
  "einline",				/* name */
  OPTGROUP_INLINE,			/* optinfo_flags */
  NULL,					/* gate */
  early_inliner,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_EARLY_INLINING,			/* tv_id */
  PROP_ssa,				/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0					/* todo_flags_finish */
 }
};


/* When to run IPA inlining.  Inlining of always-inline functions
   happens during early inlining.

   Enable inlining unconditionally at -flto.  We need size estimates to
   drive partitioning.  */

static bool
gate_ipa_inline (void)
{
  return optimize || flag_lto || flag_wpa;
}

struct ipa_opt_pass_d pass_ipa_inline =
{
 {
  IPA_PASS,
  "inline",				/* name */
  OPTGROUP_INLINE,			/* optinfo_flags */
  gate_ipa_inline,			/* gate */
  ipa_inline,				/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_IPA_INLINING,			/* tv_id */
  0,					/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  TODO_remove_functions,		/* todo_flags_start */
  TODO_dump_symtab
  | TODO_remove_functions | TODO_ggc_collect	/* todo_flags_finish */
 },
 inline_generate_summary,		/* generate_summary */
 inline_write_summary,			/* write_summary */
 inline_read_summary,			/* read_summary */
 NULL,					/* write_optimization_summary */
 NULL,					/* read_optimization_summary */
 NULL,					/* stmt_fixup */
 0,					/* TODOs */
 inline_transform,			/* function_transform */
 NULL,					/* variable_transform */
};