1 /* Inlining decision heuristics.
2 Copyright (C) 2003-2018 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 /* Inlining decision heuristics
22
23 The implementation of inliner is organized as follows:
24
25 inlining heuristics limits
26
27 can_inline_edge_p allows checking that a particular inlining is allowed
28 by the limits specified by the user (allowed function growth, stack
29 growth and so on).
30
31 Functions are inlined when it is obvious the result is profitable (such
32 as functions called once or when inlining reduces code size).
33 In addition to that we perform inlining of small functions and recursive
34 inlining.
35
36 inlining heuristics
37
38 The inliner itself is split into two passes:
39
40 pass_early_inlining
41
42 Simple local inlining pass inlining callees into the current function.
43 This pass makes no use of whole unit analysis and thus it can make only
44 very simple decisions based on local properties.
45
46 The strength of the pass is that it is run in topological order
47 (reverse postorder) on the callgraph. Functions are converted into SSA
48 form just before this pass and optimized subsequently. As a result, the
49 callees of the function seen by the early inliner have already been
50 optimized, and the results of early inlining add a lot of optimization
51 opportunities for the local optimizers.
52
53 The pass handles the obvious inlining decisions within the compilation
54 unit - inlining auto inline functions, inlining for size and
55 flattening.
56
57 The main strength of the pass is its ability to eliminate the abstraction
58 penalty in C++ code (via a combination of inlining and early
59 optimization) and thus improve the quality of analysis done by the real
60 IPA optimizers.
61
62 Because of the lack of whole unit knowledge, the pass cannot really make
63 good code size/performance tradeoffs. It does, however, do very simple
64 speculative inlining, allowing code size to grow by
65 EARLY_INLINING_INSNS when the callee is a leaf function. In this case the
66 optimizations performed later are very likely to eliminate the cost.
67
68 pass_ipa_inline
69
70 This is the real inliner able to handle inlining with whole program
71 knowledge. It performs following steps:
72
73 1) inlining of small functions. This is implemented by a greedy
74 algorithm ordering all inlinable cgraph edges by their badness and
75 inlining them in this order as long as the inline limits allow doing so.
76
77 This heuristic is not very good at inlining recursive calls. Recursive
78 calls can be inlined with results similar to loop unrolling. To do so,
79 a special purpose recursive inliner is executed on the function when a
80 recursive edge is met as a viable candidate.
81
82 2) Unreachable functions are removed from the callgraph. Inlining leads
83 to devirtualization and other modifications of the callgraph, so functions
84 may become unreachable during the process. Also functions declared as
85 extern inline and virtual functions are removed, since after inlining
86 we no longer need the offline bodies.
87
88 3) Functions called once and not exported from the unit are inlined.
89 This should almost always lead to a reduction of code size by eliminating
90 the need for an offline copy of the function. */
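
/* Illustrative example (not part of GCC itself): given

     static int scale (int x) { return x * 3 + 1; }
     int entry (int x) { return scale (x); }

   where scale is called exactly once, step 3 of pass_ipa_inline inlines
   scale into entry and step 2 then removes the unreachable offline body,
   so the unit shrinks even though scale was never declared inline.  */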
91
92 #include "config.h"
93 #include "system.h"
94 #include "coretypes.h"
95 #include "backend.h"
96 #include "target.h"
97 #include "rtl.h"
98 #include "tree.h"
99 #include "gimple.h"
100 #include "alloc-pool.h"
101 #include "tree-pass.h"
102 #include "gimple-ssa.h"
103 #include "cgraph.h"
104 #include "lto-streamer.h"
105 #include "trans-mem.h"
106 #include "calls.h"
107 #include "tree-inline.h"
108 #include "params.h"
109 #include "profile.h"
110 #include "symbol-summary.h"
111 #include "tree-vrp.h"
112 #include "ipa-prop.h"
113 #include "ipa-fnsummary.h"
114 #include "ipa-inline.h"
115 #include "ipa-utils.h"
116 #include "sreal.h"
117 #include "auto-profile.h"
118 #include "builtins.h"
119 #include "fibonacci_heap.h"
120 #include "stringpool.h"
121 #include "attribs.h"
122 #include "asan.h"
123
124 typedef fibonacci_heap <sreal, cgraph_edge> edge_heap_t;
125 typedef fibonacci_node <sreal, cgraph_edge> edge_heap_node_t;
126
127 /* Statistics we collect about inlining algorithm. */
128 static int overall_size;
129 static profile_count max_count;
130 static profile_count spec_rem;
131
132 /* Return false when inlining edge E would lead to violating
133 the limits on function unit growth or stack usage growth.
134
135 The relative function body growth limit is present generally
136 to avoid problems with the non-linear behavior of the compiler.
137 To allow inlining huge functions into a tiny wrapper, the limit
138 is always based on the bigger of the two functions considered.
139
140 For the stack growth limit we always base the growth on the stack
141 usage of the caller. We want to prevent applications from segfaulting
142 on stack overflow when functions with huge stack frames get
143 inlined. */
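
/* A rough worked example of the limit computed below (the numbers are
   illustrative; the real values come from --param large-function-growth and
   --param large-function-insns): if the largest body on the inline chain
   has self_size 3000 and large-function-growth is 100 percent, the limit
   becomes 3000 + 3000 * 100 / 100 = 6000.  Inlining is rejected only when
   the estimated size after inlining does not shrink, exceeds
   large-function-insns and also exceeds this 6000 limit.  */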
144
145 static bool
146 caller_growth_limits (struct cgraph_edge *e)
147 {
148 struct cgraph_node *to = e->caller;
149 struct cgraph_node *what = e->callee->ultimate_alias_target ();
150 int newsize;
151 int limit = 0;
152 HOST_WIDE_INT stack_size_limit = 0, inlined_stack;
153 ipa_fn_summary *info, *what_info, *outer_info = ipa_fn_summaries->get (to);
154
155 /* Look for the function e->caller is inlined into. While doing
156 so, work out the largest function body on the way. As
157 described above, we want to base our function growth
158 limits on that, not on the self size of the
159 outer function and not on the self size of the inline code
160 we immediately inline into. This is the most relaxed
161 interpretation of the rule "do not grow large functions
162 too much in order to prevent the compiler from exploding". */
163 while (true)
164 {
165 info = ipa_fn_summaries->get (to);
166 if (limit < info->self_size)
167 limit = info->self_size;
168 if (stack_size_limit < info->estimated_self_stack_size)
169 stack_size_limit = info->estimated_self_stack_size;
170 if (to->global.inlined_to)
171 to = to->callers->caller;
172 else
173 break;
174 }
175
176 what_info = ipa_fn_summaries->get (what);
177
178 if (limit < what_info->self_size)
179 limit = what_info->self_size;
180
181 limit += limit * PARAM_VALUE (PARAM_LARGE_FUNCTION_GROWTH) / 100;
182
183 /* Check the size after inlining against the function limits. But allow
184 the function to shrink if it went over the limits by forced inlining. */
185 newsize = estimate_size_after_inlining (to, e);
186 if (newsize >= info->size
187 && newsize > PARAM_VALUE (PARAM_LARGE_FUNCTION_INSNS)
188 && newsize > limit)
189 {
190 e->inline_failed = CIF_LARGE_FUNCTION_GROWTH_LIMIT;
191 return false;
192 }
193
194 if (!what_info->estimated_stack_size)
195 return true;
196
197 /* FIXME: The stack size limit often prevents inlining in Fortran programs
198 due to large I/O data structures used by the Fortran front-end.
199 We ought to ignore this limit when we know that the edge is executed
200 on every invocation of the caller (i.e. its call statement dominates
201 the exit block). We do not track this information, yet. */
202 stack_size_limit += ((gcov_type)stack_size_limit
203 * PARAM_VALUE (PARAM_STACK_FRAME_GROWTH) / 100);
204
205 inlined_stack = (outer_info->stack_frame_offset
206 + outer_info->estimated_self_stack_size
207 + what_info->estimated_stack_size);
208 /* Compare the new stack consumption with the stack consumption at the
209 place the stack is used. */
210 if (inlined_stack > stack_size_limit
211 /* If the function already has large stack usage from a sibling
212 inline call, we can inline, too.
213 This bit overoptimistically assumes that we are good at stack
214 packing. */
215 && inlined_stack > info->estimated_stack_size
216 && inlined_stack > PARAM_VALUE (PARAM_LARGE_STACK_FRAME))
217 {
218 e->inline_failed = CIF_LARGE_STACK_FRAME_GROWTH_LIMIT;
219 return false;
220 }
221 return true;
222 }
223
224 /* Dump info about why inlining has failed. */
225
226 static void
227 report_inline_failed_reason (struct cgraph_edge *e)
228 {
229 if (dump_file)
230 {
231 fprintf (dump_file, " not inlinable: %s -> %s, %s\n",
232 e->caller->dump_name (),
233 e->callee->dump_name (),
234 cgraph_inline_failed_string (e->inline_failed));
235 if ((e->inline_failed == CIF_TARGET_OPTION_MISMATCH
236 || e->inline_failed == CIF_OPTIMIZATION_MISMATCH)
237 && e->caller->lto_file_data
238 && e->callee->ultimate_alias_target ()->lto_file_data)
239 {
240 fprintf (dump_file, " LTO objects: %s, %s\n",
241 e->caller->lto_file_data->file_name,
242 e->callee->ultimate_alias_target ()->lto_file_data->file_name);
243 }
244 if (e->inline_failed == CIF_TARGET_OPTION_MISMATCH)
245 cl_target_option_print_diff
246 (dump_file, 2, target_opts_for_fn (e->caller->decl),
247 target_opts_for_fn (e->callee->ultimate_alias_target ()->decl));
248 if (e->inline_failed == CIF_OPTIMIZATION_MISMATCH)
249 cl_optimization_print_diff
250 (dump_file, 2, opts_for_fn (e->caller->decl),
251 opts_for_fn (e->callee->ultimate_alias_target ()->decl));
252 }
253 }
254
255 /* Decide whether sanitizer-related attributes allow inlining. */
256
257 static bool
258 sanitize_attrs_match_for_inline_p (const_tree caller, const_tree callee)
259 {
260 if (!caller || !callee)
261 return true;
262
263 /* Allow inlining always_inline functions into no_sanitize_address
264 functions. */
265 if (!sanitize_flags_p (SANITIZE_ADDRESS, caller)
266 && lookup_attribute ("always_inline", DECL_ATTRIBUTES (callee)))
267 return true;
268
269 return ((sanitize_flags_p (SANITIZE_ADDRESS, caller)
270 == sanitize_flags_p (SANITIZE_ADDRESS, callee))
271 && (sanitize_flags_p (SANITIZE_POINTER_COMPARE, caller)
272 == sanitize_flags_p (SANITIZE_POINTER_COMPARE, callee))
273 && (sanitize_flags_p (SANITIZE_POINTER_SUBTRACT, caller)
274 == sanitize_flags_p (SANITIZE_POINTER_SUBTRACT, callee)));
275 }
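
/* For instance (illustrative only): a callee built with -fsanitize=address
   normally cannot be inlined into a caller that is not sanitized, because
   the sanitize_flags_p comparisons above would differ; the one exception
   handled above is an always_inline callee being inlined into a caller
   that has address sanitization disabled (e.g. via no_sanitize_address).  */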
276
277 /* Used for flags where it is safe to inline when caller's value is
278 greater than callee's. */
279 #define check_maybe_up(flag) \
280 (opts_for_fn (caller->decl)->x_##flag \
281 != opts_for_fn (callee->decl)->x_##flag \
282 && (!always_inline \
283 || opts_for_fn (caller->decl)->x_##flag \
284 < opts_for_fn (callee->decl)->x_##flag))
285 /* Used for flags where it is safe to inline when caller's value is
286 smaller than callee's. */
287 #define check_maybe_down(flag) \
288 (opts_for_fn (caller->decl)->x_##flag \
289 != opts_for_fn (callee->decl)->x_##flag \
290 && (!always_inline \
291 || opts_for_fn (caller->decl)->x_##flag \
292 > opts_for_fn (callee->decl)->x_##flag))
293 /* Used for flags where exact match is needed for correctness. */
294 #define check_match(flag) \
295 (opts_for_fn (caller->decl)->x_##flag \
296 != opts_for_fn (callee->decl)->x_##flag)
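
/* Example of the helpers above (illustrative): for check_maybe_up
   (flag_trapping_math), a caller built with -ftrapping-math (value 1) and
   a callee built with -fno-trapping-math (value 0) differ; because the
   caller's value is the greater one, an always_inline callee is still
   accepted, while for an ordinary callee any difference at all counts as a
   mismatch.  */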
297
298 /* Decide if we can inline the edge and possibly update
299 the inline_failed reason.
300 We check whether inlining is possible at all; the caller growth
301 limits are checked separately by can_inline_edge_by_limits_p.
302
303 If REPORT is true, output the reason to the dump file. */
304
305 static bool
306 can_inline_edge_p (struct cgraph_edge *e, bool report,
307 bool early = false)
308 {
309 gcc_checking_assert (e->inline_failed);
310
311 if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
312 {
313 if (report)
314 report_inline_failed_reason (e);
315 return false;
316 }
317
318 bool inlinable = true;
319 enum availability avail;
320 cgraph_node *caller = e->caller->global.inlined_to
321 ? e->caller->global.inlined_to : e->caller;
322 cgraph_node *callee = e->callee->ultimate_alias_target (&avail, caller);
323
324 if (!callee->definition)
325 {
326 e->inline_failed = CIF_BODY_NOT_AVAILABLE;
327 inlinable = false;
328 }
329 if (!early && (!opt_for_fn (callee->decl, optimize)
330 || !opt_for_fn (caller->decl, optimize)))
331 {
332 e->inline_failed = CIF_FUNCTION_NOT_OPTIMIZED;
333 inlinable = false;
334 }
335 else if (callee->calls_comdat_local)
336 {
337 e->inline_failed = CIF_USES_COMDAT_LOCAL;
338 inlinable = false;
339 }
340 else if (avail <= AVAIL_INTERPOSABLE)
341 {
342 e->inline_failed = CIF_OVERWRITABLE;
343 inlinable = false;
344 }
345 /* All edges with call_stmt_cannot_inline_p should have inline_failed
346 initialized to one of FINAL_ERROR reasons. */
347 else if (e->call_stmt_cannot_inline_p)
348 gcc_unreachable ();
349 /* Don't inline if the functions have different EH personalities. */
350 else if (DECL_FUNCTION_PERSONALITY (caller->decl)
351 && DECL_FUNCTION_PERSONALITY (callee->decl)
352 && (DECL_FUNCTION_PERSONALITY (caller->decl)
353 != DECL_FUNCTION_PERSONALITY (callee->decl)))
354 {
355 e->inline_failed = CIF_EH_PERSONALITY;
356 inlinable = false;
357 }
358 /* TM pure functions should not be inlined into non-TM_pure
359 functions. */
360 else if (is_tm_pure (callee->decl) && !is_tm_pure (caller->decl))
361 {
362 e->inline_failed = CIF_UNSPECIFIED;
363 inlinable = false;
364 }
365 /* Check compatibility of target optimization options. */
366 else if (!targetm.target_option.can_inline_p (caller->decl,
367 callee->decl))
368 {
369 e->inline_failed = CIF_TARGET_OPTION_MISMATCH;
370 inlinable = false;
371 }
372 else if (!ipa_fn_summaries->get (callee)->inlinable)
373 {
374 e->inline_failed = CIF_FUNCTION_NOT_INLINABLE;
375 inlinable = false;
376 }
377 /* Don't inline a function with mismatched sanitization attributes. */
378 else if (!sanitize_attrs_match_for_inline_p (caller->decl, callee->decl))
379 {
380 e->inline_failed = CIF_ATTRIBUTE_MISMATCH;
381 inlinable = false;
382 }
383 if (!inlinable && report)
384 report_inline_failed_reason (e);
385 return inlinable;
386 }
387
388 /* Decide if we can inline the edge and possibly update
389 the inline_failed reason.
390 We check whether the limits specified by the user and the caller
391 growth limits allow inlining to happen.
392
393 If REPORT is true, output the reason to the dump file.
394
395 If DISREGARD_LIMITS is true, ignore size limits. */
396
397 static bool
398 can_inline_edge_by_limits_p (struct cgraph_edge *e, bool report,
399 bool disregard_limits = false, bool early = false)
400 {
401 gcc_checking_assert (e->inline_failed);
402
403 if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
404 {
405 if (report)
406 report_inline_failed_reason (e);
407 return false;
408 }
409
410 bool inlinable = true;
411 enum availability avail;
412 cgraph_node *caller = e->caller->global.inlined_to
413 ? e->caller->global.inlined_to : e->caller;
414 cgraph_node *callee = e->callee->ultimate_alias_target (&avail, caller);
415 tree caller_tree = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (caller->decl);
416 tree callee_tree
417 = callee ? DECL_FUNCTION_SPECIFIC_OPTIMIZATION (callee->decl) : NULL;
418 /* Check if caller growth allows the inlining. */
419 if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl)
420 && !disregard_limits
421 && !lookup_attribute ("flatten",
422 DECL_ATTRIBUTES (caller->decl))
423 && !caller_growth_limits (e))
424 inlinable = false;
425 /* Don't inline a function with a higher optimization level than the
426 caller. FIXME: this is really just the tip of the iceberg of handling
427 the optimize attribute. */
428 else if (caller_tree != callee_tree)
429 {
430 bool always_inline =
431 (DECL_DISREGARD_INLINE_LIMITS (callee->decl)
432 && lookup_attribute ("always_inline",
433 DECL_ATTRIBUTES (callee->decl)));
434 ipa_fn_summary *caller_info = ipa_fn_summaries->get (caller);
435 ipa_fn_summary *callee_info = ipa_fn_summaries->get (callee);
436
437 /* Until GCC 4.9 we did not check the semantics-altering flags
438 below and inlined across optimization boundaries.
439 Enabling the checks below breaks several packages by refusing
440 to inline library always_inline functions. See PR65873.
441 Disable the check for early inlining for now until a better solution
442 is found. */
443 if (always_inline && early)
444 ;
445 /* There are some options that change IL semantics, which means
446 we cannot inline in these cases for correctness reasons,
447 not even for functions declared always_inline. */
448 else if (check_match (flag_wrapv)
449 || check_match (flag_trapv)
450 || check_match (flag_pcc_struct_return)
451 /* When the caller or callee does FP math, be sure the FP codegen
452 flags are compatible. */
453 || ((caller_info->fp_expressions && callee_info->fp_expressions)
454 && (check_maybe_up (flag_rounding_math)
455 || check_maybe_up (flag_trapping_math)
456 || check_maybe_down (flag_unsafe_math_optimizations)
457 || check_maybe_down (flag_finite_math_only)
458 || check_maybe_up (flag_signaling_nans)
459 || check_maybe_down (flag_cx_limited_range)
460 || check_maybe_up (flag_signed_zeros)
461 || check_maybe_down (flag_associative_math)
462 || check_maybe_down (flag_reciprocal_math)
463 || check_maybe_down (flag_fp_int_builtin_inexact)
464 /* Strictly speaking only when the callee contains function
465 calls that may end up setting errno. */
466 || check_maybe_up (flag_errno_math)))
467 /* We do not want to make code compiled with exceptions to be
468 brought into a non-EH function unless we know that the callee
469 does not throw.
470 This is tracked by DECL_FUNCTION_PERSONALITY. */
471 || (check_maybe_up (flag_non_call_exceptions)
472 && DECL_FUNCTION_PERSONALITY (callee->decl))
473 || (check_maybe_up (flag_exceptions)
474 && DECL_FUNCTION_PERSONALITY (callee->decl))
475 /* When devirtualization is disabled for the callee, it is not safe
476 to inline it as we may have mangled the type info.
477 Allow early inlining of always inlines. */
478 || (!early && check_maybe_down (flag_devirtualize)))
479 {
480 e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
481 inlinable = false;
482 }
483 /* gcc.dg/pr43564.c. Apply user-forced inline even at -O0. */
484 else if (always_inline)
485 ;
486 /* When the user added an optimize attribute to the callee, honor it. */
487 else if (lookup_attribute ("optimize", DECL_ATTRIBUTES (callee->decl))
488 && opts_for_fn (caller->decl) != opts_for_fn (callee->decl))
489 {
490 e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
491 inlinable = false;
492 }
493 /* If explicit optimize attributes are not used, the mismatch is caused
494 by different command line options used to build different units.
495 Do not care about COMDAT functions - those are intended to be
496 optimized with the optimization flags of the module they are used in.
497 Also do not care about mixing up size/speed optimization when
498 DECL_DISREGARD_INLINE_LIMITS is set. */
499 else if ((callee->merged_comdat
500 && !lookup_attribute ("optimize",
501 DECL_ATTRIBUTES (caller->decl)))
502 || DECL_DISREGARD_INLINE_LIMITS (callee->decl))
503 ;
504 /* If the mismatch is caused by merging two LTO units with different
505 optimization flags we want to be a bit nicer. However never inline
506 if one of the functions is not optimized at all. */
507 else if (!opt_for_fn (callee->decl, optimize)
508 || !opt_for_fn (caller->decl, optimize))
509 {
510 e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
511 inlinable = false;
512 }
513 /* If the callee is optimized for size and the caller is not, allow
514 inlining if the code shrinks or we are within the MAX_INLINE_INSNS_SINGLE
515 limit and the callee is declared inline (and thus likely a unified
516 comdat). This will allow the caller to run faster. */
517 else if (opt_for_fn (callee->decl, optimize_size)
518 > opt_for_fn (caller->decl, optimize_size))
519 {
520 int growth = estimate_edge_growth (e);
521 if (growth > 0
522 && (!DECL_DECLARED_INLINE_P (callee->decl)
523 && growth >= MAX (MAX_INLINE_INSNS_SINGLE,
524 MAX_INLINE_INSNS_AUTO)))
525 {
526 e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
527 inlinable = false;
528 }
529 }
530 /* If the callee is more aggressively optimized for performance than the
531 caller, we generally want to inline only cheap (runtime-wise) functions. */
532 else if (opt_for_fn (callee->decl, optimize_size)
533 < opt_for_fn (caller->decl, optimize_size)
534 || (opt_for_fn (callee->decl, optimize)
535 > opt_for_fn (caller->decl, optimize)))
536 {
537 if (estimate_edge_time (e)
538 >= 20 + ipa_call_summaries->get (e)->call_stmt_time)
539 {
540 e->inline_failed = CIF_OPTIMIZATION_MISMATCH;
541 inlinable = false;
542 }
543 }
544
545 }
546
547 if (!inlinable && report)
548 report_inline_failed_reason (e);
549 return inlinable;
550 }
551
552
553 /* Return true if the edge E is inlinable during early inlining. */
554
555 static bool
556 can_early_inline_edge_p (struct cgraph_edge *e)
557 {
558 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
559 /* The early inliner might get called at WPA stage when an IPA pass adds a
560 new function. In this case we cannot really do any early inlining
561 because the function bodies are missing. */
562 if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
563 return false;
564 if (!gimple_has_body_p (callee->decl))
565 {
566 e->inline_failed = CIF_BODY_NOT_AVAILABLE;
567 return false;
568 }
569 /* In the early inliner some of the callees may not be in SSA form yet
570 (i.e. the callgraph is cyclic and we did not process
571 the callee in the early inliner yet). We don't have a CIF code for this
572 case; later we will re-do the decision in the real inliner. */
573 if (!gimple_in_ssa_p (DECL_STRUCT_FUNCTION (e->caller->decl))
574 || !gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
575 {
576 if (dump_file)
577 fprintf (dump_file, " edge not inlinable: not in SSA form\n");
578 return false;
579 }
580 if (!can_inline_edge_p (e, true, true)
581 || !can_inline_edge_by_limits_p (e, true, false, true))
582 return false;
583 return true;
584 }
585
586
587 /* Return number of calls in N. Ignore cheap builtins. */
588
589 static int
590 num_calls (struct cgraph_node *n)
591 {
592 struct cgraph_edge *e;
593 int num = 0;
594
595 for (e = n->callees; e; e = e->next_callee)
596 if (!is_inexpensive_builtin (e->callee->decl))
597 num++;
598 return num;
599 }
600
601
602 /* Return true if we are interested in inlining the small function. */
603
604 static bool
605 want_early_inline_function_p (struct cgraph_edge *e)
606 {
607 bool want_inline = true;
608 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
609
610 if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
611 ;
612 /* For AutoFDO, we need to make sure that before the profile summary, all
613 hot paths' IR look exactly the same as in the profiled binary. As a result,
614 in the einliner, we will disregard the size limit and inline those callsites
615 that are:
616 * inlined in the profiled binary, and
617 * the cloned callee has enough samples to be considered "hot". */
618 else if (flag_auto_profile && afdo_callsite_hot_enough_for_early_inline (e))
619 ;
620 else if (!DECL_DECLARED_INLINE_P (callee->decl)
621 && !opt_for_fn (e->caller->decl, flag_inline_small_functions))
622 {
623 e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
624 report_inline_failed_reason (e);
625 want_inline = false;
626 }
627 else
628 {
629 int growth = estimate_edge_growth (e);
630 int n;
631
632 if (growth <= 0)
633 ;
634 else if (!e->maybe_hot_p ()
635 && growth > 0)
636 {
637 if (dump_file)
638 fprintf (dump_file, " will not early inline: %s->%s, "
639 "call is cold and code would grow by %i\n",
640 e->caller->dump_name (),
641 callee->dump_name (),
642 growth);
643 want_inline = false;
644 }
645 else if (growth > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
646 {
647 if (dump_file)
648 fprintf (dump_file, " will not early inline: %s->%s, "
649 "growth %i exceeds --param early-inlining-insns\n",
650 e->caller->dump_name (),
651 callee->dump_name (),
652 growth);
653 want_inline = false;
654 }
655 else if ((n = num_calls (callee)) != 0
656 && growth * (n + 1) > PARAM_VALUE (PARAM_EARLY_INLINING_INSNS))
657 {
658 if (dump_file)
659 fprintf (dump_file, " will not early inline: %s->%s, "
660 "growth %i exceeds --param early-inlining-insns "
661 "divided by number of calls\n",
662 e->caller->dump_name (),
663 callee->dump_name (),
664 growth);
665 want_inline = false;
666 }
667 }
668 return want_inline;
669 }
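
/* A small worked example of the checks above (the parameter value is
   illustrative): with --param early-inlining-insns at 14, a callee whose
   estimated growth is 6 and which itself still contains 2 non-builtin
   calls is rejected because 6 * (2 + 1) = 18 > 14, while the same callee
   with no remaining calls (growth 6 <= 14) is early inlined when the call
   site is hot.  */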
670
671 /* Compute time of the edge->caller + edge->callee execution when inlining
672 does not happen. */
673
674 inline sreal
675 compute_uninlined_call_time (struct cgraph_edge *edge,
676 sreal uninlined_call_time)
677 {
678 cgraph_node *caller = (edge->caller->global.inlined_to
679 ? edge->caller->global.inlined_to
680 : edge->caller);
681
682 sreal freq = edge->sreal_frequency ();
683 if (freq > 0)
684 uninlined_call_time *= freq;
685 else
686 uninlined_call_time = uninlined_call_time >> 11;
687
688 sreal caller_time = ipa_fn_summaries->get (caller)->time;
689 return uninlined_call_time + caller_time;
690 }
691
692 /* Same as compute_uninlined_call_time but compute the time when inlining
693 does happen. */
694
695 inline sreal
696 compute_inlined_call_time (struct cgraph_edge *edge,
697 sreal time)
698 {
699 cgraph_node *caller = (edge->caller->global.inlined_to
700 ? edge->caller->global.inlined_to
701 : edge->caller);
702 sreal caller_time = ipa_fn_summaries->get (caller)->time;
703
704 sreal freq = edge->sreal_frequency ();
705 if (freq > 0)
706 time *= freq;
707 else
708 time = time >> 11;
709
710 /* This calculation should match one in ipa-inline-analysis.c
711 (estimate_edge_size_and_time). */
712 time -= (sreal)ipa_call_summaries->get (edge)->call_stmt_time * freq;
713 time += caller_time;
714 if (time <= 0)
715 time = ((sreal) 1) >> 8;
716 gcc_checking_assert (time >= 0);
717 return time;
718 }
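
/* Both helpers above weight the callee time by the edge frequency (the
   expected number of executions of the call per caller invocation).  As a
   rough illustrative example: a callee estimated at 100 time units called
   with frequency 4 contributes about 400 units to the uninlined time, while
   the inlined variant additionally subtracts the saved call overhead
   (call_stmt_time * 4).  The >> 11 scaling is merely a tiny non-zero weight
   used when the frequency is unknown or zero.  */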
719
720 /* Return true if the speedup for inlining E is bigger than
721 PARAM_MAX_INLINE_MIN_SPEEDUP. */
722
723 static bool
724 big_speedup_p (struct cgraph_edge *e)
725 {
726 sreal unspec_time;
727 sreal spec_time = estimate_edge_time (e, &unspec_time);
728 sreal time = compute_uninlined_call_time (e, unspec_time);
729 sreal inlined_time = compute_inlined_call_time (e, spec_time);
730
731 if ((time - inlined_time) * 100
732 > (sreal) (time * PARAM_VALUE (PARAM_INLINE_MIN_SPEEDUP)))
733 return true;
734 return false;
735 }
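
/* Illustrative numbers for the comparison above (the parameter comes from
   --param inline-min-speedup): if the caller plus callee time without
   inlining is 200 and the estimated time with inlining is 170, then
   (200 - 170) * 100 = 3000 is compared against 200 * param; with the
   parameter at 10 the threshold is 2000, so this counts as a big speedup.  */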
736
737 /* Return true if we are interested in inlining the small function.
738 When REPORT is true, report the reason to the dump file. */
739
740 static bool
741 want_inline_small_function_p (struct cgraph_edge *e, bool report)
742 {
743 bool want_inline = true;
744 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
745
746 /* Allow this function to be called before can_inline_edge_p,
747 since it's usually cheaper. */
748 if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
749 want_inline = false;
750 else if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
751 ;
752 else if (!DECL_DECLARED_INLINE_P (callee->decl)
753 && !opt_for_fn (e->caller->decl, flag_inline_small_functions))
754 {
755 e->inline_failed = CIF_FUNCTION_NOT_INLINE_CANDIDATE;
756 want_inline = false;
757 }
758 /* Do a fast and conservative check whether the function can be a good
759 inline candidate. At the moment we allow inline hints to
760 promote non-inline functions to inline and we increase
761 MAX_INLINE_INSNS_SINGLE 16-fold for inline functions. */
762 else if ((!DECL_DECLARED_INLINE_P (callee->decl)
763 && (!e->count.ipa ().initialized_p () || !e->maybe_hot_p ()))
764 && ipa_fn_summaries->get (callee)->min_size
765 - ipa_call_summaries->get (e)->call_stmt_size
766 > MAX (MAX_INLINE_INSNS_SINGLE, MAX_INLINE_INSNS_AUTO))
767 {
768 e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
769 want_inline = false;
770 }
771 else if ((DECL_DECLARED_INLINE_P (callee->decl)
772 || e->count.ipa ().nonzero_p ())
773 && ipa_fn_summaries->get (callee)->min_size
774 - ipa_call_summaries->get (e)->call_stmt_size
775 > 16 * MAX_INLINE_INSNS_SINGLE)
776 {
777 e->inline_failed = (DECL_DECLARED_INLINE_P (callee->decl)
778 ? CIF_MAX_INLINE_INSNS_SINGLE_LIMIT
779 : CIF_MAX_INLINE_INSNS_AUTO_LIMIT);
780 want_inline = false;
781 }
782 else
783 {
784 int growth = estimate_edge_growth (e);
785 ipa_hints hints = estimate_edge_hints (e);
786 bool big_speedup = big_speedup_p (e);
787
788 if (growth <= 0)
789 ;
790 /* Apply the MAX_INLINE_INSNS_SINGLE limit. Do not do so when
791 hints suggest that inlining the given function is very profitable. */
792 else if (DECL_DECLARED_INLINE_P (callee->decl)
793 && growth >= MAX_INLINE_INSNS_SINGLE
794 && ((!big_speedup
795 && !(hints & (INLINE_HINT_indirect_call
796 | INLINE_HINT_known_hot
797 | INLINE_HINT_loop_iterations
798 | INLINE_HINT_array_index
799 | INLINE_HINT_loop_stride)))
800 || growth >= MAX_INLINE_INSNS_SINGLE * 16))
801 {
802 e->inline_failed = CIF_MAX_INLINE_INSNS_SINGLE_LIMIT;
803 want_inline = false;
804 }
805 else if (!DECL_DECLARED_INLINE_P (callee->decl)
806 && !opt_for_fn (e->caller->decl, flag_inline_functions))
807 {
808 /* growth_likely_positive is expensive, always test it last. */
809 if (growth >= MAX_INLINE_INSNS_SINGLE
810 || growth_likely_positive (callee, growth))
811 {
812 e->inline_failed = CIF_NOT_DECLARED_INLINED;
813 want_inline = false;
814 }
815 }
816 /* Apply the MAX_INLINE_INSNS_AUTO limit for functions not declared inline.
817 Upgrade it to MAX_INLINE_INSNS_SINGLE when hints suggest that
818 inlining the given function is very profitable. */
819 else if (!DECL_DECLARED_INLINE_P (callee->decl)
820 && !big_speedup
821 && !(hints & INLINE_HINT_known_hot)
822 && growth >= ((hints & (INLINE_HINT_indirect_call
823 | INLINE_HINT_loop_iterations
824 | INLINE_HINT_array_index
825 | INLINE_HINT_loop_stride))
826 ? MAX (MAX_INLINE_INSNS_AUTO,
827 MAX_INLINE_INSNS_SINGLE)
828 : MAX_INLINE_INSNS_AUTO))
829 {
830 /* growth_likely_positive is expensive, always test it last. */
831 if (growth >= MAX_INLINE_INSNS_SINGLE
832 || growth_likely_positive (callee, growth))
833 {
834 e->inline_failed = CIF_MAX_INLINE_INSNS_AUTO_LIMIT;
835 want_inline = false;
836 }
837 }
838 /* If call is cold, do not inline when function body would grow. */
839 else if (!e->maybe_hot_p ()
840 && (growth >= MAX_INLINE_INSNS_SINGLE
841 || growth_likely_positive (callee, growth)))
842 {
843 e->inline_failed = CIF_UNLIKELY_CALL;
844 want_inline = false;
845 }
846 }
847 if (!want_inline && report)
848 report_inline_failed_reason (e);
849 return want_inline;
850 }
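
/* A condensed illustrative walk through the limits above (the numbers are
   made up for the example): with max-inline-insns-single at 400 and
   max-inline-insns-auto at 30, a function declared inline whose estimated
   growth is 450 is rejected by the MAX_INLINE_INSNS_SINGLE limit unless a
   hint such as INLINE_HINT_loop_iterations or a big speedup applies, while
   a callee not declared inline with growth 45, no hints and growth that is
   likely to materialize is rejected by the MAX_INLINE_INSNS_AUTO limit.  */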
851
852 /* EDGE is a self recursive edge.
853 We handle two cases - when function A is inlining into itself
854 or when function A is being inlined into another inlined copy of function
855 A within function B.
856
857 In the first case OUTER_NODE points to the toplevel copy of A, while
858 in the second case OUTER_NODE points to the outermost copy of A in B.
859
860 In both cases we want to be extra selective since
861 inlining the call will just cause new recursive calls to appear. */
862
863 static bool
864 want_inline_self_recursive_call_p (struct cgraph_edge *edge,
865 struct cgraph_node *outer_node,
866 bool peeling,
867 int depth)
868 {
869 char const *reason = NULL;
870 bool want_inline = true;
871 sreal caller_freq = 1;
872 int max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH_AUTO);
873
874 if (DECL_DECLARED_INLINE_P (edge->caller->decl))
875 max_depth = PARAM_VALUE (PARAM_MAX_INLINE_RECURSIVE_DEPTH);
876
877 if (!edge->maybe_hot_p ())
878 {
879 reason = "recursive call is cold";
880 want_inline = false;
881 }
882 else if (depth > max_depth)
883 {
884 reason = "--param max-inline-recursive-depth exceeded.";
885 want_inline = false;
886 }
887 else if (outer_node->global.inlined_to
888 && (caller_freq = outer_node->callers->sreal_frequency ()) == 0)
889 {
890 reason = "caller frequency is 0";
891 want_inline = false;
892 }
893
894 if (!want_inline)
895 ;
896 /* Inlining of a self recursive function into a copy of itself within another
897 function is a transformation similar to loop peeling.
898
899 Peeling is profitable if we can inline enough copies to make the probability
900 of an actual call to the self recursive function very small. Be sure that
901 the probability of recursion is small.
902
903 We ensure that the frequency of recursing is at most 1 - (1/max_depth).
904 This way the expected number of recursions is at most max_depth. */
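
/* Illustrative numbers: with a max_depth of 8 the threshold is 7/8 of the
   caller frequency at depth 1; the repeated squaring below drops it to
   (7/8)^2 at depth 2, (7/8)^4 at depth 3 and so on, so deeper recursive
   inlining is allowed only when recursing further is very unlikely.  */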
905 else if (peeling)
906 {
907 sreal max_prob = (sreal)1 - ((sreal)1 / (sreal)max_depth);
908 int i;
909 for (i = 1; i < depth; i++)
910 max_prob = max_prob * max_prob;
911 if (edge->sreal_frequency () >= max_prob * caller_freq)
912 {
913 reason = "frequency of recursive call is too large";
914 want_inline = false;
915 }
916 }
917 /* Recursive inlining, i.e. equivalent of unrolling, is profitable if
918 recursion depth is large. We reduce function call overhead and increase
919 chances that things fit in hardware return predictor.
920
921 Recursive inlining might however increase cost of stack frame setup
922 actually slowing down functions whose recursion tree is wide rather than
923 deep.
924
925 Deciding reliably on when to do recursive inlining without profile feedback
926 is tricky. For now we disable recursive inlining when probability of self
927 recursion is low.
928
929 Recursive inlining of self recursive call within loop also results in
930 large loop depths that generally optimize badly. We may want to throttle
931 down inlining in those cases. In particular this seems to happen in one
932 of libstdc++ rb tree methods. */
933 else
934 {
935 if (edge->sreal_frequency () * 100
936 <= caller_freq
937 * PARAM_VALUE (PARAM_MIN_INLINE_RECURSIVE_PROBABILITY))
938 {
939 reason = "frequency of recursive call is too small";
940 want_inline = false;
941 }
942 }
943 if (!want_inline && dump_file)
944 fprintf (dump_file, " not inlining recursively: %s\n", reason);
945 return want_inline;
946 }
947
948 /* Return true when NODE has an uninlinable caller;
949 set HAS_HOT_CALL if it has a hot call.
950 Worker for cgraph_for_node_and_aliases. */
951
952 static bool
953 check_callers (struct cgraph_node *node, void *has_hot_call)
954 {
955 struct cgraph_edge *e;
956 for (e = node->callers; e; e = e->next_caller)
957 {
958 if (!opt_for_fn (e->caller->decl, flag_inline_functions_called_once)
959 || !opt_for_fn (e->caller->decl, optimize))
960 return true;
961 if (!can_inline_edge_p (e, true))
962 return true;
963 if (e->recursive_p ())
964 return true;
965 if (!can_inline_edge_by_limits_p (e, true))
966 return true;
967 if (!(*(bool *)has_hot_call) && e->maybe_hot_p ())
968 *(bool *)has_hot_call = true;
969 }
970 return false;
971 }
972
973 /* If NODE has a caller, return true. */
974
975 static bool
976 has_caller_p (struct cgraph_node *node, void *data ATTRIBUTE_UNUSED)
977 {
978 if (node->callers)
979 return true;
980 return false;
981 }
982
983 /* Decide if inlining NODE would reduce unit size by eliminating
984 the offline copy of function.
985 When COLD is true the cold calls are considered, too. */
986
987 static bool
988 want_inline_function_to_all_callers_p (struct cgraph_node *node, bool cold)
989 {
990 bool has_hot_call = false;
991
992 /* Aliases get inlined along with the function they alias. */
993 if (node->alias)
994 return false;
995 /* Already inlined? */
996 if (node->global.inlined_to)
997 return false;
998 /* Does it have callers? */
999 if (!node->call_for_symbol_and_aliases (has_caller_p, NULL, true))
1000 return false;
1001 /* Inlining into all callers would increase size? */
1002 if (estimate_growth (node) > 0)
1003 return false;
1004 /* All inlines must be possible. */
1005 if (node->call_for_symbol_and_aliases (check_callers, &has_hot_call,
1006 true))
1007 return false;
1008 if (!cold && !has_hot_call)
1009 return false;
1010 return true;
1011 }
1012
1013 /* A cost model driving the inlining heuristics in a way so that the edges
1014 with the smallest badness are inlined first. After each inlining is
1015 performed, the costs of all caller edges of the affected nodes are
1016 recomputed so the metrics may accurately depend on values such as the
1017 number of inlinable callers of the function or the function body size. */
1018
1019 static sreal
1020 edge_badness (struct cgraph_edge *edge, bool dump)
1021 {
1022 sreal badness;
1023 int growth;
1024 sreal edge_time, unspec_edge_time;
1025 struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
1026 struct ipa_fn_summary *callee_info = ipa_fn_summaries->get (callee);
1027 ipa_hints hints;
1028 cgraph_node *caller = (edge->caller->global.inlined_to
1029 ? edge->caller->global.inlined_to
1030 : edge->caller);
1031
1032 growth = estimate_edge_growth (edge);
1033 edge_time = estimate_edge_time (edge, &unspec_edge_time);
1034 hints = estimate_edge_hints (edge);
1035 gcc_checking_assert (edge_time >= 0);
1036 /* Check that inlined time is better, but tolerate some roundoff issues.
1037 FIXME: When callee profile drops to 0 we account calls more. This
1038 should be fixed by never doing that. */
1039 gcc_checking_assert ((edge_time * 100
1040 - callee_info->time * 101).to_int () <= 0
1041 || callee->count.ipa ().initialized_p ());
1042 gcc_checking_assert (growth <= callee_info->size);
1043
1044 if (dump)
1045 {
1046 fprintf (dump_file, " Badness calculation for %s -> %s\n",
1047 edge->caller->dump_name (),
1048 edge->callee->dump_name ());
1049 fprintf (dump_file, " size growth %i, time %f unspec %f ",
1050 growth,
1051 edge_time.to_double (),
1052 unspec_edge_time.to_double ());
1053 ipa_dump_hints (dump_file, hints);
1054 if (big_speedup_p (edge))
1055 fprintf (dump_file, " big_speedup");
1056 fprintf (dump_file, "\n");
1057 }
1058
1059 /* Always prefer inlining saving code size. */
1060 if (growth <= 0)
1061 {
1062 badness = (sreal) (-SREAL_MIN_SIG + growth) << (SREAL_MAX_EXP / 256);
1063 if (dump)
1064 fprintf (dump_file, " %f: Growth %d <= 0\n", badness.to_double (),
1065 growth);
1066 }
1067 /* Inlining into EXTERNAL functions is not going to change anything unless
1068 they are themselves inlined. */
1069 else if (DECL_EXTERNAL (caller->decl))
1070 {
1071 if (dump)
1072 fprintf (dump_file, " max: function is external\n");
1073 return sreal::max ();
1074 }
1075 /* When the profile is available, compute badness as:
1076
1077 time_saved * caller_count
1078 goodness = -------------------------------------------------
1079 growth_of_caller * overall_growth * combined_size
1080
1081 badness = - goodness
1082
1083 Again use a negative value to make calls with a profile appear hotter
1084 than calls without.
1085 */
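
/* A rough illustrative reading of the formula above: an edge that saves
   more time, or whose caller has a larger profile count, gets a more
   negative badness and is inlined earlier, while a larger edge growth,
   a larger overall growth of the callee or a larger combined caller size
   all inflate the denominator and push the edge towards the back of the
   queue.  Below, a positive overall growth is additionally squared while
   it is small (growing only linearly past 256), so callees whose offline
   copy is expected to disappear are strongly preferred.  */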
1086 else if (opt_for_fn (caller->decl, flag_guess_branch_prob)
1087 || caller->count.ipa ().nonzero_p ())
1088 {
1089 sreal numerator, denominator;
1090 int overall_growth;
1091 sreal inlined_time = compute_inlined_call_time (edge, edge_time);
1092
1093 numerator = (compute_uninlined_call_time (edge, unspec_edge_time)
1094 - inlined_time);
1095 if (numerator <= 0)
1096 numerator = ((sreal) 1 >> 8);
1097 if (caller->count.ipa ().nonzero_p ())
1098 numerator *= caller->count.ipa ().to_gcov_type ();
1099 else if (caller->count.ipa ().initialized_p ())
1100 numerator = numerator >> 11;
1101 denominator = growth;
1102
1103 overall_growth = callee_info->growth;
1104
1105 /* Look for inliner wrappers of the form:
1106
1107 inline_caller ()
1108 {
1109 do_fast_job...
1110 if (need_more_work)
1111 noninline_callee ();
1112 }
1113 Without penalizing this case, we would usually inline noninline_callee
1114 into inline_caller because its overall_growth is small, preventing
1115 further inlining of inline_caller.
1116
1117 Penalize only callgraph edges to functions with small overall
1118 growth ...
1119 */
1120 if (growth > overall_growth
1121 /* ... and having only one caller which is not inlined ... */
1122 && callee_info->single_caller
1123 && !edge->caller->global.inlined_to
1124 /* ... and edges executed only conditionally ... */
1125 && edge->sreal_frequency () < 1
1126 /* ... consider case where callee is not inline but caller is ... */
1127 && ((!DECL_DECLARED_INLINE_P (edge->callee->decl)
1128 && DECL_DECLARED_INLINE_P (caller->decl))
1129 /* ... or when early optimizers decided to split and edge
1130 frequency still indicates splitting is a win ... */
1131 || (callee->split_part && !caller->split_part
1132 && edge->sreal_frequency () * 100
1133 < PARAM_VALUE
1134 (PARAM_PARTIAL_INLINING_ENTRY_PROBABILITY)
1135 /* ... and do not overwrite user specified hints. */
1136 && (!DECL_DECLARED_INLINE_P (edge->callee->decl)
1137 || DECL_DECLARED_INLINE_P (caller->decl)))))
1138 {
1139 struct ipa_fn_summary *caller_info = ipa_fn_summaries->get (caller);
1140 int caller_growth = caller_info->growth;
1141
1142 /* Only apply the penalty when the caller looks like an inline
1143 candidate and is not itself called just once. */
1144 if (!caller_info->single_caller && overall_growth < caller_growth
1145 && caller_info->inlinable
1146 && caller_info->size
1147 < (DECL_DECLARED_INLINE_P (caller->decl)
1148 ? MAX_INLINE_INSNS_SINGLE : MAX_INLINE_INSNS_AUTO))
1149 {
1150 if (dump)
1151 fprintf (dump_file,
1152 " Wrapper penalty. Increasing growth %i to %i\n",
1153 overall_growth, caller_growth);
1154 overall_growth = caller_growth;
1155 }
1156 }
1157 if (overall_growth > 0)
1158 {
1159 /* Strongly prefer functions with few callers that can be inlined
1160 fully. Squaring the growth here leads to smaller binaries on average.
1161 Watch however for extreme cases and return to a linear function
1162 when growth is large. */
1163 if (overall_growth < 256)
1164 overall_growth *= overall_growth;
1165 else
1166 overall_growth += 256 * 256 - 256;
1167 denominator *= overall_growth;
1168 }
1169 denominator *= ipa_fn_summaries->get (caller)->self_size + growth;
1170
1171 badness = - numerator / denominator;
1172
1173 if (dump)
1174 {
1175 fprintf (dump_file,
1176 " %f: guessed profile. frequency %f, count %" PRId64
1177 " caller count %" PRId64
1178 " time w/o inlining %f, time with inlining %f"
1179 " overall growth %i (current) %i (original)"
1180 " %i (compensated)\n",
1181 badness.to_double (),
1182 edge->sreal_frequency ().to_double (),
1183 edge->count.ipa ().initialized_p () ? edge->count.ipa ().to_gcov_type () : -1,
1184 caller->count.ipa ().initialized_p () ? caller->count.ipa ().to_gcov_type () : -1,
1185 compute_uninlined_call_time (edge,
1186 unspec_edge_time).to_double (),
1187 inlined_time.to_double (),
1188 estimate_growth (callee),
1189 callee_info->growth, overall_growth);
1190 }
1191 }
1192 /* When the function local profile is not available or it does not give
1193 useful information (i.e. the frequency is zero), base the cost on
1194 loop nest and overall size growth, so we optimize for the overall number
1195 of functions fully inlined in the program. */
1196 else
1197 {
1198 int nest = MIN (ipa_call_summaries->get (edge)->loop_depth, 8);
1199 badness = growth;
1200
1201 /* Decrease badness if call is nested. */
1202 if (badness > 0)
1203 badness = badness >> nest;
1204 else
1205 badness = badness << nest;
1206 if (dump)
1207 fprintf (dump_file, " %f: no profile. nest %i\n",
1208 badness.to_double (), nest);
1209 }
1210 gcc_checking_assert (badness != 0);
1211
1212 if (edge->recursive_p ())
1213 badness = badness.shift (badness > 0 ? 4 : -4);
1214 if ((hints & (INLINE_HINT_indirect_call
1215 | INLINE_HINT_loop_iterations
1216 | INLINE_HINT_array_index
1217 | INLINE_HINT_loop_stride))
1218 || callee_info->growth <= 0)
1219 badness = badness.shift (badness > 0 ? -2 : 2);
1220 if (hints & (INLINE_HINT_same_scc))
1221 badness = badness.shift (badness > 0 ? 3 : -3);
1222 else if (hints & (INLINE_HINT_in_scc))
1223 badness = badness.shift (badness > 0 ? 2 : -2);
1224 else if (hints & (INLINE_HINT_cross_module))
1225 badness = badness.shift (badness > 0 ? 1 : -1);
1226 if (DECL_DISREGARD_INLINE_LIMITS (callee->decl))
1227 badness = badness.shift (badness > 0 ? -4 : 4);
1228 else if ((hints & INLINE_HINT_declared_inline))
1229 badness = badness.shift (badness > 0 ? -3 : 3);
1230 if (dump)
1231 fprintf (dump_file, " Adjusted by hints %f\n", badness.to_double ());
1232 return badness;
1233 }
1234
1235 /* Recompute badness of EDGE and update its key in HEAP if needed. */
1236 static inline void
1237 update_edge_key (edge_heap_t *heap, struct cgraph_edge *edge)
1238 {
1239 sreal badness = edge_badness (edge, false);
1240 if (edge->aux)
1241 {
1242 edge_heap_node_t *n = (edge_heap_node_t *) edge->aux;
1243 gcc_checking_assert (n->get_data () == edge);
1244
1245 /* fibonacci_heap::replace_key does busy updating of the
1246 heap that is unnecessarily expensive.
1247 We do lazy increases: after extracting the minimum, if the key
1248 turns out to be out of date, it is re-inserted into the heap
1249 with the correct value. */
1250 if (badness < n->get_key ())
1251 {
1252 if (dump_file && (dump_flags & TDF_DETAILS))
1253 {
1254 fprintf (dump_file,
1255 " decreasing badness %s -> %s, %f to %f\n",
1256 edge->caller->dump_name (),
1257 edge->callee->dump_name (),
1258 n->get_key ().to_double (),
1259 badness.to_double ());
1260 }
1261 heap->decrease_key (n, badness);
1262 }
1263 }
1264 else
1265 {
1266 if (dump_file && (dump_flags & TDF_DETAILS))
1267 {
1268 fprintf (dump_file,
1269 " enqueuing call %s -> %s, badness %f\n",
1270 edge->caller->dump_name (),
1271 edge->callee->dump_name (),
1272 badness.to_double ());
1273 }
1274 edge->aux = heap->insert (badness, edge);
1275 }
1276 }
1277
1278
1279 /* NODE was inlined.
1280 All caller edges need to be reset because
1281 the size estimates change. Similarly the callees need resetting
1282 because a better context may be known. */
1283
1284 static void
1285 reset_edge_caches (struct cgraph_node *node)
1286 {
1287 struct cgraph_edge *edge;
1288 struct cgraph_edge *e = node->callees;
1289 struct cgraph_node *where = node;
1290 struct ipa_ref *ref;
1291
1292 if (where->global.inlined_to)
1293 where = where->global.inlined_to;
1294
1295 for (edge = where->callers; edge; edge = edge->next_caller)
1296 if (edge->inline_failed)
1297 reset_edge_growth_cache (edge);
1298
1299 FOR_EACH_ALIAS (where, ref)
1300 reset_edge_caches (dyn_cast <cgraph_node *> (ref->referring));
1301
1302 if (!e)
1303 return;
1304
1305 while (true)
1306 if (!e->inline_failed && e->callee->callees)
1307 e = e->callee->callees;
1308 else
1309 {
1310 if (e->inline_failed)
1311 reset_edge_growth_cache (e);
1312 if (e->next_callee)
1313 e = e->next_callee;
1314 else
1315 {
1316 do
1317 {
1318 if (e->caller == node)
1319 return;
1320 e = e->caller->callers;
1321 }
1322 while (!e->next_callee);
1323 e = e->next_callee;
1324 }
1325 }
1326 }
1327
1328 /* Recompute HEAP nodes for each caller of NODE.
1329 UPDATED_NODES tracks nodes we already visited, to avoid redundant work.
1330 When CHECK_INLINABLITY_FOR is set, re-check only for that edge whether
1331 it is inlinable. Otherwise check all edges. */
1332
1333 static void
1334 update_caller_keys (edge_heap_t *heap, struct cgraph_node *node,
1335 bitmap updated_nodes,
1336 struct cgraph_edge *check_inlinablity_for)
1337 {
1338 struct cgraph_edge *edge;
1339 struct ipa_ref *ref;
1340
1341 if ((!node->alias && !ipa_fn_summaries->get (node)->inlinable)
1342 || node->global.inlined_to)
1343 return;
1344 if (!bitmap_set_bit (updated_nodes, node->uid))
1345 return;
1346
1347 FOR_EACH_ALIAS (node, ref)
1348 {
1349 struct cgraph_node *alias = dyn_cast <cgraph_node *> (ref->referring);
1350 update_caller_keys (heap, alias, updated_nodes, check_inlinablity_for);
1351 }
1352
1353 for (edge = node->callers; edge; edge = edge->next_caller)
1354 if (edge->inline_failed)
1355 {
1356 if (!check_inlinablity_for
1357 || check_inlinablity_for == edge)
1358 {
1359 if (can_inline_edge_p (edge, false)
1360 && want_inline_small_function_p (edge, false)
1361 && can_inline_edge_by_limits_p (edge, false))
1362 update_edge_key (heap, edge);
1363 else if (edge->aux)
1364 {
1365 report_inline_failed_reason (edge);
1366 heap->delete_node ((edge_heap_node_t *) edge->aux);
1367 edge->aux = NULL;
1368 }
1369 }
1370 else if (edge->aux)
1371 update_edge_key (heap, edge);
1372 }
1373 }
1374
1375 /* Recompute HEAP nodes for each uninlined call in NODE.
1376 This is used when we know that edge badnesses are only going to increase
1377 (we introduced a new call site) and thus all we need is to insert the newly
1378 created edges into the heap. */
1379
1380 static void
1381 update_callee_keys (edge_heap_t *heap, struct cgraph_node *node,
1382 bitmap updated_nodes)
1383 {
1384 struct cgraph_edge *e = node->callees;
1385
1386 if (!e)
1387 return;
1388 while (true)
1389 if (!e->inline_failed && e->callee->callees)
1390 e = e->callee->callees;
1391 else
1392 {
1393 enum availability avail;
1394 struct cgraph_node *callee;
1395 /* We do not reset the callee growth cache here. Since we added a new call,
1396 the growth could only have increased and consequently the badness metric
1397 does not need updating. */
1398 if (e->inline_failed
1399 && (callee = e->callee->ultimate_alias_target (&avail, e->caller))
1400 && ipa_fn_summaries->get (callee)->inlinable
1401 && avail >= AVAIL_AVAILABLE
1402 && !bitmap_bit_p (updated_nodes, callee->uid))
1403 {
1404 if (can_inline_edge_p (e, false)
1405 && want_inline_small_function_p (e, false)
1406 && can_inline_edge_by_limits_p (e, false))
1407 update_edge_key (heap, e);
1408 else if (e->aux)
1409 {
1410 report_inline_failed_reason (e);
1411 heap->delete_node ((edge_heap_node_t *) e->aux);
1412 e->aux = NULL;
1413 }
1414 }
1415 if (e->next_callee)
1416 e = e->next_callee;
1417 else
1418 {
1419 do
1420 {
1421 if (e->caller == node)
1422 return;
1423 e = e->caller->callers;
1424 }
1425 while (!e->next_callee);
1426 e = e->next_callee;
1427 }
1428 }
1429 }
1430
1431 /* Enqueue all recursive calls from NODE into priority queue depending on
1432 how likely we want to recursively inline the call. */
1433
1434 static void
1435 lookup_recursive_calls (struct cgraph_node *node, struct cgraph_node *where,
1436 edge_heap_t *heap)
1437 {
1438 struct cgraph_edge *e;
1439 enum availability avail;
1440
1441 for (e = where->callees; e; e = e->next_callee)
1442 if (e->callee == node
1443 || (e->callee->ultimate_alias_target (&avail, e->caller) == node
1444 && avail > AVAIL_INTERPOSABLE))
1445 heap->insert (-e->sreal_frequency (), e);
1446 for (e = where->callees; e; e = e->next_callee)
1447 if (!e->inline_failed)
1448 lookup_recursive_calls (node, e->callee, heap);
1449 }
1450
1451 /* Decide on recursive inlining: in case the function has recursive calls,
1452 inline until the body size reaches the given limit. If any new indirect edges
1453 are discovered in the process, add them to *NEW_EDGES, unless NEW_EDGES
1454 is NULL. */
1455
1456 static bool
1457 recursive_inlining (struct cgraph_edge *edge,
1458 vec<cgraph_edge *> *new_edges)
1459 {
1460 int limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE_AUTO);
1461 edge_heap_t heap (sreal::min ());
1462 struct cgraph_node *node;
1463 struct cgraph_edge *e;
1464 struct cgraph_node *master_clone = NULL, *next;
1465 int depth = 0;
1466 int n = 0;
1467
1468 node = edge->caller;
1469 if (node->global.inlined_to)
1470 node = node->global.inlined_to;
1471
1472 if (DECL_DECLARED_INLINE_P (node->decl))
1473 limit = PARAM_VALUE (PARAM_MAX_INLINE_INSNS_RECURSIVE);
1474
1475 /* Make sure that function is small enough to be considered for inlining. */
1476 if (estimate_size_after_inlining (node, edge) >= limit)
1477 return false;
1478 lookup_recursive_calls (node, node, &heap);
1479 if (heap.empty ())
1480 return false;
1481
1482 if (dump_file)
1483 fprintf (dump_file,
1484 " Performing recursive inlining on %s\n",
1485 node->name ());
1486
1487 /* Do the inlining and update the list of recursive calls during the process. */
1488 while (!heap.empty ())
1489 {
1490 struct cgraph_edge *curr = heap.extract_min ();
1491 struct cgraph_node *cnode, *dest = curr->callee;
1492
1493 if (!can_inline_edge_p (curr, true)
1494 || !can_inline_edge_by_limits_p (curr, true))
1495 continue;
1496
1497 /* MASTER_CLONE is produced in the case we already started modifying
1498 the function. Be sure to redirect the edge to the original body before
1499 estimating growths, otherwise we would be seeing growths after inlining
1500 into the already modified body. */
1501 if (master_clone)
1502 {
1503 curr->redirect_callee (master_clone);
1504 reset_edge_growth_cache (curr);
1505 }
1506
1507 if (estimate_size_after_inlining (node, curr) > limit)
1508 {
1509 curr->redirect_callee (dest);
1510 reset_edge_growth_cache (curr);
1511 break;
1512 }
1513
1514 depth = 1;
1515 for (cnode = curr->caller;
1516 cnode->global.inlined_to; cnode = cnode->callers->caller)
1517 if (node->decl
1518 == curr->callee->ultimate_alias_target ()->decl)
1519 depth++;
1520
1521 if (!want_inline_self_recursive_call_p (curr, node, false, depth))
1522 {
1523 curr->redirect_callee (dest);
1524 reset_edge_growth_cache (curr);
1525 continue;
1526 }
1527
1528 if (dump_file)
1529 {
1530 fprintf (dump_file,
1531 " Inlining call of depth %i", depth);
1532 if (node->count.nonzero_p ())
1533 {
1534 fprintf (dump_file, " called approx. %.2f times per call",
1535 (double)curr->count.to_gcov_type ()
1536 / node->count.to_gcov_type ());
1537 }
1538 fprintf (dump_file, "\n");
1539 }
1540 if (!master_clone)
1541 {
1542 /* We need original clone to copy around. */
1543 master_clone = node->create_clone (node->decl, node->count,
1544 false, vNULL, true, NULL, NULL);
1545 for (e = master_clone->callees; e; e = e->next_callee)
1546 if (!e->inline_failed)
1547 clone_inlined_nodes (e, true, false, NULL);
1548 curr->redirect_callee (master_clone);
1549 reset_edge_growth_cache (curr);
1550 }
1551
1552 inline_call (curr, false, new_edges, &overall_size, true);
1553 lookup_recursive_calls (node, curr->callee, &heap);
1554 n++;
1555 }
1556
1557 if (!heap.empty () && dump_file)
1558 fprintf (dump_file, " Recursive inlining growth limit met.\n");
1559
1560 if (!master_clone)
1561 return false;
1562
1563 if (dump_file)
1564 fprintf (dump_file,
1565 "\n Inlined %i times, "
1566 "body grown from size %i to %i, time %f to %f\n", n,
1567 ipa_fn_summaries->get (master_clone)->size,
1568 ipa_fn_summaries->get (node)->size,
1569 ipa_fn_summaries->get (master_clone)->time.to_double (),
1570 ipa_fn_summaries->get (node)->time.to_double ());
1571
1572 /* Remove the master clone we used for inlining. We rely on the fact that
1573 clones inlined into the master clone get queued just before the master
1574 clone, so we don't need recursion. */
1575 for (node = symtab->first_function (); node != master_clone;
1576 node = next)
1577 {
1578 next = symtab->next_function (node);
1579 if (node->global.inlined_to == master_clone)
1580 node->remove ();
1581 }
1582 master_clone->remove ();
1583 return true;
1584 }
1585
1586
1587 /* Given whole compilation unit estimate of INSNS, compute how large we can
1588 allow the unit to grow. */
1589
1590 static int
1591 compute_max_insns (int insns)
1592 {
1593 int max_insns = insns;
1594 if (max_insns < PARAM_VALUE (PARAM_LARGE_UNIT_INSNS))
1595 max_insns = PARAM_VALUE (PARAM_LARGE_UNIT_INSNS);
1596
1597 return ((int64_t) max_insns
1598 * (100 + PARAM_VALUE (PARAM_INLINE_UNIT_GROWTH)) / 100);
1599 }
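
/* For example (the parameter values are illustrative only): with a unit of
   50000 instructions and --param inline-unit-growth at 20, the unit may
   grow to 50000 * 120 / 100 = 60000 instructions; very small units are
   first rounded up to --param large-unit-insns before the percentage is
   applied, so tiny programs still get some headroom.  */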
1600
1601
1602 /* Compute badness of all edges in NEW_EDGES and add them to the HEAP. */
1603
1604 static void
1605 add_new_edges_to_heap (edge_heap_t *heap, vec<cgraph_edge *> new_edges)
1606 {
1607 while (new_edges.length () > 0)
1608 {
1609 struct cgraph_edge *edge = new_edges.pop ();
1610
1611 gcc_assert (!edge->aux);
1612 if (edge->inline_failed
1613 && can_inline_edge_p (edge, true)
1614 && want_inline_small_function_p (edge, true)
1615 && can_inline_edge_by_limits_p (edge, true))
1616 edge->aux = heap->insert (edge_badness (edge, false), edge);
1617 }
1618 }
1619
1620 /* Remove EDGE from the fibheap. */
1621
1622 static void
1623 heap_edge_removal_hook (struct cgraph_edge *e, void *data)
1624 {
1625 if (e->aux)
1626 {
1627 ((edge_heap_t *)data)->delete_node ((edge_heap_node_t *)e->aux);
1628 e->aux = NULL;
1629 }
1630 }
1631
1632 /* Return true if speculation of edge E seems useful.
1633 If ANTICIPATE_INLINING is true, be conservative and hope that E
1634 may get inlined. */
1635
1636 bool
1637 speculation_useful_p (struct cgraph_edge *e, bool anticipate_inlining)
1638 {
1639 enum availability avail;
1640 struct cgraph_node *target = e->callee->ultimate_alias_target (&avail,
1641 e->caller);
1642 struct cgraph_edge *direct, *indirect;
1643 struct ipa_ref *ref;
1644
1645 gcc_assert (e->speculative && !e->indirect_unknown_callee);
1646
1647 if (!e->maybe_hot_p ())
1648 return false;
1649
1650 /* See if IP optimizations found something potentially useful about the
1651 function. For now we look only for CONST/PURE flags. Almost everything
1652 else we propagate is useless. */
1653 if (avail >= AVAIL_AVAILABLE)
1654 {
1655 int ecf_flags = flags_from_decl_or_type (target->decl);
1656 if (ecf_flags & ECF_CONST)
1657 {
1658 e->speculative_call_info (direct, indirect, ref);
1659 if (!(indirect->indirect_info->ecf_flags & ECF_CONST))
1660 return true;
1661 }
1662 else if (ecf_flags & ECF_PURE)
1663 {
1664 e->speculative_call_info (direct, indirect, ref);
1665 if (!(indirect->indirect_info->ecf_flags & ECF_PURE))
1666 return true;
1667 }
1668 }
1669 /* If we did not manage to inline the function nor redirect
1670 to an ipa-cp clone (which is seen by having the local flag set),
1671 it is probably pointless to inline it unless the hardware is missing
1672 an indirect call predictor. */
1673 if (!anticipate_inlining && e->inline_failed && !target->local.local)
1674 return false;
1675 /* For overwritable targets there is not much to do. */
1676 if (e->inline_failed
1677 && (!can_inline_edge_p (e, false)
1678 || !can_inline_edge_by_limits_p (e, false, true)))
1679 return false;
1680 /* OK, speculation seems interesting. */
1681 return true;
1682 }
1683
1684 /* We know that EDGE is not going to be inlined.
1685 See if we can remove speculation. */
1686
1687 static void
1688 resolve_noninline_speculation (edge_heap_t *edge_heap, struct cgraph_edge *edge)
1689 {
1690 if (edge->speculative && !speculation_useful_p (edge, false))
1691 {
1692 struct cgraph_node *node = edge->caller;
1693 struct cgraph_node *where = node->global.inlined_to
1694 ? node->global.inlined_to : node;
1695 auto_bitmap updated_nodes;
1696
1697 if (edge->count.ipa ().initialized_p ())
1698 spec_rem += edge->count.ipa ();
1699 edge->resolve_speculation ();
1700 reset_edge_caches (where);
1701 ipa_update_overall_fn_summary (where);
1702 update_caller_keys (edge_heap, where,
1703 updated_nodes, NULL);
1704 update_callee_keys (edge_heap, where,
1705 updated_nodes);
1706 }
1707 }
1708
1709 /* Return true if NODE should be accounted for in the overall size estimate.
1710 Skip all nodes optimized for size so we can measure the growth of the hot
1711 part of the program regardless of the padding. */
1712
1713 bool
1714 inline_account_function_p (struct cgraph_node *node)
1715 {
1716 return (!DECL_EXTERNAL (node->decl)
1717 && !opt_for_fn (node->decl, optimize_size)
1718 && node->frequency != NODE_FREQUENCY_UNLIKELY_EXECUTED);
1719 }
1720
1721 /* Count the number of callers of NODE and store it into DATA (which
1722 points to an int). Worker for cgraph_for_node_and_aliases. */
1723
1724 static bool
1725 sum_callers (struct cgraph_node *node, void *data)
1726 {
1727 struct cgraph_edge *e;
1728 int *num_calls = (int *)data;
1729
1730 for (e = node->callers; e; e = e->next_caller)
1731 (*num_calls)++;
1732 return false;
1733 }
1734
1735 /* We use a greedy algorithm for inlining small functions:
1736 all inline candidates are put into a prioritized heap ordered by
1737 increasing badness.
1738
1739 The inlining of small functions is bounded by the unit growth parameters. */
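/* In outline, following the code below: first compute the initial unit size
   and per-node growth estimates, then seed the heap with every edge that
   passes can_inline_edge_p and want_inline_small_function_p, and finally
   keep extracting the edge with the smallest badness, re-validating its
   possibly stale key and the unit growth limit, inlining it, and updating
   the keys of the affected callers and callees.  */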
1740
1741 static void
1742 inline_small_functions (void)
1743 {
1744 struct cgraph_node *node;
1745 struct cgraph_edge *edge;
1746 edge_heap_t edge_heap (sreal::min ());
1747 auto_bitmap updated_nodes;
1748 int min_size, max_size;
1749 auto_vec<cgraph_edge *> new_indirect_edges;
1750 int initial_size = 0;
1751 struct cgraph_node **order = XCNEWVEC (cgraph_node *, symtab->cgraph_count);
1752 struct cgraph_edge_hook_list *edge_removal_hook_holder;
1753 new_indirect_edges.create (8);
1754
1755 edge_removal_hook_holder
1756 = symtab->add_edge_removal_hook (&heap_edge_removal_hook, &edge_heap);
1757
1758 /* Compute overall unit size and other global parameters used by badness
1759 metrics. */
1760
1761 max_count = profile_count::uninitialized ();
1762 ipa_reduced_postorder (order, true, NULL);
1763 free (order);
1764
1765 FOR_EACH_DEFINED_FUNCTION (node)
1766 if (!node->global.inlined_to)
1767 {
1768 if (!node->alias && node->analyzed
1769 && (node->has_gimple_body_p () || node->thunk.thunk_p)
1770 && opt_for_fn (node->decl, optimize))
1771 {
1772 struct ipa_fn_summary *info = ipa_fn_summaries->get (node);
1773 struct ipa_dfs_info *dfs = (struct ipa_dfs_info *) node->aux;
1774
1775 /* Do not account external functions; they will be optimized out
1776 if not inlined. Also only count the non-cold portion of the program. */
1777 if (inline_account_function_p (node))
1778 initial_size += info->size;
1779 info->growth = estimate_growth (node);
1780
1781 int num_calls = 0;
1782 node->call_for_symbol_and_aliases (sum_callers, &num_calls,
1783 true);
1784 if (num_calls == 1)
1785 info->single_caller = true;
1786 if (dfs && dfs->next_cycle)
1787 {
1788 struct cgraph_node *n2;
1789 int id = dfs->scc_no + 1;
1790 for (n2 = node; n2;
1791 n2 = ((struct ipa_dfs_info *) n2->aux)->next_cycle)
1792 if (opt_for_fn (n2->decl, optimize))
1793 {
1794 struct ipa_fn_summary *info2 = ipa_fn_summaries->get (n2);
1795 if (info2->scc_no)
1796 break;
1797 info2->scc_no = id;
1798 }
1799 }
1800 }
1801
1802 for (edge = node->callers; edge; edge = edge->next_caller)
1803 max_count = max_count.max (edge->count.ipa ());
1804 }
1805 ipa_free_postorder_info ();
1806 initialize_growth_caches ();
1807
1808 if (dump_file)
1809 fprintf (dump_file,
1810 "\nDeciding on inlining of small functions. Starting with size %i.\n",
1811 initial_size);
1812
1813 overall_size = initial_size;
1814 max_size = compute_max_insns (overall_size);
1815 min_size = overall_size;
1816
1817 /* Populate the heap with all edges we might inline. */
1818
1819 FOR_EACH_DEFINED_FUNCTION (node)
1820 {
1821 bool update = false;
1822 struct cgraph_edge *next = NULL;
1823 bool has_speculative = false;
1824
1825 if (!opt_for_fn (node->decl, optimize))
1826 continue;
1827
1828 if (dump_file)
1829 fprintf (dump_file, "Enqueueing calls in %s.\n", node->dump_name ());
1830
1831 for (edge = node->callees; edge; edge = next)
1832 {
1833 next = edge->next_callee;
1834 if (edge->inline_failed
1835 && !edge->aux
1836 && can_inline_edge_p (edge, true)
1837 && want_inline_small_function_p (edge, true)
1838 && can_inline_edge_by_limits_p (edge, true)
1839 && edge->inline_failed)
1840 {
1841 gcc_assert (!edge->aux);
1842 update_edge_key (&edge_heap, edge);
1843 }
1844 if (edge->speculative)
1845 has_speculative = true;
1846 }
1847 if (has_speculative)
1848 for (edge = node->callees; edge; edge = next)
1849 if (edge->speculative && !speculation_useful_p (edge,
1850 edge->aux != NULL))
1851 {
1852 edge->resolve_speculation ();
1853 update = true;
1854 }
1855 if (update)
1856 {
1857 struct cgraph_node *where = node->global.inlined_to
1858 ? node->global.inlined_to : node;
1859 ipa_update_overall_fn_summary (where);
1860 reset_edge_caches (where);
1861 update_caller_keys (&edge_heap, where,
1862 updated_nodes, NULL);
1863 update_callee_keys (&edge_heap, where,
1864 updated_nodes);
1865 bitmap_clear (updated_nodes);
1866 }
1867 }
1868
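/* Sanity check: a nonzero IPA profile should only be seen when streaming in
   LTO or when real profile feedback (-fbranch-probabilities) is in use.  */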
1869 gcc_assert (in_lto_p
1870 || !(max_count > 0)
1871 || (profile_info && flag_branch_probabilities));
1872
1873 while (!edge_heap.empty ())
1874 {
1875 int old_size = overall_size;
1876 struct cgraph_node *where, *callee;
1877 sreal badness = edge_heap.min_key ();
1878 sreal current_badness;
1879 int growth;
1880
1881 edge = edge_heap.extract_min ();
1882 gcc_assert (edge->aux);
1883 edge->aux = NULL;
1884 if (!edge->inline_failed || !edge->callee->analyzed)
1885 continue;
1886
1887 #if CHECKING_P
1888 /* Be sure that caches are maintained consistently.
1889 This check is affected by scaling roundoff errors when compiling for
1890 IPA, thus we skip it in that case. */
1891 if (!edge->callee->count.ipa_p ()
1892 && (!max_count.initialized_p () || !max_count.nonzero_p ()))
1893 {
1894 sreal cached_badness = edge_badness (edge, false);
1895
1896 int old_size_est = estimate_edge_size (edge);
1897 sreal old_time_est = estimate_edge_time (edge);
1898 int old_hints_est = estimate_edge_hints (edge);
1899
1900 reset_edge_growth_cache (edge);
1901 gcc_assert (old_size_est == estimate_edge_size (edge));
1902 gcc_assert (old_time_est == estimate_edge_time (edge));
1903 /* FIXME:
1904
1905 gcc_assert (old_hints_est == estimate_edge_hints (edge));
1906
1907 fails with profile feedback because some hints depend on the
1908 maybe_hot_edge_p predicate, and because the callee gets inlined into
1909 other calls, the edge may become cold.
1910 This ought to be fixed by computing relative probabilities
1911 for a given invocation, but that will be better done once the whole
1912 code is converted to sreals. Disable for now and revert to the "wrong"
1913 value so the enable/disable checking paths agree. */
1914 edge_growth_cache[edge->uid].hints = old_hints_est + 1;
1915
1916 /* When updating the edge costs, we only decrease badness in the keys.
1917 Increases of badness are handled lazily; when we see a key with an
1918 out-of-date value on it, we re-insert it now. */
1919 current_badness = edge_badness (edge, false);
1920 gcc_assert (cached_badness == current_badness);
1921 gcc_assert (current_badness >= badness);
1922 }
1923 else
1924 current_badness = edge_badness (edge, false);
1925 #else
1926 current_badness = edge_badness (edge, false);
1927 #endif
1928 if (current_badness != badness)
1929 {
1930 if (edge_heap.min () && current_badness > edge_heap.min_key ())
1931 {
1932 edge->aux = edge_heap.insert (current_badness, edge);
1933 continue;
1934 }
1935 else
1936 badness = current_badness;
1937 }
1938
1939 if (!can_inline_edge_p (edge, true)
1940 || !can_inline_edge_by_limits_p (edge, true))
1941 {
1942 resolve_noninline_speculation (&edge_heap, edge);
1943 continue;
1944 }
1945
1946 callee = edge->callee->ultimate_alias_target ();
1947 growth = estimate_edge_growth (edge);
1948 if (dump_file)
1949 {
1950 fprintf (dump_file,
1951 "\nConsidering %s with %i size\n",
1952 callee->dump_name (),
1953 ipa_fn_summaries->get (callee)->size);
1954 fprintf (dump_file,
1955 " to be inlined into %s in %s:%i\n"
1956 " Estimated badness is %f, frequency %.2f.\n",
1957 edge->caller->dump_name (),
1958 edge->call_stmt
1959 && (LOCATION_LOCUS (gimple_location ((const gimple *)
1960 edge->call_stmt))
1961 > BUILTINS_LOCATION)
1962 ? gimple_filename ((const gimple *) edge->call_stmt)
1963 : "unknown",
1964 edge->call_stmt
1965 ? gimple_lineno ((const gimple *) edge->call_stmt)
1966 : -1,
1967 badness.to_double (),
1968 edge->sreal_frequency ().to_double ());
1969 if (edge->count.ipa ().initialized_p ())
1970 {
1971 fprintf (dump_file, " Called ");
1972 edge->count.ipa ().dump (dump_file);
1973 fprintf (dump_file, " times\n");
1974 }
1975 if (dump_flags & TDF_DETAILS)
1976 edge_badness (edge, true);
1977 }
1978
1979 if (overall_size + growth > max_size
1980 && !DECL_DISREGARD_INLINE_LIMITS (callee->decl))
1981 {
1982 edge->inline_failed = CIF_INLINE_UNIT_GROWTH_LIMIT;
1983 report_inline_failed_reason (edge);
1984 resolve_noninline_speculation (&edge_heap, edge);
1985 continue;
1986 }
1987
1988 if (!want_inline_small_function_p (edge, true))
1989 {
1990 resolve_noninline_speculation (&edge_heap, edge);
1991 continue;
1992 }
1993
1994 /* Heuristics for inlining small functions work poorly for
1995 recursive calls, where we achieve effects similar to loop unrolling.
1996 When inlining such an edge seems profitable, leave the decision to
1997 the special-purpose recursive inliner. */
1998 if (edge->recursive_p ())
1999 {
2000 where = edge->caller;
2001 if (where->global.inlined_to)
2002 where = where->global.inlined_to;
2003 if (!recursive_inlining (edge,
2004 opt_for_fn (edge->caller->decl,
2005 flag_indirect_inlining)
2006 ? &new_indirect_edges : NULL))
2007 {
2008 edge->inline_failed = CIF_RECURSIVE_INLINING;
2009 resolve_noninline_speculation (&edge_heap, edge);
2010 continue;
2011 }
2012 reset_edge_caches (where);
2013 /* Recursive inliner inlines all recursive calls of the function
2014 at once. Consequently we need to update all callee keys. */
2015 if (opt_for_fn (edge->caller->decl, flag_indirect_inlining))
2016 add_new_edges_to_heap (&edge_heap, new_indirect_edges);
2017 update_callee_keys (&edge_heap, where, updated_nodes);
2018 bitmap_clear (updated_nodes);
2019 }
2020 else
2021 {
2022 struct cgraph_node *outer_node = NULL;
2023 int depth = 0;
2024
2025 /* Consider the case where a self-recursive function A is inlined
2026 into B. This is a desired optimization in some cases, since it
2027 leads to an effect similar to loop peeling and we might completely
2028 optimize out the recursive call. However, we must be extra
2029 selective. */
2030
2031 where = edge->caller;
2032 while (where->global.inlined_to)
2033 {
2034 if (where->decl == callee->decl)
2035 outer_node = where, depth++;
2036 where = where->callers->caller;
2037 }
2038 if (outer_node
2039 && !want_inline_self_recursive_call_p (edge, outer_node,
2040 true, depth))
2041 {
2042 edge->inline_failed
2043 = (DECL_DISREGARD_INLINE_LIMITS (edge->callee->decl)
2044 ? CIF_RECURSIVE_INLINING : CIF_UNSPECIFIED);
2045 resolve_noninline_speculation (&edge_heap, edge);
2046 continue;
2047 }
2048 else if (depth && dump_file)
2049 fprintf (dump_file, " Peeling recursion with depth %i\n", depth);
2050
2051 gcc_checking_assert (!callee->global.inlined_to);
2052 inline_call (edge, true, &new_indirect_edges, &overall_size, true);
2053 add_new_edges_to_heap (&edge_heap, new_indirect_edges);
2054
2055 reset_edge_caches (edge->callee);
2056
2057 update_callee_keys (&edge_heap, where, updated_nodes);
2058 }
2059 where = edge->caller;
2060 if (where->global.inlined_to)
2061 where = where->global.inlined_to;
2062
2063 /* Our profitability metric can depend on local properties
2064 such as the number of inlinable calls and the size of the function body.
2065 After inlining these properties might change for the function we
2066 inlined into (since its body size changed) and for the functions
2067 called by the function we inlined (since the number of their inlinable
2068 callers might change). */
2069 update_caller_keys (&edge_heap, where, updated_nodes, NULL);
2070 /* Offline copy count has possibly changed, recompute if profile is
2071 available. */
2072 struct cgraph_node *n = cgraph_node::get (edge->callee->decl);
2073 if (n != edge->callee && n->analyzed && n->count.ipa ().initialized_p ())
2074 update_callee_keys (&edge_heap, n, updated_nodes);
2075 bitmap_clear (updated_nodes);
2076
2077 if (dump_file)
2078 {
2079 fprintf (dump_file,
2080 " Inlined %s into %s which now has time %f and size %i, "
2081 "net change of %+i.\n",
2082 xstrdup_for_dump (edge->callee->name ()),
2083 xstrdup_for_dump (edge->caller->name ()),
2084 ipa_fn_summaries->get (edge->caller)->time.to_double (),
2085 ipa_fn_summaries->get (edge->caller)->size,
2086 overall_size - old_size);
2087 }
2088 if (min_size > overall_size)
2089 {
2090 min_size = overall_size;
2091 max_size = compute_max_insns (min_size);
2092
2093 if (dump_file)
2094 fprintf (dump_file, "New minimal size reached: %i\n", min_size);
2095 }
2096 }
2097
2098 free_growth_caches ();
2099 if (dump_file)
2100 fprintf (dump_file,
2101 "Unit growth for small function inlining: %i->%i (%i%%)\n",
2102 initial_size, overall_size,
2103 initial_size ? overall_size * 100 / (initial_size) - 100: 0);
2104 symtab->remove_edge_removal_hook (edge_removal_hook_holder);
2105 }
2106
2107 /* Flatten NODE. Performed both during early inlining and
2108 at IPA inlining time. */
2109
2110 static void
2111 flatten_function (struct cgraph_node *node, bool early)
2112 {
2113 struct cgraph_edge *e;
2114
2115 /* We shouldn't be called recursively when we are being processed. */
2116 gcc_assert (node->aux == NULL);
2117
2118 node->aux = (void *) node;
2119
2120 for (e = node->callees; e; e = e->next_callee)
2121 {
2122 struct cgraph_node *orig_callee;
2123 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
2124
2125 /* Have we hit a cycle? It is time to give up. */
2126 if (callee->aux)
2127 {
2128 if (dump_file)
2129 fprintf (dump_file,
2130 "Not inlining %s into %s to avoid cycle.\n",
2131 xstrdup_for_dump (callee->name ()),
2132 xstrdup_for_dump (e->caller->name ()));
2133 if (cgraph_inline_failed_type (e->inline_failed) != CIF_FINAL_ERROR)
2134 e->inline_failed = CIF_RECURSIVE_INLINING;
2135 continue;
2136 }
2137
2138 /* When the edge is already inlined, we just need to recurse into
2139 it in order to fully flatten the leaves. */
2140 if (!e->inline_failed)
2141 {
2142 flatten_function (callee, early);
2143 continue;
2144 }
2145
2146 /* The flatten attribute needs to be processed during late inlining. For
2147 extra code quality, however, we also do flattening during early
2148 optimization. */
2149 if (!early
2150 ? !can_inline_edge_p (e, true)
2151 && !can_inline_edge_by_limits_p (e, true)
2152 : !can_early_inline_edge_p (e))
2153 continue;
2154
2155 if (e->recursive_p ())
2156 {
2157 if (dump_file)
2158 fprintf (dump_file, "Not inlining: recursive call.\n");
2159 continue;
2160 }
2161
2162 if (gimple_in_ssa_p (DECL_STRUCT_FUNCTION (node->decl))
2163 != gimple_in_ssa_p (DECL_STRUCT_FUNCTION (callee->decl)))
2164 {
2165 if (dump_file)
2166 fprintf (dump_file, "Not inlining: SSA form does not match.\n");
2167 continue;
2168 }
2169
2170 /* Inline the edge and flatten the inline clone. Avoid
2171 recursing through the original node if the node was cloned. */
2172 if (dump_file)
2173 fprintf (dump_file, " Inlining %s into %s.\n",
2174 xstrdup_for_dump (callee->name ()),
2175 xstrdup_for_dump (e->caller->name ()));
2176 orig_callee = callee;
2177 inline_call (e, true, NULL, NULL, false);
2178 if (e->callee != orig_callee)
2179 orig_callee->aux = (void *) node;
2180 flatten_function (e->callee, early);
2181 if (e->callee != orig_callee)
2182 orig_callee->aux = NULL;
2183 }
2184
2185 node->aux = NULL;
2186 if (!node->global.inlined_to)
2187 ipa_update_overall_fn_summary (node);
2188 }
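/* flatten_function is used both from ipa_inline for nodes carrying the
   flatten attribute and from early_inliner when the attribute is seen during
   early inlining; the EARLY flag selects which inlinability checks apply.  */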
2189
2190 /* Inline NODE into all callers. Worker for cgraph_for_node_and_aliases.
2191 DATA points to the number of calls originally found so we avoid infinite
2192 recursion. */
2193
2194 static bool
2195 inline_to_all_callers_1 (struct cgraph_node *node, void *data,
2196 hash_set<cgraph_node *> *callers)
2197 {
2198 int *num_calls = (int *)data;
2199 bool callee_removed = false;
2200
2201 while (node->callers && !node->global.inlined_to)
2202 {
2203 struct cgraph_node *caller = node->callers->caller;
2204
2205 if (!can_inline_edge_p (node->callers, true)
2206 || !can_inline_edge_by_limits_p (node->callers, true)
2207 || node->callers->recursive_p ())
2208 {
2209 if (dump_file)
2210 fprintf (dump_file, "Uninlinable call found; giving up.\n");
2211 *num_calls = 0;
2212 return false;
2213 }
2214
2215 if (dump_file)
2216 {
2217 fprintf (dump_file,
2218 "\nInlining %s size %i.\n",
2219 node->name (),
2220 ipa_fn_summaries->get (node)->size);
2221 fprintf (dump_file,
2222 " Called once from %s %i insns.\n",
2223 node->callers->caller->name (),
2224 ipa_fn_summaries->get (node->callers->caller)->size);
2225 }
2226
2227 /* Remember which callers we inlined into, delaying the update of the
2228 overall summary. */
2229 callers->add (node->callers->caller);
2230 inline_call (node->callers, true, NULL, NULL, false, &callee_removed);
2231 if (dump_file)
2232 fprintf (dump_file,
2233 " Inlined into %s which now has %i size\n",
2234 caller->name (),
2235 ipa_fn_summaries->get (caller)->size);
2236 if (!(*num_calls)--)
2237 {
2238 if (dump_file)
2239 fprintf (dump_file, "New calls found; giving up.\n");
2240 return callee_removed;
2241 }
2242 if (callee_removed)
2243 return true;
2244 }
2245 return false;
2246 }
2247
2248 /* Wrapper around inline_to_all_callers_1 doing delayed overall summary
2249 update. */
2250
2251 static bool
2252 inline_to_all_callers (struct cgraph_node *node, void *data)
2253 {
2254 hash_set<cgraph_node *> callers;
2255 bool res = inline_to_all_callers_1 (node, data, &callers);
2256 /* Perform the delayed update of the overall summary of all callers
2257 processed. This avoids quadratic behavior in the cases where
2258 we have a lot of calls to the same function. */
2259 for (hash_set<cgraph_node *>::iterator i = callers.begin ();
2260 i != callers.end (); ++i)
2261 ipa_update_overall_fn_summary (*i);
2262 return res;
2263 }
2264
2265 /* Output overall time estimate. */
2266 static void
2267 dump_overall_stats (void)
2268 {
2269 sreal sum_weighted = 0, sum = 0;
2270 struct cgraph_node *node;
2271
2272 FOR_EACH_DEFINED_FUNCTION (node)
2273 if (!node->global.inlined_to
2274 && !node->alias)
2275 {
2276 sreal time = ipa_fn_summaries->get (node)->time;
2277 sum += time;
2278 if (node->count.ipa ().initialized_p ())
2279 sum_weighted += time * node->count.ipa ().to_gcov_type ();
2280 }
2281 fprintf (dump_file, "Overall time estimate: "
2282 "%f weighted by profile: "
2283 "%f\n", sum.to_double (), sum_weighted.to_double ());
2284 }
2285
2286 /* Output some useful stats about inlining. */
2287
2288 static void
2289 dump_inline_stats (void)
2290 {
2291 int64_t inlined_cnt = 0, inlined_indir_cnt = 0;
2292 int64_t inlined_virt_cnt = 0, inlined_virt_indir_cnt = 0;
2293 int64_t noninlined_cnt = 0, noninlined_indir_cnt = 0;
2294 int64_t noninlined_virt_cnt = 0, noninlined_virt_indir_cnt = 0;
2295 int64_t inlined_speculative = 0, inlined_speculative_ply = 0;
2296 int64_t indirect_poly_cnt = 0, indirect_cnt = 0;
2297 int64_t reason[CIF_N_REASONS][2];
2298 sreal reason_freq[CIF_N_REASONS];
2299 int i;
2300 struct cgraph_node *node;
2301
2302 memset (reason, 0, sizeof (reason));
2303 for (i=0; i < CIF_N_REASONS; i++)
2304 reason_freq[i] = 0;
2305 FOR_EACH_DEFINED_FUNCTION (node)
2306 {
2307 struct cgraph_edge *e;
2308 for (e = node->callees; e; e = e->next_callee)
2309 {
2310 if (e->inline_failed)
2311 {
2312 if (e->count.ipa ().initialized_p ())
2313 reason[(int) e->inline_failed][0] += e->count.ipa ().to_gcov_type ();
2314 reason_freq[(int) e->inline_failed] += e->sreal_frequency ();
2315 reason[(int) e->inline_failed][1] ++;
2316 if (DECL_VIRTUAL_P (e->callee->decl)
2317 && e->count.ipa ().initialized_p ())
2318 {
2319 if (e->indirect_inlining_edge)
2320 noninlined_virt_indir_cnt += e->count.ipa ().to_gcov_type ();
2321 else
2322 noninlined_virt_cnt += e->count.ipa ().to_gcov_type ();
2323 }
2324 else if (e->count.ipa ().initialized_p ())
2325 {
2326 if (e->indirect_inlining_edge)
2327 noninlined_indir_cnt += e->count.ipa ().to_gcov_type ();
2328 else
2329 noninlined_cnt += e->count.ipa ().to_gcov_type ();
2330 }
2331 }
2332 else if (e->count.ipa ().initialized_p ())
2333 {
2334 if (e->speculative)
2335 {
2336 if (DECL_VIRTUAL_P (e->callee->decl))
2337 inlined_speculative_ply += e->count.ipa ().to_gcov_type ();
2338 else
2339 inlined_speculative += e->count.ipa ().to_gcov_type ();
2340 }
2341 else if (DECL_VIRTUAL_P (e->callee->decl))
2342 {
2343 if (e->indirect_inlining_edge)
2344 inlined_virt_indir_cnt += e->count.ipa ().to_gcov_type ();
2345 else
2346 inlined_virt_cnt += e->count.ipa ().to_gcov_type ();
2347 }
2348 else
2349 {
2350 if (e->indirect_inlining_edge)
2351 inlined_indir_cnt += e->count.ipa ().to_gcov_type ();
2352 else
2353 inlined_cnt += e->count.ipa ().to_gcov_type ();
2354 }
2355 }
2356 }
2357 for (e = node->indirect_calls; e; e = e->next_callee)
2358 if (e->indirect_info->polymorphic
2359 & e->count.ipa ().initialized_p ())
2360 indirect_poly_cnt += e->count.ipa ().to_gcov_type ();
2361 else if (e->count.ipa ().initialized_p ())
2362 indirect_cnt += e->count.ipa ().to_gcov_type ();
2363 }
2364 if (max_count.initialized_p ())
2365 {
2366 fprintf (dump_file,
2367 "Inlined %" PRId64 " + speculative "
2368 "%" PRId64 " + speculative polymorphic "
2369 "%" PRId64 " + previously indirect "
2370 "%" PRId64 " + virtual "
2371 "%" PRId64 " + virtual and previously indirect "
2372 "%" PRId64 "\n" "Not inlined "
2373 "%" PRId64 " + previously indirect "
2374 "%" PRId64 " + virtual "
2375 "%" PRId64 " + virtual and previously indirect "
2376 "%" PRId64 " + stil indirect "
2377 "%" PRId64 " + still indirect polymorphic "
2378 "%" PRId64 "\n", inlined_cnt,
2379 inlined_speculative, inlined_speculative_ply,
2380 inlined_indir_cnt, inlined_virt_cnt, inlined_virt_indir_cnt,
2381 noninlined_cnt, noninlined_indir_cnt, noninlined_virt_cnt,
2382 noninlined_virt_indir_cnt, indirect_cnt, indirect_poly_cnt);
2383 fprintf (dump_file, "Removed speculations ");
2384 spec_rem.dump (dump_file);
2385 fprintf (dump_file, "\n");
2386 }
2387 dump_overall_stats ();
2388 fprintf (dump_file, "\nWhy inlining failed?\n");
2389 for (i = 0; i < CIF_N_REASONS; i++)
2390 if (reason[i][1])
2391 fprintf (dump_file, "%-50s: %8i calls, %8f freq, %" PRId64" count\n",
2392 cgraph_inline_failed_string ((cgraph_inline_failed_t) i),
2393 (int) reason[i][1], reason_freq[i].to_double (), reason[i][0]);
2394 }
2395
2396 /* Called when node is removed. */
2397
2398 static void
2399 flatten_remove_node_hook (struct cgraph_node *node, void *data)
2400 {
2401 if (lookup_attribute ("flatten", DECL_ATTRIBUTES (node->decl)) == NULL)
2402 return;
2403
2404 hash_set<struct cgraph_node *> *removed
2405 = (hash_set<struct cgraph_node *> *) data;
2406 removed->add (node);
2407 }
2408
2409 /* Decide on the inlining. We do so in topological order to avoid
2410 the expense of updating data structures. */
2411
2412 static unsigned int
2413 ipa_inline (void)
2414 {
2415 struct cgraph_node *node;
2416 int nnodes;
2417 struct cgraph_node **order;
2418 int i, j;
2419 int cold;
2420 bool remove_functions = false;
2421
2422 order = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
2423
2424 if (dump_file)
2425 ipa_dump_fn_summaries (dump_file);
2426
2427 nnodes = ipa_reverse_postorder (order);
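/* spec_rem accumulates the profile count of the speculative calls we end up
   resolving (removing); it is reported later by dump_inline_stats.  */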
2428 spec_rem = profile_count::zero ();
2429
2430 FOR_EACH_FUNCTION (node)
2431 {
2432 node->aux = 0;
2433
2434 /* Recompute the default reasons for inlining because they may have
2435 changed during merging. */
2436 if (in_lto_p)
2437 {
2438 for (cgraph_edge *e = node->callees; e; e = e->next_callee)
2439 {
2440 gcc_assert (e->inline_failed);
2441 initialize_inline_failed (e);
2442 }
2443 for (cgraph_edge *e = node->indirect_calls; e; e = e->next_callee)
2444 initialize_inline_failed (e);
2445 }
2446 }
2447
2448 if (dump_file)
2449 fprintf (dump_file, "\nFlattening functions:\n");
2450
2451 /* First shrink order array, so that it only contains nodes with
2452 flatten attribute. */
2453 for (i = nnodes - 1, j = i; i >= 0; i--)
2454 {
2455 node = order[i];
2456 if (lookup_attribute ("flatten",
2457 DECL_ATTRIBUTES (node->decl)) != NULL)
2458 order[j--] = order[i];
2459 }
2460
2461 /* After the above loop, order[j + 1] ... order[nnodes - 1] contain
2462 nodes with flatten attribute. If there is more than one such
2463 node, we need to register a node removal hook, as flatten_function
2464 could remove other nodes with flatten attribute. See PR82801. */
2465 struct cgraph_node_hook_list *node_removal_hook_holder = NULL;
2466 hash_set<struct cgraph_node *> *flatten_removed_nodes = NULL;
2467 if (j < nnodes - 2)
2468 {
2469 flatten_removed_nodes = new hash_set<struct cgraph_node *>;
2470 node_removal_hook_holder
2471 = symtab->add_cgraph_removal_hook (&flatten_remove_node_hook,
2472 flatten_removed_nodes);
2473 }
2474
2475 /* In the first pass handle functions to be flattened. Do this with
2476 a priority so none of our later choices will make this impossible. */
2477 for (i = nnodes - 1; i > j; i--)
2478 {
2479 node = order[i];
2480 if (flatten_removed_nodes
2481 && flatten_removed_nodes->contains (node))
2482 continue;
2483
2484 /* Handle nodes to be flattened.
2485 Ideally when processing callees we stop inlining at the
2486 entry of cycles, possibly cloning that entry point and
2487 trying to flatten it, turning it into a self-recursive
2488 function. */
2489 if (dump_file)
2490 fprintf (dump_file, "Flattening %s\n", node->name ());
2491 flatten_function (node, false);
2492 }
2493
2494 if (j < nnodes - 2)
2495 {
2496 symtab->remove_cgraph_removal_hook (node_removal_hook_holder);
2497 delete flatten_removed_nodes;
2498 }
2499 free (order);
2500
2501 if (dump_file)
2502 dump_overall_stats ();
2503
2504 inline_small_functions ();
2505
2506 gcc_assert (symtab->state == IPA_SSA);
2507 symtab->state = IPA_SSA_AFTER_INLINING;
2508 /* Do first after-inlining removal. We want to remove all "stale" extern
2509 inline functions and virtual functions so we really know what is called
2510 once. */
2511 symtab->remove_unreachable_nodes (dump_file);
2512
2513 /* Inline functions with the property that after inlining into all callers
2514 the code size will shrink because the out-of-line copy is eliminated.
2515 We do this regardless of the callee size as long as function growth limits
2516 are met. */
2517 if (dump_file)
2518 fprintf (dump_file,
2519 "\nDeciding on functions to be inlined into all callers and "
2520 "removing useless speculations:\n");
2521
2522 /* Inlining one function called once has a good chance of preventing
2523 inlining of another function into the same callee. Ideally we should
2524 work in priority order, but probably inlining hot functions first
2525 is a good cut without the extra pain of maintaining the queue.
2526
2527 ??? This is not really fitting the bill perfectly: inlining a function
2528 into a callee often leads to better optimization of the callee due to
2529 increased context for optimization.
2530 For example, if main() calls a function that outputs help
2531 and then a function that does the main optimization, we should inline
2532 the second with priority even if both calls are cold by themselves.
2533
2534 We probably want to implement a new predicate replacing our use of
2535 maybe_hot_edge, interpreted as maybe_hot_edge || callee is known
2536 to be hot. */
2537 for (cold = 0; cold <= 1; cold ++)
2538 {
2539 FOR_EACH_DEFINED_FUNCTION (node)
2540 {
2541 struct cgraph_edge *edge, *next;
2542 bool update=false;
2543
2544 if (!opt_for_fn (node->decl, optimize)
2545 || !opt_for_fn (node->decl, flag_inline_functions_called_once))
2546 continue;
2547
2548 for (edge = node->callees; edge; edge = next)
2549 {
2550 next = edge->next_callee;
2551 if (edge->speculative && !speculation_useful_p (edge, false))
2552 {
2553 if (edge->count.ipa ().initialized_p ())
2554 spec_rem += edge->count.ipa ();
2555 edge->resolve_speculation ();
2556 update = true;
2557 remove_functions = true;
2558 }
2559 }
2560 if (update)
2561 {
2562 struct cgraph_node *where = node->global.inlined_to
2563 ? node->global.inlined_to : node;
2564 reset_edge_caches (where);
2565 ipa_update_overall_fn_summary (where);
2566 }
2567 if (want_inline_function_to_all_callers_p (node, cold))
2568 {
2569 int num_calls = 0;
2570 node->call_for_symbol_and_aliases (sum_callers, &num_calls,
2571 true);
2572 while (node->call_for_symbol_and_aliases
2573 (inline_to_all_callers, &num_calls, true))
2574 ;
2575 remove_functions = true;
2576 }
2577 }
2578 }
2579
2580 /* Free ipa-prop structures if they are no longer needed. */
2581 ipa_free_all_structures_after_iinln ();
2582
2583 if (dump_file)
2584 {
2585 fprintf (dump_file,
2586 "\nInlined %i calls, eliminated %i functions\n\n",
2587 ncalls_inlined, nfunctions_inlined);
2588 dump_inline_stats ();
2589 }
2590
2591 if (dump_file)
2592 ipa_dump_fn_summaries (dump_file);
2593 return remove_functions ? TODO_remove_functions : 0;
2594 }
2595
2596 /* Inline always-inline function calls in NODE. */
2597
2598 static bool
2599 inline_always_inline_functions (struct cgraph_node *node)
2600 {
2601 struct cgraph_edge *e;
2602 bool inlined = false;
2603
2604 for (e = node->callees; e; e = e->next_callee)
2605 {
2606 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
2607 if (!DECL_DISREGARD_INLINE_LIMITS (callee->decl))
2608 continue;
2609
2610 if (e->recursive_p ())
2611 {
2612 if (dump_file)
2613 fprintf (dump_file, " Not inlining recursive call to %s.\n",
2614 e->callee->name ());
2615 e->inline_failed = CIF_RECURSIVE_INLINING;
2616 continue;
2617 }
2618
2619 if (!can_early_inline_edge_p (e))
2620 {
2621 /* Set inlined to true if the callee is marked "always_inline" but
2622 is not inlinable. This will allow flagging an error later in
2623 expand_call_inline in tree-inline.c. */
2624 if (lookup_attribute ("always_inline",
2625 DECL_ATTRIBUTES (callee->decl)) != NULL)
2626 inlined = true;
2627 continue;
2628 }
2629
2630 if (dump_file)
2631 fprintf (dump_file, " Inlining %s into %s (always_inline).\n",
2632 xstrdup_for_dump (e->callee->name ()),
2633 xstrdup_for_dump (e->caller->name ()));
2634 inline_call (e, true, NULL, NULL, false);
2635 inlined = true;
2636 }
2637 if (inlined)
2638 ipa_update_overall_fn_summary (node);
2639
2640 return inlined;
2641 }
2642
2643 /* Decide on the inlining. We do so in topological order to avoid
2644 the expense of updating data structures. */
2645
2646 static bool
2647 early_inline_small_functions (struct cgraph_node *node)
2648 {
2649 struct cgraph_edge *e;
2650 bool inlined = false;
2651
2652 for (e = node->callees; e; e = e->next_callee)
2653 {
2654 struct cgraph_node *callee = e->callee->ultimate_alias_target ();
2655 if (!ipa_fn_summaries->get (callee)->inlinable
2656 || !e->inline_failed)
2657 continue;
2658
2659 /* Do not consider functions not declared inline. */
2660 if (!DECL_DECLARED_INLINE_P (callee->decl)
2661 && !opt_for_fn (node->decl, flag_inline_small_functions)
2662 && !opt_for_fn (node->decl, flag_inline_functions))
2663 continue;
2664
2665 if (dump_file)
2666 fprintf (dump_file, "Considering inline candidate %s.\n",
2667 callee->name ());
2668
2669 if (!can_early_inline_edge_p (e))
2670 continue;
2671
2672 if (e->recursive_p ())
2673 {
2674 if (dump_file)
2675 fprintf (dump_file, " Not inlining: recursive call.\n");
2676 continue;
2677 }
2678
2679 if (!want_early_inline_function_p (e))
2680 continue;
2681
2682 if (dump_file)
2683 fprintf (dump_file, " Inlining %s into %s.\n",
2684 xstrdup_for_dump (callee->name ()),
2685 xstrdup_for_dump (e->caller->name ()));
2686 inline_call (e, true, NULL, NULL, false);
2687 inlined = true;
2688 }
2689
2690 if (inlined)
2691 ipa_update_overall_fn_summary (node);
2692
2693 return inlined;
2694 }
2695
2696 unsigned int
2697 early_inliner (function *fun)
2698 {
2699 struct cgraph_node *node = cgraph_node::get (current_function_decl);
2700 struct cgraph_edge *edge;
2701 unsigned int todo = 0;
2702 int iterations = 0;
2703 bool inlined = false;
2704
2705 if (seen_error ())
2706 return 0;
2707
2708 /* Do nothing if data structures for the ipa-inliner are already computed.
2709 This happens when some pass decides to construct a new function and
2710 cgraph_add_new_function calls lowering passes and early optimization on
2711 it. This may confuse us when the early inliner decides to inline a call
2712 to a function clone, because function clones don't have a parameter list
2713 in ipa-prop matching their signature. */
2714 if (ipa_node_params_sum)
2715 return 0;
2716
2717 if (flag_checking)
2718 node->verify ();
2719 node->remove_all_references ();
2720
2721 /* Rebuild this reference because it doesn't depend on the
2722 function's body and it's required to pass cgraph_node
2723 verification. */
2724 if (node->instrumented_version
2725 && !node->instrumentation_clone)
2726 node->create_reference (node->instrumented_version, IPA_REF_CHKP, NULL);
2727
2728 /* Even when not optimizing or not inlining inline always-inline
2729 functions. */
2730 inlined = inline_always_inline_functions (node);
2731
2732 if (!optimize
2733 || flag_no_inline
2734 || !flag_early_inlining
2735 /* Never inline regular functions into always-inline functions
2736 during incremental inlining. This sucks as functions calling
2737 always-inline functions will get less optimized, but at the
2738 same time inlining a function that calls an always-inline
2739 function into an always-inline function might introduce
2740 cycles of edges to be always inlined in the callgraph.
2741
2742 We might want to be smarter and just avoid this type of inlining. */
2743 || (DECL_DISREGARD_INLINE_LIMITS (node->decl)
2744 && lookup_attribute ("always_inline",
2745 DECL_ATTRIBUTES (node->decl))))
2746 ;
2747 else if (lookup_attribute ("flatten",
2748 DECL_ATTRIBUTES (node->decl)) != NULL)
2749 {
2750 /* When the function is marked to be flattened, recursively inline
2751 all calls in it. */
2752 if (dump_file)
2753 fprintf (dump_file,
2754 "Flattening %s\n", node->name ());
2755 flatten_function (node, true);
2756 inlined = true;
2757 }
2758 else
2759 {
2760 /* If some always_inline functions were inlined, apply the changes.
2761 This way we will not account always-inline functions in the growth limits,
2762 and moreover we will inline calls from always-inline functions that we
2763 skipped previously because of the conditional above. */
2764 if (inlined)
2765 {
2766 timevar_push (TV_INTEGRATION);
2767 todo |= optimize_inline_calls (current_function_decl);
2768 /* optimize_inline_calls call above might have introduced new
2769 statements that don't have inline parameters computed. */
2770 for (edge = node->callees; edge; edge = edge->next_callee)
2771 {
2772 struct ipa_call_summary *es = ipa_call_summaries->get (edge);
2773 es->call_stmt_size
2774 = estimate_num_insns (edge->call_stmt, &eni_size_weights);
2775 es->call_stmt_time
2776 = estimate_num_insns (edge->call_stmt, &eni_time_weights);
2777 }
2778 ipa_update_overall_fn_summary (node);
2779 inlined = false;
2780 timevar_pop (TV_INTEGRATION);
2781 }
2782 /* We iterate incremental inlining to get trivial cases of indirect
2783 inlining. */
2784 while (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS)
2785 && early_inline_small_functions (node))
2786 {
2787 timevar_push (TV_INTEGRATION);
2788 todo |= optimize_inline_calls (current_function_decl);
2789
2790 /* Technically we ought to recompute inline parameters so the new
2791 iteration of the early inliner works as expected. However, the
2792 values we have are approximately right, and thus we only need to update
2793 edge info that might be cleared out for newly discovered edges. */
2794 for (edge = node->callees; edge; edge = edge->next_callee)
2795 {
2796 /* We have no summary for new bound store calls yet. */
2797 struct ipa_call_summary *es = ipa_call_summaries->get (edge);
2798 es->call_stmt_size
2799 = estimate_num_insns (edge->call_stmt, &eni_size_weights);
2800 es->call_stmt_time
2801 = estimate_num_insns (edge->call_stmt, &eni_time_weights);
2802
2803 if (edge->callee->decl
2804 && !gimple_check_call_matching_types (
2805 edge->call_stmt, edge->callee->decl, false))
2806 {
2807 edge->inline_failed = CIF_MISMATCHED_ARGUMENTS;
2808 edge->call_stmt_cannot_inline_p = true;
2809 }
2810 }
2811 if (iterations < PARAM_VALUE (PARAM_EARLY_INLINER_MAX_ITERATIONS) - 1)
2812 ipa_update_overall_fn_summary (node);
2813 timevar_pop (TV_INTEGRATION);
2814 iterations++;
2815 inlined = false;
2816 }
2817 if (dump_file)
2818 fprintf (dump_file, "Iterations: %i\n", iterations);
2819 }
2820
2821 if (inlined)
2822 {
2823 timevar_push (TV_INTEGRATION);
2824 todo |= optimize_inline_calls (current_function_decl);
2825 timevar_pop (TV_INTEGRATION);
2826 }
2827
2828 fun->always_inline_functions_inlined = true;
2829
2830 return todo;
2831 }
2832
2833 /* Do inlining of small functions. Doing so early helps profiling and other
2834 passes to be somewhat more effective and avoids some code duplication in
2835 the later real inlining pass for testcases with very many function calls. */
2836
2837 namespace {
2838
2839 const pass_data pass_data_early_inline =
2840 {
2841 GIMPLE_PASS, /* type */
2842 "einline", /* name */
2843 OPTGROUP_INLINE, /* optinfo_flags */
2844 TV_EARLY_INLINING, /* tv_id */
2845 PROP_ssa, /* properties_required */
2846 0, /* properties_provided */
2847 0, /* properties_destroyed */
2848 0, /* todo_flags_start */
2849 0, /* todo_flags_finish */
2850 };
2851
2852 class pass_early_inline : public gimple_opt_pass
2853 {
2854 public:
2855 pass_early_inline (gcc::context *ctxt)
2856 : gimple_opt_pass (pass_data_early_inline, ctxt)
2857 {}
2858
2859 /* opt_pass methods: */
2860 virtual unsigned int execute (function *);
2861
2862 }; // class pass_early_inline
2863
2864 unsigned int
2865 pass_early_inline::execute (function *fun)
2866 {
2867 return early_inliner (fun);
2868 }
2869
2870 } // anon namespace
2871
2872 gimple_opt_pass *
2873 make_pass_early_inline (gcc::context *ctxt)
2874 {
2875 return new pass_early_inline (ctxt);
2876 }
2877
2878 namespace {
2879
2880 const pass_data pass_data_ipa_inline =
2881 {
2882 IPA_PASS, /* type */
2883 "inline", /* name */
2884 OPTGROUP_INLINE, /* optinfo_flags */
2885 TV_IPA_INLINING, /* tv_id */
2886 0, /* properties_required */
2887 0, /* properties_provided */
2888 0, /* properties_destroyed */
2889 0, /* todo_flags_start */
2890 ( TODO_dump_symtab ), /* todo_flags_finish */
2891 };
2892
2893 class pass_ipa_inline : public ipa_opt_pass_d
2894 {
2895 public:
2896 pass_ipa_inline (gcc::context *ctxt)
2897 : ipa_opt_pass_d (pass_data_ipa_inline, ctxt,
2898 NULL, /* generate_summary */
2899 NULL, /* write_summary */
2900 NULL, /* read_summary */
2901 NULL, /* write_optimization_summary */
2902 NULL, /* read_optimization_summary */
2903 NULL, /* stmt_fixup */
2904 0, /* function_transform_todo_flags_start */
2905 inline_transform, /* function_transform */
2906 NULL) /* variable_transform */
2907 {}
2908
2909 /* opt_pass methods: */
2910 virtual unsigned int execute (function *) { return ipa_inline (); }
2911
2912 }; // class pass_ipa_inline
2913
2914 } // anon namespace
2915
2916 ipa_opt_pass_d *
2917 make_pass_ipa_inline (gcc::context *ctxt)
2918 {
2919 return new pass_ipa_inline (ctxt);
2920 }
2921