/* Callgraph transformations to handle inlining
   Copyright (C) 2003-2018 Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* The inline decisions are stored in the callgraph as an "inline plan"
   and applied later.

   To mark a given call inline, use the inline_call function.  The
   function marks the edge as inlinable and, if necessary, produces a
   virtual clone in the callgraph representing the new copy of the
   callee's function body.

   The inline plan is applied to a given function body by
   inline_transform.  */
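
/* A minimal usage sketch (illustrative only; the real driver lives in
   ipa-inline.c and does considerably more bookkeeping):

     // Decision phase: record the inlining of EDGE in the inline plan.
     inline_call (edge, true, &new_edges, &overall_size, true, NULL);

     // Apply phase: the pass manager later materializes the plan for
     // each function body.
     todo = inline_transform (node);
*/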

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "function.h"
#include "tree.h"
#include "alloc-pool.h"
#include "tree-pass.h"
#include "cgraph.h"
#include "tree-cfg.h"
#include "symbol-summary.h"
#include "tree-vrp.h"
#include "ipa-prop.h"
#include "ipa-fnsummary.h"
#include "ipa-inline.h"
#include "tree-inline.h"
#include "cfg.h"
#include "basic-block.h"

int ncalls_inlined;
int nfunctions_inlined;

/* Scale counts of NODE edges by NUM/DEN.  */

static void
update_noncloned_counts (struct cgraph_node *node,
			 profile_count num, profile_count den)
{
  struct cgraph_edge *e;

  profile_count::adjust_for_ipa_scaling (&num, &den);

  for (e = node->callees; e; e = e->next_callee)
    {
      if (!e->inline_failed)
        update_noncloned_counts (e->callee, num, den);
      e->count = e->count.apply_scale (num, den);
    }
  for (e = node->indirect_calls; e; e = e->next_callee)
    e->count = e->count.apply_scale (num, den);
  node->count = node->count.apply_scale (num, den);
}
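
/* Worked example (illustrative numbers): if the retained copy of NODE
   now receives NUM = 300 of the original DEN = 1000 executions, every
   callee edge count, indirect edge count and NODE->count above is
   scaled by 300/1000.  adjust_for_ipa_scaling first sanitizes the
   ratio; roughly, a zero denominator is forced non-zero so profiles
   are not collapsed to zero.  */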

/* We removed or are going to remove the last call to NODE.
   Return true if we can and want to proactively remove NODE now.
   This is important to do, since we want the inliner to know when an
   offline copy of a function was removed.  */

static bool
can_remove_node_now_p_1 (struct cgraph_node *node, struct cgraph_edge *e)
{
  ipa_ref *ref;

  FOR_EACH_ALIAS (node, ref)
    {
      cgraph_node *alias = dyn_cast <cgraph_node *> (ref->referring);
      if ((alias->callers && alias->callers != e)
          || !can_remove_node_now_p_1 (alias, e))
	return false;
    }
  /* FIXME: When the address of a DECL_EXTERNAL function is taken, we
     still can remove its offline copy, but we would need to keep an
     unanalyzed node in the callgraph so references can point to it.

     Also for a comdat group we can ignore references inside the group,
     as we want to prove the group as a whole to be dead.  */
  return (!node->address_taken
	  && node->can_remove_if_no_direct_calls_and_refs_p ()
	  /* Inlining might enable more devirtualizing, so we want to remove
	     those only after all devirtualizable virtual calls are processed.
	     Lacking may-edges in the callgraph, we just preserve them after
	     inlining.  */
	  && (!DECL_VIRTUAL_P (node->decl)
	      || !opt_for_fn (node->decl, flag_devirtualize))
	  /* During early inlining some unanalyzed cgraph nodes might be in
	     the callgraph and they might refer to the function in question.  */
	  && !cgraph_new_nodes.exists ());
}

/* We are going to eliminate the last direct call to NODE (or an alias
   of it) via edge E.  Verify that NODE can be removed from the unit,
   and that if it is contained in a comdat group, the whole comdat
   group is removable.  */

static bool
can_remove_node_now_p (struct cgraph_node *node, struct cgraph_edge *e)
{
  struct cgraph_node *next;
  if (!can_remove_node_now_p_1 (node, e))
    return false;

  /* When we see the same comdat group, we need to be sure that all
     items can be removed.  */
  if (!node->same_comdat_group || !node->externally_visible)
    return true;
  for (next = dyn_cast<cgraph_node *> (node->same_comdat_group);
       next != node; next = dyn_cast<cgraph_node *> (next->same_comdat_group))
    {
      if (next->alias)
	continue;
      if ((next->callers && next->callers != e)
	  || !can_remove_node_now_p_1 (next, e))
        return false;
    }
  return true;
}
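
/* Illustration (hypothetical symbols): for a comdat group {F, G} where
   F's only caller is the edge E being eliminated and G has no callers
   at all, the walk above permits removal; if G still had an unrelated
   caller, the whole group would have to stay.  */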

/* Return true if NODE is a master clone with non-inline clones.  */

static bool
master_clone_with_noninline_clones_p (struct cgraph_node *node)
{
  if (node->clone_of)
    return false;

  for (struct cgraph_node *n = node->clones; n; n = n->next_sibling_clone)
    if (n->decl != node->decl)
      return true;

  return false;
}

/* E is expected to be an edge being inlined.  Clone the destination node
   of the edge and redirect it to the new clone.
   DUPLICATE is used for bookkeeping on whether we are actually creating
   new clones or re-using the node originally representing the
   out-of-line function call.
   By default the offline copy is removed when it appears dead after
   inlining.  UPDATE_ORIGINAL prevents this transformation.
   If OVERALL_SIZE is non-NULL, the size is updated to reflect the
   transformation.  */

void
clone_inlined_nodes (struct cgraph_edge *e, bool duplicate,
		     bool update_original, int *overall_size)
{
  struct cgraph_node *inlining_into;
  struct cgraph_edge *next;

  if (e->caller->global.inlined_to)
    inlining_into = e->caller->global.inlined_to;
  else
    inlining_into = e->caller;

  if (duplicate)
    {
      /* We may eliminate the need for the out-of-line copy to be output.
	 In that case just go ahead and re-use it.  This is not just a
	 memory optimization.  Making the offline copy of the function
	 disappear from the program will improve future decisions on
	 inlining.  */
      if (!e->callee->callers->next_caller
	  /* Recursive inlining never wants the master clone to
	     be overwritten.  */
	  && update_original
	  && can_remove_node_now_p (e->callee, e)
	  /* We cannot overwrite a master clone with non-inline clones
	     until after these clones are materialized.  */
	  && !master_clone_with_noninline_clones_p (e->callee))
	{
	  /* TODO: When the callee is in a comdat group, we could remove all
	     of it, including all inline clones inlined into it.  That would
	     however require small function inlining to register an edge
	     removal hook to maintain the priority queue.

	     For now we keep the other functions in the group in the program
	     until cgraph_remove_unreachable_functions gets rid of them.  */
	  gcc_assert (!e->callee->global.inlined_to);
	  e->callee->remove_from_same_comdat_group ();
	  if (e->callee->definition
	      && inline_account_function_p (e->callee))
	    {
	      gcc_assert (!e->callee->alias);
	      if (overall_size)
	        *overall_size -= ipa_fn_summaries->get (e->callee)->size;
	      nfunctions_inlined++;
	    }
	  duplicate = false;
	  e->callee->externally_visible = false;
          update_noncloned_counts (e->callee, e->count, e->callee->count);

	  dump_callgraph_transformation (e->callee, inlining_into,
					 "inlining to");
	}
      else
	{
	  struct cgraph_node *n;

	  n = e->callee->create_clone (e->callee->decl,
				       e->count,
				       update_original, vNULL, true,
				       inlining_into,
				       NULL);
	  n->used_as_abstract_origin = e->callee->used_as_abstract_origin;
	  e->redirect_callee (n);
	}
    }
  else
    e->callee->remove_from_same_comdat_group ();

  e->callee->global.inlined_to = inlining_into;

  /* Recursively clone all bodies.  */
  for (e = e->callee->callees; e; e = next)
    {
      next = e->next_callee;
      if (!e->inline_failed)
        clone_inlined_nodes (e, duplicate, update_original, overall_size);
    }
}
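
/* In short, the DUPLICATE path above has two outcomes: when E is the
   sole remaining call and the offline copy is provably removable, the
   callee node itself is re-used in place (and DUPLICATE turns false for
   the recursive walk); otherwise a fresh inline clone is created via
   create_clone and E is redirected to it.  */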

/* Check all speculations in N and resolve them if they seem useless.  */

static bool
check_speculations (cgraph_node *n)
{
  bool speculation_removed = false;
  cgraph_edge *next;

  for (cgraph_edge *e = n->callees; e; e = next)
    {
      next = e->next_callee;
      if (e->speculative && !speculation_useful_p (e, true))
	{
	  e->resolve_speculation (NULL);
	  speculation_removed = true;
	}
      else if (!e->inline_failed)
	speculation_removed |= check_speculations (e->callee);
    }
  return speculation_removed;
}

/* Mark all call graph edges coming out of NODE and all nodes that have
   been inlined to it as in_polymorphic_cdtor.  */

static void
mark_all_inlined_calls_cdtor (cgraph_node *node)
{
  for (cgraph_edge *cs = node->callees; cs; cs = cs->next_callee)
    {
      cs->in_polymorphic_cdtor = true;
      if (!cs->inline_failed)
	mark_all_inlined_calls_cdtor (cs->callee);
    }
  for (cgraph_edge *cs = node->indirect_calls; cs; cs = cs->next_callee)
    cs->in_polymorphic_cdtor = true;
}


/* Mark edge E as inlined and update the callgraph accordingly.
   UPDATE_ORIGINAL specifies whether the profile of the original function
   should be updated.  If any new indirect edges are discovered in the
   process, add them to NEW_EDGES, unless it is NULL.  If
   UPDATE_OVERALL_SUMMARY is false, do not bother to recompute the
   overall size of the caller after inlining; the caller is then required
   to eventually do it via ipa_update_overall_fn_summary.
   If CALLEE_REMOVED is non-NULL, set it to true if we removed the callee
   node.

   Return true iff any new callgraph edges were discovered as a
   result of inlining.  */

bool
inline_call (struct cgraph_edge *e, bool update_original,
	     vec<cgraph_edge *> *new_edges,
	     int *overall_size, bool update_overall_summary,
	     bool *callee_removed)
{
  int old_size = 0, new_size = 0;
  struct cgraph_node *to = NULL;
  struct cgraph_edge *curr = e;
  struct cgraph_node *callee = e->callee->ultimate_alias_target ();
  bool new_edges_found = false;

  int estimated_growth = 0;
  if (! update_overall_summary)
    estimated_growth = estimate_edge_growth (e);
  /* This is used only for the assert below.  */
#if 0
  bool predicated = inline_edge_summary (e)->predicate != NULL;
#endif

  /* Don't inline inlined edges.  */
  gcc_assert (e->inline_failed);
  /* Don't even think of inlining an inline clone.  */
  gcc_assert (!callee->global.inlined_to);

  to = e->caller;
  if (to->global.inlined_to)
    to = to->global.inlined_to;
  if (to->thunk.thunk_p)
    {
      struct cgraph_node *target = to->callees->callee;
      if (in_lto_p)
	to->get_untransformed_body ();
      to->expand_thunk (false, true);
      /* When the thunk is instrumented we may have multiple callees.  */
      for (e = to->callees; e && e->callee != target; e = e->next_callee)
	;
      gcc_assert (e);
    }


  e->inline_failed = CIF_OK;
  DECL_POSSIBLY_INLINED (callee->decl) = true;

  if (DECL_FUNCTION_PERSONALITY (callee->decl))
    DECL_FUNCTION_PERSONALITY (to->decl)
      = DECL_FUNCTION_PERSONALITY (callee->decl);

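  /* If the callee was built without strict aliasing but the caller uses
     it, the combined body must not be optimized under strict-aliasing
     assumptions, so drop flag_strict_aliasing on the caller.  */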
  bool reload_optimization_node = false;
  if (!opt_for_fn (callee->decl, flag_strict_aliasing)
      && opt_for_fn (to->decl, flag_strict_aliasing))
    {
      struct gcc_options opts = global_options;

      cl_optimization_restore (&opts, opts_for_fn (to->decl));
      opts.x_flag_strict_aliasing = false;
      if (dump_file)
	fprintf (dump_file, "Dropping flag_strict_aliasing on %s\n",
		 to->dump_name ());
      DECL_FUNCTION_SPECIFIC_OPTIMIZATION (to->decl)
	 = build_optimization_node (&opts);
      reload_optimization_node = true;
    }

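  /* Likewise, if the callee contains floating point expressions and its
     FP math flags differ from the caller's, copy the callee's FP flags
     over so the inlined FP code keeps its intended semantics.  */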
  ipa_fn_summary *caller_info = ipa_fn_summaries->get (to);
  ipa_fn_summary *callee_info = ipa_fn_summaries->get (callee);
  if (!caller_info->fp_expressions && callee_info->fp_expressions)
    {
      caller_info->fp_expressions = true;
      if (opt_for_fn (callee->decl, flag_rounding_math)
	  != opt_for_fn (to->decl, flag_rounding_math)
	  || opt_for_fn (callee->decl, flag_trapping_math)
	     != opt_for_fn (to->decl, flag_trapping_math)
	  || opt_for_fn (callee->decl, flag_unsafe_math_optimizations)
	     != opt_for_fn (to->decl, flag_unsafe_math_optimizations)
	  || opt_for_fn (callee->decl, flag_finite_math_only)
	     != opt_for_fn (to->decl, flag_finite_math_only)
	  || opt_for_fn (callee->decl, flag_signaling_nans)
	     != opt_for_fn (to->decl, flag_signaling_nans)
	  || opt_for_fn (callee->decl, flag_cx_limited_range)
	     != opt_for_fn (to->decl, flag_cx_limited_range)
	  || opt_for_fn (callee->decl, flag_signed_zeros)
	     != opt_for_fn (to->decl, flag_signed_zeros)
	  || opt_for_fn (callee->decl, flag_associative_math)
	     != opt_for_fn (to->decl, flag_associative_math)
	  || opt_for_fn (callee->decl, flag_reciprocal_math)
	     != opt_for_fn (to->decl, flag_reciprocal_math)
	  || opt_for_fn (callee->decl, flag_fp_int_builtin_inexact)
	     != opt_for_fn (to->decl, flag_fp_int_builtin_inexact)
	  || opt_for_fn (callee->decl, flag_errno_math)
	     != opt_for_fn (to->decl, flag_errno_math))
	{
	  struct gcc_options opts = global_options;

	  cl_optimization_restore (&opts, opts_for_fn (to->decl));
	  opts.x_flag_rounding_math
	    = opt_for_fn (callee->decl, flag_rounding_math);
	  opts.x_flag_trapping_math
	    = opt_for_fn (callee->decl, flag_trapping_math);
	  opts.x_flag_unsafe_math_optimizations
	    = opt_for_fn (callee->decl, flag_unsafe_math_optimizations);
	  opts.x_flag_finite_math_only
	    = opt_for_fn (callee->decl, flag_finite_math_only);
	  opts.x_flag_signaling_nans
	    = opt_for_fn (callee->decl, flag_signaling_nans);
	  opts.x_flag_cx_limited_range
	    = opt_for_fn (callee->decl, flag_cx_limited_range);
	  opts.x_flag_signed_zeros
	    = opt_for_fn (callee->decl, flag_signed_zeros);
	  opts.x_flag_associative_math
	    = opt_for_fn (callee->decl, flag_associative_math);
	  opts.x_flag_reciprocal_math
	    = opt_for_fn (callee->decl, flag_reciprocal_math);
	  opts.x_flag_fp_int_builtin_inexact
	    = opt_for_fn (callee->decl, flag_fp_int_builtin_inexact);
	  opts.x_flag_errno_math
	    = opt_for_fn (callee->decl, flag_errno_math);
	  if (dump_file)
	    fprintf (dump_file, "Copying FP flags from %s to %s\n",
		     callee->dump_name (), to->dump_name ());
	  DECL_FUNCTION_SPECIFIC_OPTIMIZATION (to->decl)
	     = build_optimization_node (&opts);
	  reload_optimization_node = true;
	}
    }

  /* Reload global optimization flags.  */
  if (reload_optimization_node && DECL_STRUCT_FUNCTION (to->decl) == cfun)
    set_cfun (cfun, true);

  /* If aliases are involved, redirect the edge to the actual destination
     and possibly remove the aliases.  */
  if (e->callee != callee)
    {
      struct cgraph_node *alias = e->callee, *next_alias;
      e->redirect_callee (callee);
      while (alias && alias != callee)
	{
	  if (!alias->callers
	      && can_remove_node_now_p (alias,
					!e->next_caller && !e->prev_caller ? e : NULL))
	    {
	      next_alias = alias->get_alias_target ();
	      alias->remove ();
	      if (callee_removed)
		*callee_removed = true;
	      alias = next_alias;
	    }
	  else
	    break;
	}
    }

  clone_inlined_nodes (e, true, update_original, overall_size);

  gcc_assert (curr->callee->global.inlined_to == to);

  old_size = ipa_fn_summaries->get (to)->size;
  ipa_merge_fn_summary_after_inlining (e);
  if (e->in_polymorphic_cdtor)
    mark_all_inlined_calls_cdtor (e->callee);
  if (opt_for_fn (e->caller->decl, optimize))
    new_edges_found = ipa_propagate_indirect_call_infos (curr, new_edges);
  check_speculations (e->callee);
  if (update_overall_summary)
    ipa_update_overall_fn_summary (to);
  else
    /* Update self size by the estimate so overall function growth limits
       work for further inlining into this function.  Before inlining
       again into the function we inlined to, we expect the caller to
       update the overall summary.  */
    ipa_fn_summaries->get (to)->size += estimated_growth;
  new_size = ipa_fn_summaries->get (to)->size;

  if (callee->calls_comdat_local)
    to->calls_comdat_local = true;
  else if (to->calls_comdat_local && callee->comdat_local_p ())
    {
      struct cgraph_edge *se = to->callees;
      for (; se; se = se->next_callee)
	if (se->inline_failed && se->callee->comdat_local_p ())
	  break;
      if (se == NULL)
	to->calls_comdat_local = false;
    }

  /* FIXME: This assert suffers from roundoff errors, disable it for GCC 5
     and revisit it after conversion to sreals in GCC 6.
     See PR 65654.  */
#if 0
  /* Verify that estimated growth matches real growth.  Allow off-by-one
     error due to ipa_fn_summary::size_scale roundoff errors.  */
  gcc_assert (!update_overall_summary || !overall_size || new_edges_found
	      || abs (estimated_growth - (new_size - old_size)) <= 1
	      || speculation_removed
	      /* FIXME: a hack.  Edges with false predicate are accounted
		 wrong, we should remove them from callgraph.  */
	      || predicated);
#endif

  /* Account the change of overall unit size; external functions will be
     removed and are thus not accounted.  */
  if (overall_size && inline_account_function_p (to))
    *overall_size += new_size - old_size;
  ncalls_inlined++;

  /* This must happen after ipa_merge_fn_summary_after_inlining, which
     relies on the jump functions of the callee not being updated.  */
  return new_edges_found;
}
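
/* A hedged usage sketch (parameter choices are illustrative, not the
   canonical driver in ipa-inline.c):

     vec<cgraph_edge *> new_edges = vNULL;
     int overall_size = 0;
     // Inline EDGE, update the original profile, collect newly
     // discovered indirect edges, and recompute the caller's overall
     // summary immediately.
     bool found = inline_call (edge, true, &new_edges, &overall_size,
			       true, NULL);
*/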


/* Copy the function body of NODE and redirect all inline clones to it.
   This is done before the inline plan is applied to NODE when there are
   still some inline clones of it.

   This is necessary because inline decisions are not really transitive
   and the other inline clones may have different bodies.  */

static struct cgraph_node *
save_inline_function_body (struct cgraph_node *node)
{
  struct cgraph_node *first_clone, *n;

  if (dump_file)
    fprintf (dump_file, "\nSaving body of %s for later reuse\n",
	     node->name ());

  gcc_assert (node == cgraph_node::get (node->decl));

  /* first_clone will be turned into a real function.  */
  first_clone = node->clones;

  /* Arrange for the first clone to not be a thunk, as thunks do not
     have bodies.  */
  if (first_clone->thunk.thunk_p)
    {
      while (first_clone->thunk.thunk_p)
        first_clone = first_clone->next_sibling_clone;
      first_clone->prev_sibling_clone->next_sibling_clone
	= first_clone->next_sibling_clone;
      if (first_clone->next_sibling_clone)
	first_clone->next_sibling_clone->prev_sibling_clone
	   = first_clone->prev_sibling_clone;
      first_clone->next_sibling_clone = node->clones;
      first_clone->prev_sibling_clone = NULL;
      node->clones->prev_sibling_clone = first_clone;
      node->clones = first_clone;
    }
  first_clone->decl = copy_node (node->decl);
  first_clone->decl->decl_with_vis.symtab_node = first_clone;
  gcc_assert (first_clone == cgraph_node::get (first_clone->decl));

  /* Now reshape the clone tree, so that all other clones descend from
     first_clone.  */
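  /* Illustratively (hypothetical clones): a sibling list
     node -> {first_clone, c2, c3} becomes
     node -> {first_clone -> {c2, c3}}; every remaining sibling is
     re-parented under first_clone.  */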
  if (first_clone->next_sibling_clone)
    {
      for (n = first_clone->next_sibling_clone; n->next_sibling_clone;
	   n = n->next_sibling_clone)
        n->clone_of = first_clone;
      n->clone_of = first_clone;
      n->next_sibling_clone = first_clone->clones;
      if (first_clone->clones)
        first_clone->clones->prev_sibling_clone = n;
      first_clone->clones = first_clone->next_sibling_clone;
      first_clone->next_sibling_clone->prev_sibling_clone = NULL;
      first_clone->next_sibling_clone = NULL;
      gcc_assert (!first_clone->prev_sibling_clone);
    }
  first_clone->clone_of = NULL;

  /* Now the node in question has no clones.  */
  node->clones = NULL;

  /* Inline clones share their decl with the function they are cloned
     from.  Walk the whole clone tree and redirect them all to the
     new decl.  */
  if (first_clone->clones)
    for (n = first_clone->clones; n != first_clone;)
      {
        gcc_assert (n->decl == node->decl);
	n->decl = first_clone->decl;
	if (n->clones)
	  n = n->clones;
	else if (n->next_sibling_clone)
	  n = n->next_sibling_clone;
	else
	  {
	    while (n != first_clone && !n->next_sibling_clone)
	      n = n->clone_of;
	    if (n != first_clone)
	      n = n->next_sibling_clone;
	  }
      }

  /* Copy the OLD_VERSION_NODE function tree to the new version.  */
  tree_function_versioning (node->decl, first_clone->decl,
			    NULL, true, NULL, false,
			    NULL, NULL);

  /* The function will be short lived and removed after we inline all the
     clones, but make it internal so we won't confuse ourselves.  */
  DECL_EXTERNAL (first_clone->decl) = 0;
  TREE_PUBLIC (first_clone->decl) = 0;
  DECL_COMDAT (first_clone->decl) = 0;
  first_clone->ipa_transforms_to_apply.release ();

  /* When doing recursive inlining, the clone may become unnecessary.
     This is possible e.g. when the recursive function is proved to be
     non-throwing and the recursion happens only in the EH landing pad.
     We cannot remove the clone until we are done with saving the body.
     Remove it now.  */
  if (!first_clone->callers)
    {
      first_clone->remove_symbol_and_inline_clones ();
      first_clone = NULL;
    }
  else if (flag_checking)
    first_clone->verify ();

  return first_clone;
}

/* Return true when the function body of NODE still needs to be kept
   around for later re-use.  */
static bool
preserve_function_body_p (struct cgraph_node *node)
{
  gcc_assert (symtab->global_info_ready);
  gcc_assert (!node->alias && !node->thunk.thunk_p);

  /* Look if there is any non-thunk clone around.  */
  for (node = node->clones; node; node = node->next_sibling_clone)
    if (!node->thunk.thunk_p)
      return true;
  return false;
}

/* Apply the inline plan to the function.  */

unsigned int
inline_transform (struct cgraph_node *node)
{
  unsigned int todo = 0;
  struct cgraph_edge *e, *next;
  bool has_inline = false;

  /* FIXME: Currently the pass manager is adding the inline transform more
     than once to some clones.  This needs revisiting after WPA cleanups.  */
  if (cfun->after_inlining)
    return 0;

  /* We might need the body of this function so that we can expand
     it inline somewhere else.  */
  if (preserve_function_body_p (node))
    save_inline_function_body (node);

  for (e = node->callees; e; e = next)
    {
      if (!e->inline_failed)
	has_inline = true;
      next = e->next_callee;
      e->redirect_call_stmt_to_callee ();
    }
  node->remove_all_references ();

  timevar_push (TV_INTEGRATION);
  if (node->callees && (opt_for_fn (node->decl, optimize) || has_inline))
    {
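      /* The inline plan may have changed the IPA count of NODE; bring the
	 function-local basic block profile back in sync by scaling every
	 block count by node->count / entry-block count.  */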
      profile_count num = node->count;
      profile_count den = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
      bool scale = num.initialized_p () && !(num == den);
      if (scale)
	{
	  profile_count::adjust_for_ipa_scaling (&num, &den);
	  if (dump_file)
	    {
	      fprintf (dump_file, "Applying count scale ");
	      num.dump (dump_file);
	      fprintf (dump_file, "/");
	      den.dump (dump_file);
	      fprintf (dump_file, "\n");
	    }

	  basic_block bb;
	  cfun->cfg->count_max = profile_count::uninitialized ();
	  FOR_ALL_BB_FN (bb, cfun)
	    {
	      bb->count = bb->count.apply_scale (num, den);
	      cfun->cfg->count_max = cfun->cfg->count_max.max (bb->count);
	    }
	  ENTRY_BLOCK_PTR_FOR_FN (cfun)->count = node->count;
	}
      todo = optimize_inline_calls (current_function_decl);
    }
  timevar_pop (TV_INTEGRATION);

  cfun->always_inline_functions_inlined = true;
  cfun->after_inlining = true;
  todo |= execute_fixup_cfg ();

  if (!(todo & TODO_update_ssa_any))
    /* Redirecting edges might lead to a need for vops to be recomputed.  */
    todo |= TODO_update_ssa_only_virtuals;

  return todo;
}