/* Natural loop analysis code for GNU compiler.
   Copyright (C) 2002-2018 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "predict.h"
#include "memmodel.h"
#include "emit-rtl.h"
#include "cfgloop.h"
#include "explow.h"
#include "expr.h"
#include "graphds.h"
#include "params.h"
#include "sreal.h"

struct target_cfgloop default_target_cfgloop;
#if SWITCHABLE_TARGET
struct target_cfgloop *this_target_cfgloop = &default_target_cfgloop;
#endif

/* Checks whether BB is executed exactly once in each LOOP iteration.  */

bool
just_once_each_iteration_p (const struct loop *loop, const_basic_block bb)
{
  /* It must be executed at least once each iteration.  */
  if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
    return false;

  /* And just once.  */
  if (bb->loop_father != loop)
    return false;

  /* But this was not enough.  We might have some irreducible loop here.  */
  if (bb->flags & BB_IRREDUCIBLE_LOOP)
    return false;

  return true;
}

/* Marks blocks and edges that are part of non-recognized loops; i.e. we
   throw away all latch edges and mark blocks inside any remaining cycle.
   Everything is a bit complicated due to the fact that we do not want to
   do this for parts of cycles that only "pass" through some loop -- i.e.
   for each cycle, we want to mark the blocks that belong directly to the
   innermost loop containing the whole cycle.

   LOOPS is the loop tree.  */

#define LOOP_REPR(LOOP) ((LOOP)->num + last_basic_block_for_fn (cfun))
#define BB_REPR(BB) ((BB)->index + 1)
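
/* In the auxiliary graph built below, every basic block and every loop gets
   its own vertex; the LOOP_REPR vertices let edges that enter or leave a
   loop be redirected to the loop's representative instead of an individual
   block, so that each cycle ends up attributed to the innermost loop that
   contains it as a whole.  */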

bool
mark_irreducible_loops (void)
{
  basic_block act;
  struct graph_edge *ge;
  edge e;
  edge_iterator ei;
  int src, dest;
  unsigned depth;
  struct graph *g;
  int num = number_of_loops (cfun);
  struct loop *cloop;
  bool irred_loop_found = false;
  int i;

  gcc_assert (current_loops != NULL);

  /* Reset the flags.  */
  FOR_BB_BETWEEN (act, ENTRY_BLOCK_PTR_FOR_FN (cfun),
		  EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
    {
      act->flags &= ~BB_IRREDUCIBLE_LOOP;
      FOR_EACH_EDGE (e, ei, act->succs)
	e->flags &= ~EDGE_IRREDUCIBLE_LOOP;
    }

  /* Create the edge lists.  */
  g = new_graph (last_basic_block_for_fn (cfun) + num);

  FOR_BB_BETWEEN (act, ENTRY_BLOCK_PTR_FOR_FN (cfun),
		  EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
    FOR_EACH_EDGE (e, ei, act->succs)
      {
	/* Ignore edges to exit.  */
	if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
	  continue;

	src = BB_REPR (act);
	dest = BB_REPR (e->dest);

	/* Ignore latch edges.  */
	if (e->dest->loop_father->header == e->dest
	    && e->dest->loop_father->latch == act)
	  continue;

	/* Edges inside a single loop should be left where they are.  Edges
	   to subloop headers should lead to the representative of the
	   subloop, but from the same place.

	   Edges exiting loops should lead from the representative of the
	   child of the nearest common ancestor of the loops in which ACT
	   lies.  */

	if (e->dest->loop_father->header == e->dest)
	  dest = LOOP_REPR (e->dest->loop_father);

	if (!flow_bb_inside_loop_p (act->loop_father, e->dest))
	  {
	    depth = 1 + loop_depth (find_common_loop (act->loop_father,
						      e->dest->loop_father));
	    if (depth == loop_depth (act->loop_father))
	      cloop = act->loop_father;
	    else
	      cloop = (*act->loop_father->superloops)[depth];

	    src = LOOP_REPR (cloop);
	  }

	add_edge (g, src, dest)->data = e;
      }

  /* Find the strongly connected components.  */
  graphds_scc (g, NULL);

  /* Mark the irreducible loops.  */
  for (i = 0; i < g->n_vertices; i++)
    for (ge = g->vertices[i].succ; ge; ge = ge->succ_next)
      {
	edge real = (edge) ge->data;
	/* An edge in the auxiliary graph is irreducible if it connects two
	   vertices in the same SCC.  */

	/* All edges should lead from a component with a higher number to
	   one with a lower number.  */
	gcc_assert (g->vertices[ge->src].component >= g->vertices[ge->dest].component);

	if (g->vertices[ge->src].component != g->vertices[ge->dest].component)
	  continue;

	real->flags |= EDGE_IRREDUCIBLE_LOOP;
	irred_loop_found = true;
	if (flow_bb_inside_loop_p (real->src->loop_father, real->dest))
	  real->src->flags |= BB_IRREDUCIBLE_LOOP;
      }

  free_graph (g);

  loops_state_set (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS);
  return irred_loop_found;
}

/* Counts number of insns inside LOOP.  */
int
num_loop_insns (const struct loop *loop)
{
  basic_block *bbs, bb;
  unsigned i, ninsns = 0;
  rtx_insn *insn;

  bbs = get_loop_body (loop);
  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = bbs[i];
      FOR_BB_INSNS (bb, insn)
	if (NONDEBUG_INSN_P (insn))
	  ninsns++;
    }
  free (bbs);

  if (!ninsns)
    ninsns = 1;	/* To avoid division by zero.  */

  return ninsns;
}

/* Counts the number of insns executed on average per iteration of LOOP.  */
int
average_num_loop_insns (const struct loop *loop)
{
  basic_block *bbs, bb;
  unsigned i, binsns;
  sreal ninsns;
  rtx_insn *insn;

  ninsns = 0;
  bbs = get_loop_body (loop);
  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = bbs[i];

      binsns = 0;
      FOR_BB_INSNS (bb, insn)
	if (NONDEBUG_INSN_P (insn))
	  binsns++;

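      /* Weight the block's insn count by how often the block executes
	 relative to the loop header, so conditionally executed blocks
	 contribute proportionally less.  */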
      ninsns += (sreal)binsns * bb->count.to_sreal_scale (loop->header->count);
      /* Avoid overflows.  */
      if (ninsns > 1000000)
	return 100000;
    }
  free (bbs);

  int64_t ret = ninsns.to_int ();
  if (!ret)
    ret = 1; /* To avoid division by zero.  */

  return ret;
}

/* Returns expected number of iterations of LOOP, according to
   measured or guessed profile.

   This function attempts to return a "sane" value even if the profile
   information is not good enough to derive one.
   If BY_PROFILE_ONLY is set, this logic is bypassed and the function
   returns -1 in those scenarios.  */

gcov_type
expected_loop_iterations_unbounded (const struct loop *loop,
				    bool *read_profile_p,
				    bool by_profile_only)
{
  edge e;
  edge_iterator ei;
  gcov_type expected = -1;

  if (read_profile_p)
    *read_profile_p = false;

  /* If we have no profile at all, use AVG_LOOP_NITER.  */
  if (profile_status_for_fn (cfun) == PROFILE_ABSENT)
    {
      if (by_profile_only)
	return -1;
      expected = PARAM_VALUE (PARAM_AVG_LOOP_NITER);
    }
  else if (loop->latch && (loop->latch->count.initialized_p ()
			   || loop->header->count.initialized_p ()))
    {
      profile_count count_in = profile_count::zero (),
		    count_latch = profile_count::zero ();

      FOR_EACH_EDGE (e, ei, loop->header->preds)
	if (e->src == loop->latch)
	  count_latch = e->count ();
	else
	  count_in += e->count ();

      if (!count_latch.initialized_p ())
	{
	  if (by_profile_only)
	    return -1;
	  expected = PARAM_VALUE (PARAM_AVG_LOOP_NITER);
	}
      else if (!count_in.nonzero_p ())
	{
	  if (by_profile_only)
	    return -1;
	  expected = count_latch.to_gcov_type () * 2;
	}
      else
	{
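	  /* Number of latch executions per entry into the loop; adding
	     count_in - 1 before the division makes it round up.  */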
	  expected = (count_latch.to_gcov_type () + count_in.to_gcov_type ()
		      - 1) / count_in.to_gcov_type ();
	  if (read_profile_p
	      && count_latch.reliable_p () && count_in.reliable_p ())
	    *read_profile_p = true;
	}
    }
  else
    {
      if (by_profile_only)
	return -1;
      expected = PARAM_VALUE (PARAM_AVG_LOOP_NITER);
    }

  if (!by_profile_only)
    {
      HOST_WIDE_INT max = get_max_loop_iterations_int (loop);
      if (max != -1 && max < expected)
	return max;
    }

  return expected;
}

/* Returns expected number of LOOP iterations.  The returned value is bounded
   by REG_BR_PROB_BASE.  */

unsigned
expected_loop_iterations (struct loop *loop)
{
  gcov_type expected = expected_loop_iterations_unbounded (loop);
  return (expected > REG_BR_PROB_BASE ? REG_BR_PROB_BASE : expected);
}

/* Returns the maximum level of nesting of subloops of LOOP.  */

unsigned
get_loop_level (const struct loop *loop)
{
  const struct loop *ploop;
  unsigned mx = 0, l;

  for (ploop = loop->inner; ploop; ploop = ploop->next)
    {
      l = get_loop_level (ploop);
      if (l >= mx)
	mx = l + 1;
    }
  return mx;
}

/* Initialize the constants for computing set costs.  */

void
init_set_costs (void)
{
  int speed;
  rtx_insn *seq;
  rtx reg1 = gen_raw_REG (SImode, LAST_VIRTUAL_REGISTER + 1);
  rtx reg2 = gen_raw_REG (SImode, LAST_VIRTUAL_REGISTER + 2);
  rtx addr = gen_raw_REG (Pmode, LAST_VIRTUAL_REGISTER + 3);
  rtx mem = validize_mem (gen_rtx_MEM (SImode, addr));
  unsigned i;

  target_avail_regs = 0;
  target_clobbered_regs = 0;
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (TEST_HARD_REG_BIT (reg_class_contents[GENERAL_REGS], i)
	&& !fixed_regs[i])
      {
	target_avail_regs++;
	if (call_used_regs[i])
	  target_clobbered_regs++;
      }

  target_res_regs = 3;

  for (speed = 0; speed < 2; speed++)
    {
      crtl->maybe_hot_insn_p = speed;
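      /* The sequences below are costed once for size (speed == 0) and once
	 for speed (speed == 1); crtl->maybe_hot_insn_p is set so the cost
	 queries see a matching hot/cold context.  */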
      /* Set up the costs for using extra registers:

	 1) If not many free registers remain, we should prefer having an
	    additional move to decreasing the number of available registers.
	    (TARGET_REG_COST).
	 2) If no registers are available, we need to spill, which may require
	    storing the old value to memory and loading it back
	    (TARGET_SPILL_COST).  */

      start_sequence ();
      emit_move_insn (reg1, reg2);
      seq = get_insns ();
      end_sequence ();
      target_reg_cost [speed] = seq_cost (seq, speed);

      start_sequence ();
      emit_move_insn (mem, reg1);
      emit_move_insn (reg2, mem);
      seq = get_insns ();
      end_sequence ();
      target_spill_cost [speed] = seq_cost (seq, speed);
    }
  default_rtl_profile ();
}

/* Estimates cost of increased register pressure caused by making N_NEW new
   registers live around the loop.  N_OLD is the number of registers live
   around the loop.  If CALL_P is true, also take into account that
   call-used registers may be clobbered in the loop body, reducing the
   number of available registers before we spill.  */

unsigned
estimate_reg_pressure_cost (unsigned n_new, unsigned n_old, bool speed,
			    bool call_p)
{
  unsigned cost;
  unsigned regs_needed = n_new + n_old;
  unsigned available_regs = target_avail_regs;

  /* If there is a call in the loop body, the call-clobbered registers
     are not available for loop invariants.  */
  if (call_p)
    available_regs = available_regs - target_clobbered_regs;

  /* If we have enough registers, we should use them and not restrict
     the transformations unnecessarily.  */
  if (regs_needed + target_res_regs <= available_regs)
    return 0;

  if (regs_needed <= available_regs)
    /* If we are close to running out of registers, try to preserve
       them.  */
    cost = target_reg_cost [speed] * n_new;
  else
    /* If we run out of registers, it is very expensive to add another
       one.  */
    cost = target_spill_cost [speed] * n_new;

  if (optimize && (flag_ira_region == IRA_REGION_ALL
		   || flag_ira_region == IRA_REGION_MIXED)
      && number_of_loops (cfun) <= (unsigned) IRA_MAX_LOOPS_NUM)
    /* IRA regional allocation deals with high register pressure
       better.  So decrease the cost (to make the cost calculation more
       accurate for IRA, we would need to know how many registers live
       through the loop transparently).  */
    cost /= 2;

  return cost;
}

/* Sets EDGE_LOOP_EXIT flag for all loop exits.  */

void
mark_loop_exit_edges (void)
{
  basic_block bb;
  edge e;

  if (number_of_loops (cfun) <= 1)
    return;

  FOR_EACH_BB_FN (bb, cfun)
    {
      edge_iterator ei;

      FOR_EACH_EDGE (e, ei, bb->succs)
	{
	  if (loop_outer (bb->loop_father)
	      && loop_exit_edge_p (bb->loop_father, e))
	    e->flags |= EDGE_LOOP_EXIT;
	  else
	    e->flags &= ~EDGE_LOOP_EXIT;
	}
    }
}

/* Return the exit edge if LOOP has only one exit that is likely
   to be executed at runtime (i.e. it is not an EH edge or an edge
   leading to a noreturn call).  */

edge
single_likely_exit (struct loop *loop)
{
  edge found = single_exit (loop);
  vec<edge> exits;
  unsigned i;
  edge ex;

  if (found)
    return found;
  exits = get_loop_exit_edges (loop);
  FOR_EACH_VEC_ELT (exits, i, ex)
    {
      if (probably_never_executed_edge_p (cfun, ex)
	  /* We want to rule out paths to noreturns but not low probabilities
	     resulting from adjustments or combining.
	     FIXME: once we have better quality tracking, make this more
	     robust.  */
	  || ex->probability <= profile_probability::very_unlikely ())
	continue;
      if (!found)
	found = ex;
      else
	{
	  exits.release ();
	  return NULL;
	}
    }
  exits.release ();
  return found;
}


/* Returns the "hot path" of LOOP: the sequence of basic blocks starting at
   the loop header, following at each step the most probable successor edge
   that stays inside LOOP and leads to a block not yet on the path, and
   stopping when no such edge exists or the path returns to the header.  */

vec<basic_block>
get_loop_hot_path (const struct loop *loop)
{
  basic_block bb = loop->header;
  vec<basic_block> path = vNULL;
  bitmap visited = BITMAP_ALLOC (NULL);

  while (true)
    {
      edge_iterator ei;
      edge e;
      edge best = NULL;

      path.safe_push (bb);
      bitmap_set_bit (visited, bb->index);
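      /* Greedily pick the most probable successor edge that stays in the
	 loop and leads to a block we have not visited yet.  */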
      FOR_EACH_EDGE (e, ei, bb->succs)
	if ((!best || e->probability > best->probability)
	    && !loop_exit_edge_p (loop, e)
	    && !bitmap_bit_p (visited, e->dest->index))
	  best = e;
      if (!best || best->dest == loop->header)
	break;
      bb = best->dest;
    }
  BITMAP_FREE (visited);
  return path;
}