/* Natural loop analysis code for GNU compiler.
   Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "hard-reg-set.h"
#include "obstack.h"
#include "basic-block.h"
#include "cfgloop.h"
#include "expr.h"
#include "output.h"
#include "graphds.h"
#include "params.h"

struct target_cfgloop default_target_cfgloop;
#if SWITCHABLE_TARGET
struct target_cfgloop *this_target_cfgloop = &default_target_cfgloop;
#endif

/* Checks whether BB is executed exactly once in each LOOP iteration.  */

bool
just_once_each_iteration_p (const struct loop *loop, const_basic_block bb)
{
  /* It must be executed at least once each iteration.  */
  if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
    return false;

  /* And just once.  */
  if (bb->loop_father != loop)
    return false;

  /* But this is not enough; we might have an irreducible loop here.  */
  if (bb->flags & BB_IRREDUCIBLE_LOOP)
    return false;

  return true;
}
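
/* For illustration: if the body of LOOP is an if/else diamond, the blocks
   on either branch do not dominate the latch and are rejected by the
   dominance test above, while the header and the join block pass all
   three tests.  */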

/* Marks blocks and edges that are part of non-recognized loops; i.e. we
   throw away all latch edges and mark blocks inside any remaining cycle.
   Everything is a bit complicated due to the fact that we do not want to
   do this for parts of cycles that only "pass" through some loop -- i.e.
   for each cycle, we want to mark blocks that belong directly to the
   innermost loop containing the whole cycle.

   LOOPS is the loop tree.  */
#define LOOP_REPR(LOOP) ((LOOP)->num + last_basic_block)
#define BB_REPR(BB) ((BB)->index + 1)
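
/* In the auxiliary graph built by mark_irreducible_loops, basic block BB
   is represented by vertex BB->index + 1 and loop LOOP by vertex
   last_basic_block + LOOP->num, so the graph needs
   last_basic_block + number_of_loops () vertices in total.  */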

bool
mark_irreducible_loops (void)
{
  basic_block act;
  struct graph_edge *ge;
  edge e;
  edge_iterator ei;
  int src, dest;
  unsigned depth;
  struct graph *g;
  int num = number_of_loops ();
  struct loop *cloop;
  bool irred_loop_found = false;
  int i;

  gcc_assert (current_loops != NULL);

  /* Reset the flags.  */
  FOR_BB_BETWEEN (act, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
    {
      act->flags &= ~BB_IRREDUCIBLE_LOOP;
      FOR_EACH_EDGE (e, ei, act->succs)
	e->flags &= ~EDGE_IRREDUCIBLE_LOOP;
    }

  /* Create the edge lists.  */
  g = new_graph (last_basic_block + num);

  FOR_BB_BETWEEN (act, ENTRY_BLOCK_PTR, EXIT_BLOCK_PTR, next_bb)
    FOR_EACH_EDGE (e, ei, act->succs)
      {
	/* Ignore edges to exit.  */
	if (e->dest == EXIT_BLOCK_PTR)
	  continue;

	src = BB_REPR (act);
	dest = BB_REPR (e->dest);

	/* Ignore latch edges.  */
	if (e->dest->loop_father->header == e->dest
	    && e->dest->loop_father->latch == act)
	  continue;

	/* Edges inside a single loop should be left where they are.  Edges
	   to subloop headers should lead to the representative of the
	   subloop, but from the same place.

	   Edges exiting loops should lead from the representative of the
	   son of the nearest common ancestor of the loops in which ACT
	   lies.  */

	if (e->dest->loop_father->header == e->dest)
	  dest = LOOP_REPR (e->dest->loop_father);

	if (!flow_bb_inside_loop_p (act->loop_father, e->dest))
	  {
	    depth = 1 + loop_depth (find_common_loop (act->loop_father,
						      e->dest->loop_father));
	    if (depth == loop_depth (act->loop_father))
	      cloop = act->loop_father;
	    else
	      cloop = VEC_index (loop_p, act->loop_father->superloops, depth);

	    src = LOOP_REPR (cloop);
	  }

	add_edge (g, src, dest)->data = e;
      }

  /* Find the strongly connected components.  */
  graphds_scc (g, NULL);

  /* Mark the irreducible loops.  */
  for (i = 0; i < g->n_vertices; i++)
    for (ge = g->vertices[i].succ; ge; ge = ge->succ_next)
      {
	edge real = (edge) ge->data;
	/* An edge in graph G is irreducible if it connects two vertices in
	   the same SCC.  */

	/* All edges should lead from a component with a higher number to
	   one with a lower number.  */
	gcc_assert (g->vertices[ge->src].component >= g->vertices[ge->dest].component);

	if (g->vertices[ge->src].component != g->vertices[ge->dest].component)
	  continue;

	real->flags |= EDGE_IRREDUCIBLE_LOOP;
	irred_loop_found = true;
	if (flow_bb_inside_loop_p (real->src->loop_father, real->dest))
	  real->src->flags |= BB_IRREDUCIBLE_LOOP;
      }

  free_graph (g);

  loops_state_set (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS);
  return irred_loop_found;
}
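
/* For example, a cycle that can be entered through two different blocks
   (say, because a goto jumps into the middle of a loop body) forms no
   natural loop; removing latch edges does not break it, so its blocks
   and edges end up in a single nontrivial SCC and are marked above.  */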

/* Counts number of insns inside LOOP.  */
int
num_loop_insns (const struct loop *loop)
{
  basic_block *bbs, bb;
  unsigned i, ninsns = 0;
  rtx insn;

  bbs = get_loop_body (loop);
  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = bbs[i];
      FOR_BB_INSNS (bb, insn)
	if (NONDEBUG_INSN_P (insn))
	  ninsns++;
    }
  free (bbs);

  if (!ninsns)
    ninsns = 1;	/* To avoid division by zero.  */

  return ninsns;
}

/* Counts number of insns executed on average per iteration of LOOP.  */
int
average_num_loop_insns (const struct loop *loop)
{
  basic_block *bbs, bb;
  unsigned i, binsns, ninsns, ratio;
  rtx insn;

  ninsns = 0;
  bbs = get_loop_body (loop);
  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = bbs[i];

      binsns = 0;
      FOR_BB_INSNS (bb, insn)
	if (NONDEBUG_INSN_P (insn))
	  binsns++;

      ratio = loop->header->frequency == 0
	      ? BB_FREQ_MAX
	      : (bb->frequency * BB_FREQ_MAX) / loop->header->frequency;
      ninsns += binsns * ratio;
    }
  free (bbs);

  ninsns /= BB_FREQ_MAX;
  if (!ninsns)
    ninsns = 1; /* To avoid division by zero.  */

  return ninsns;
}
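
/* For instance, with BB_FREQ_MAX == 10000, a block with half the
   frequency of the loop header that contains 6 insns contributes
   6 * 5000 to the sum above; the final division by BB_FREQ_MAX turns
   that into 3 insns per average iteration.  */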

/* Returns expected number of iterations of LOOP, according to
   measured or guessed profile.  No bounding is done on the
   value.  */

gcov_type
expected_loop_iterations_unbounded (const struct loop *loop)
{
  edge e;
  edge_iterator ei;

  if (loop->latch->count || loop->header->count)
    {
      gcov_type count_in, count_latch, expected;

      count_in = 0;
      count_latch = 0;

      FOR_EACH_EDGE (e, ei, loop->header->preds)
	if (e->src == loop->latch)
	  count_latch = e->count;
	else
	  count_in += e->count;

      if (count_in == 0)
	expected = count_latch * 2;
      else
	expected = (count_latch + count_in - 1) / count_in;

      return expected;
    }
  else
    {
      int freq_in, freq_latch;

      freq_in = 0;
      freq_latch = 0;

      FOR_EACH_EDGE (e, ei, loop->header->preds)
	if (e->src == loop->latch)
	  freq_latch = EDGE_FREQUENCY (e);
	else
	  freq_in += EDGE_FREQUENCY (e);

      if (freq_in == 0)
	return freq_latch * 2;

      return (freq_latch + freq_in - 1) / freq_in;
    }
}
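
/* For instance, if the profile says the loop was entered 100 times
   (count_in) and its latch edge was taken 900 times (count_latch),
   expected_loop_iterations_unbounded returns
   (900 + 100 - 1) / 100 == 9.  */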

/* Returns expected number of LOOP iterations.  The returned value is bounded
   by REG_BR_PROB_BASE.  */

unsigned
expected_loop_iterations (const struct loop *loop)
{
  gcov_type expected = expected_loop_iterations_unbounded (loop);
  return (expected > REG_BR_PROB_BASE ? REG_BR_PROB_BASE : expected);
}
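
/* REG_BR_PROB_BASE is 10000, so the clamped result always fits in the
   unsigned return type even though the underlying gcov counts are much
   wider.  */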

/* Returns the maximum level of nesting of subloops of LOOP.  */

unsigned
get_loop_level (const struct loop *loop)
{
  const struct loop *ploop;
  unsigned mx = 0, l;

  for (ploop = loop->inner; ploop; ploop = ploop->next)
    {
      l = get_loop_level (ploop);
      if (l >= mx)
	mx = l + 1;
    }
  return mx;
}
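
/* A loop without subloops has level 0; a loop whose subloops contain no
   further nesting has level 1, and so on.  */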

/* Returns an estimate of the cost of computing SEQ.  */

static unsigned
seq_cost (const_rtx seq, bool speed)
{
  unsigned cost = 0;
  rtx set;

  for (; seq; seq = NEXT_INSN (seq))
    {
      set = single_set (seq);
      if (set)
	cost += set_rtx_cost (set, speed);
      else
	cost++;
    }

  return cost;
}
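
/* Insns for which single_set returns NULL are simply charged a flat
   cost of 1 by the loop above.  */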

/* Initialize the constants for computing set costs.  */

void
init_set_costs (void)
{
  int speed;
  rtx seq;
  rtx reg1 = gen_raw_REG (SImode, FIRST_PSEUDO_REGISTER);
  rtx reg2 = gen_raw_REG (SImode, FIRST_PSEUDO_REGISTER + 1);
  rtx addr = gen_raw_REG (Pmode, FIRST_PSEUDO_REGISTER + 2);
  rtx mem = validize_mem (gen_rtx_MEM (SImode, addr));
  unsigned i;

  target_avail_regs = 0;
  target_clobbered_regs = 0;
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (TEST_HARD_REG_BIT (reg_class_contents[GENERAL_REGS], i)
	&& !fixed_regs[i])
      {
	target_avail_regs++;
	if (call_used_regs[i])
	  target_clobbered_regs++;
      }

  target_res_regs = 3;

  for (speed = 0; speed < 2; speed++)
    {
      crtl->maybe_hot_insn_p = speed;
      /* Set up the costs for using extra registers:

	 1) If not many free registers remain, we should prefer having an
	    additional move to decreasing the number of available registers.
	    (TARGET_REG_COST).
	 2) If no registers are available, we need to spill, which may require
	    storing the old value to memory and loading it back
	    (TARGET_SPILL_COST).  */

      start_sequence ();
      emit_move_insn (reg1, reg2);
      seq = get_insns ();
      end_sequence ();
      target_reg_cost [speed] = seq_cost (seq, speed);

      start_sequence ();
      emit_move_insn (mem, reg1);
      emit_move_insn (reg2, mem);
      seq = get_insns ();
      end_sequence ();
      target_spill_cost [speed] = seq_cost (seq, speed);
    }
  default_rtl_profile ();
}
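
/* Note that reg1, reg2 and addr above deliberately use the first pseudo
   register numbers rather than hard registers, so the measured move and
   spill costs do not depend on the properties of any particular hard
   register.  */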

/* Estimates cost of increased register pressure caused by making N_NEW new
   registers live around the loop.  N_OLD is the number of registers live
   around the loop.  If CALL_P is true, also take into account that
   call-used registers may be clobbered in the loop body, reducing the
   number of available registers before we spill.  */

unsigned
estimate_reg_pressure_cost (unsigned n_new, unsigned n_old, bool speed,
			    bool call_p)
{
  unsigned cost;
  unsigned regs_needed = n_new + n_old;
  unsigned available_regs = target_avail_regs;

  /* If there is a call in the loop body, the call-clobbered registers
     are not available for loop invariants.  */
  if (call_p)
    available_regs = available_regs - target_clobbered_regs;

  /* If we have enough registers, we should use them and not restrict
     the transformations unnecessarily.  */
  if (regs_needed + target_res_regs <= available_regs)
    return 0;

  if (regs_needed <= available_regs)
    /* If we are close to running out of registers, try to preserve
       them.  */
    cost = target_reg_cost [speed] * n_new;
  else
    /* If we run out of registers, it is very expensive to add another
       one.  */
    cost = target_spill_cost [speed] * n_new;

  if (optimize && (flag_ira_region == IRA_REGION_ALL
		   || flag_ira_region == IRA_REGION_MIXED)
      && number_of_loops () <= (unsigned) IRA_MAX_LOOPS_NUM)
    /* IRA regional allocation deals with high register pressure
       better, so decrease the cost.  (To make the cost calculation
       for IRA more accurate, we would need to know how many registers
       live through the loop transparently.)  */
    cost /= 2;

  return cost;
}
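
/* For instance, with target_avail_regs == 6, target_res_regs == 3 and no
   call in the loop body, requesting n_new == 2 registers on top of
   n_old == 2 gives regs_needed == 4: the cheap case (4 + 3 <= 6) does
   not apply, but since 4 <= 6 the cost is 2 * target_reg_cost[speed]
   rather than the spill cost.  */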

/* Sets EDGE_LOOP_EXIT flag for all loop exits.  */

void
mark_loop_exit_edges (void)
{
  basic_block bb;
  edge e;

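  /* number_of_loops () includes the dummy root loop that contains the
     whole function, so a count of one means there are no real loops
     and hence no exit edges to mark.  */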
  if (number_of_loops () <= 1)
    return;

  FOR_EACH_BB (bb)
    {
      edge_iterator ei;

      FOR_EACH_EDGE (e, ei, bb->succs)
	{
	  if (loop_outer (bb->loop_father)
	      && loop_exit_edge_p (bb->loop_father, e))
	    e->flags |= EDGE_LOOP_EXIT;
	  else
	    e->flags &= ~EDGE_LOOP_EXIT;
	}
    }
}