1 /*
2 * Copyright (C) 1995-2011 University of Karlsruhe. All right reserved.
3 *
4 * This file is part of libFirm.
5 *
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
10 *
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
14 *
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE.
18 */
19
20 /**
21 * @file
22 * @brief Representation and computation of the callgraph.
23 * @author Goetz Lindenmaier
24 * @date 21.7.2004
25 */
26 #include "config.h"
27
28 #include <string.h>
29 #include <stdlib.h>
30
31 #include "callgraph.h"
32
33 #include "irloop_t.h"
34 #include "irprog_t.h"
35 #include "irgraph_t.h"
36 #include "irnode_t.h"
37
38 #include "cgana.h"
39
40 #include "array.h"
41 #include "pmap.h"
42 #include "hashptr.h"
43 #include "raw_bitset.h"
44
45 #include "irgwalk.h"
46
/* Generation counter for callgraph walks: a graph counts as visited when its
 * self_visited stamp is >= this value (see cg_irg_visited()).  Bumping this
 * counter invalidates all previous visited marks at once. */
static ir_visited_t master_cg_visited = 0;
static inline int cg_irg_visited (ir_graph *n);
static inline void mark_cg_irg_visited(ir_graph *n);
50
get_irp_callgraph_state(void)51 irp_callgraph_state get_irp_callgraph_state(void)
52 {
53 return irp->callgraph_state;
54 }
55
/* Record the consistency state of the program's callgraph. */
void set_irp_callgraph_state(irp_callgraph_state s)
{
	irp_callgraph_state new_state = s;
	irp->callgraph_state = new_state;
}
60
get_irg_n_callers(const ir_graph * irg)61 size_t get_irg_n_callers(const ir_graph *irg)
62 {
63 assert(irg->callers);
64 return irg->callers ? ARR_LEN(irg->callers) : 0;
65 }
66
/* Return the pos'th caller of irg (NULL if the caller array is absent). */
ir_graph *get_irg_caller(const ir_graph *irg, size_t pos)
{
	assert(pos < get_irg_n_callers(irg));
	if (irg->callers == NULL)
		return NULL;
	return irg->callers[pos];
}
72
/* Is the edge from caller pos to irg marked as a backedge? */
int is_irg_caller_backedge(const ir_graph *irg, size_t pos)
{
	assert(pos < get_irg_n_callers(irg));
	if (irg->caller_isbe == NULL)
		return 0;
	return rbitset_is_set(irg->caller_isbe, pos);
}
78
79 /** Search the caller in the list of all callers and set its backedge property. */
/** Search the caller in the list of all callers and set its backedge property. */
static void set_irg_caller_backedge(ir_graph *irg, const ir_graph *caller)
{
	size_t n_callers = get_irg_n_callers(irg);
	size_t idx;

	/* the backedge bitset is created lazily */
	if (irg->caller_isbe == NULL)
		irg->caller_isbe = rbitset_malloc(n_callers);

	for (idx = 0; idx < n_callers; ++idx) {
		if (get_irg_caller(irg, idx) != caller)
			continue;
		rbitset_set(irg->caller_isbe, idx);
		break;
	}
}
94
has_irg_caller_backedge(const ir_graph * irg)95 int has_irg_caller_backedge(const ir_graph *irg)
96 {
97 size_t i, n_callers = get_irg_n_callers(irg);
98
99 if (irg->caller_isbe != NULL) {
100 for (i = 0; i < n_callers; ++i)
101 if (rbitset_is_set(irg->caller_isbe, i))
102 return 1;
103 }
104 return 0;
105 }
106
107 /**
108 * Find the reversion position of a caller.
109 * Given the position pos_caller of an caller of irg, return
110 * irg's callee position on that caller.
111 */
/**
 * Find the reversed position of a caller edge: given that
 * get_irg_caller(callee, pos_caller) == caller, return the index i with
 * get_irg_callee(caller, i) == callee.
 */
static size_t reverse_pos(const ir_graph *callee, size_t pos_caller)
{
	ir_graph *caller = get_irg_caller(callee, pos_caller);
	size_t n_callees = get_irg_n_callees(caller);
	size_t idx;

	/* scan the caller's callee list for the matching edge */
	for (idx = 0; idx < n_callees; ++idx) {
		if (get_irg_callee(caller, idx) == callee)
			return idx;
	}

	assert(!"reverse_pos() did not find position");

	return 0;
}
127
/* Loop depth of the call site(s) behind caller edge pos, looked up via the
 * reversed (callee) edge on the caller side. */
size_t get_irg_caller_loop_depth(const ir_graph *irg, size_t pos)
{
	ir_graph *caller = get_irg_caller(irg, pos);
	size_t callee_pos = reverse_pos(irg, pos);
	return get_irg_callee_loop_depth(caller, callee_pos);
}
135
get_irg_n_callees(const ir_graph * irg)136 size_t get_irg_n_callees(const ir_graph *irg)
137 {
138 assert(irg->callees);
139 return irg->callees ? ARR_LEN(irg->callees) : 0;
140 }
141
/* Return the pos'th callee of irg (NULL if the callee array is absent). */
ir_graph *get_irg_callee(const ir_graph *irg, size_t pos)
{
	assert(pos < get_irg_n_callees(irg));
	if (irg->callees == NULL)
		return NULL;
	return irg->callees[pos]->irg;
}
147
/* Is the edge from irg to callee pos marked as a backedge? */
int is_irg_callee_backedge(const ir_graph *irg, size_t pos)
{
	assert(pos < get_irg_n_callees(irg));
	if (irg->callee_isbe == NULL)
		return 0;
	return rbitset_is_set(irg->callee_isbe, pos);
}
153
has_irg_callee_backedge(const ir_graph * irg)154 int has_irg_callee_backedge(const ir_graph *irg)
155 {
156 size_t i, n_callees = get_irg_n_callees(irg);
157
158 if (irg->callee_isbe != NULL) {
159 for (i = 0; i < n_callees; ++i)
160 if (rbitset_is_set(irg->callee_isbe, i))
161 return 1;
162 }
163 return 0;
164 }
165
166 /**
167 * Mark the callee at position pos as a backedge.
168 */
/**
 * Mark the callee at position pos as a backedge.
 */
static void set_irg_callee_backedge(ir_graph *irg, size_t pos)
{
	size_t n_callees = get_irg_n_callees(irg);

	/* the backedge bitset is created lazily */
	if (irg->callee_isbe == NULL)
		irg->callee_isbe = rbitset_malloc(n_callees);
	assert(pos < n_callees);
	rbitset_set(irg->callee_isbe, pos);
}
179
/* Maximal loop depth of any Call node behind callee edge pos, as computed
 * by ana_Call() (0 if the callee array is absent). */
size_t get_irg_callee_loop_depth(const ir_graph *irg, size_t pos)
{
	assert(pos < get_irg_n_callees(irg));
	if (irg->callees == NULL)
		return 0;
	return irg->callees[pos]->max_depth;
}
185
186
187 /**
188 * Pre-Walker called by compute_callgraph(), analyses all Call nodes.
189 */
/**
 * Pre-Walker called by compute_callgraph(), analyses all Call nodes.
 *
 * For every possible callee of a Call: registers the current graph in the
 * callee's caller set and a cg_callee_entry (carrying the Call node and the
 * maximal loop depth seen) in the current graph's callee set.  During this
 * phase irg->callers/irg->callees temporarily hold psets, cast back and
 * forth by compute_callgraph(), which later converts them to arrays.
 */
static void ana_Call(ir_node *n, void *env)
{
	size_t i, n_callees;
	ir_graph *irg;
	(void) env;

	if (! is_Call(n)) return;

	irg = get_irn_irg(n);
	n_callees = get_Call_n_callees(n);
	for (i = 0; i < n_callees; ++i) {
		ir_entity *callee_e = get_Call_callee(n, i);
		ir_graph *callee = get_entity_irg(callee_e);

		/* entities without a graph (e.g. external functions) are skipped */
		if (callee) {
			cg_callee_entry buf;
			cg_callee_entry *found;
			unsigned depth;

			buf.irg = callee;

			pset_insert((pset *)callee->callers, irg, hash_ptr(irg));
			found = (cg_callee_entry*) pset_find((pset *)irg->callees, &buf, hash_ptr(callee));
			if (found) { /* add Call node to list, compute new nesting. */
				ir_node **arr = found->call_list;
				ARR_APP1(ir_node *, arr, n);
				found->call_list = arr;
			} else { /* New node, add Call node and init nesting. */
				found = OALLOC(irg->obst, cg_callee_entry);
				found->irg = callee;
				found->call_list = NEW_ARR_F(ir_node *, 1);
				found->call_list[0] = n;
				found->max_depth = 0;
				pset_insert((pset *)irg->callees, found, hash_ptr(callee));
			}
			/* remember the deepest loop nesting any call to this callee occurs in;
			   requires cf backedges, built by compute_callgraph() beforehand */
			depth = get_loop_depth(get_irn_loop(get_nodes_block(n)));
			found->max_depth = (depth > found->max_depth) ? depth : found->max_depth;
		}
	}
}
230
231 /** compare two ir graphs in a cg_callee_entry */
cg_callee_entry_cmp(const void * elt,const void * key)232 static int cg_callee_entry_cmp(const void *elt, const void *key)
233 {
234 const cg_callee_entry *e1 = (const cg_callee_entry*) elt;
235 const cg_callee_entry *e2 = (const cg_callee_entry*) key;
236 return e1->irg != e2->irg;
237 }
238
239 /** compare two ir graphs for pointer identity */
graph_cmp(const void * elt,const void * key)240 static int graph_cmp(const void *elt, const void *key)
241 {
242 const ir_graph *e1 = (const ir_graph*) elt;
243 const ir_graph *e2 = (const ir_graph*) key;
244 return e1 != e2;
245 }
246
/* (Re)compute the callgraph for all graphs of the program.
 *
 * Three phases:
 *  1. free any old callgraph and install empty psets in callees/callers
 *     (cast to the array types they will hold after phase 3),
 *  2. walk every graph with ana_Call() to fill the psets,
 *  3. convert the psets into flexible arrays and free the psets.
 * Requires consistent callee info on every graph (asserted).
 */
void compute_callgraph(void)
{
	size_t i, n_irgs;

	/* initialize */
	free_callgraph();

	n_irgs = get_irp_n_irgs();
	for (i = 0; i < n_irgs; ++i) {
		ir_graph *irg = get_irp_irg(i);
		assert(get_irg_callee_info_state(irg) == irg_callee_info_consistent);
		/* temporarily store psets in the array-typed fields, see phase 3 */
		irg->callees = (cg_callee_entry **)new_pset(cg_callee_entry_cmp, 8);
		irg->callers = (ir_graph **)new_pset(graph_cmp, 8);
	}

	/* Compute the call graph */
	for (i = 0; i < n_irgs; ++i) {
		ir_graph *irg = get_irp_irg(i);
		/* backedges give loop info so ana_Call can record the maximal
		   loop depth of each call */
		construct_cf_backedges(irg);
		irg_walk_graph(irg, ana_Call, NULL, NULL);
	}

	/* Change the sets to arrays. */
	for (i = 0; i < n_irgs; ++i) {
		size_t j, count;
		ir_graph *irg = get_irp_irg(i);
		pset *callee_set, *caller_set;

		callee_set = (pset *)irg->callees;
		count = pset_count(callee_set);
		irg->callees = NEW_ARR_F(cg_callee_entry *, count);
		irg->callee_isbe = NULL;
		j = 0;
		foreach_pset(callee_set, cg_callee_entry, callee) {
			irg->callees[j++] = callee;
		}
		del_pset(callee_set);
		assert(j == count);

		caller_set = (pset *)irg->callers;
		count = pset_count(caller_set);
		irg->callers = NEW_ARR_F(ir_graph *, count);
		irg->caller_isbe = NULL;
		j = 0;
		foreach_pset(caller_set, ir_graph, c) {
			irg->callers[j++] = c;
		}
		del_pset(caller_set);
		assert(j == count);
	}
	set_irp_callgraph_state(irp_callgraph_consistent);
}
300
free_callgraph(void)301 void free_callgraph(void)
302 {
303 size_t i, n_irgs = get_irp_n_irgs();
304 for (i = 0; i < n_irgs; ++i) {
305 ir_graph *irg = get_irp_irg(i);
306 if (irg->callees) DEL_ARR_F(irg->callees);
307 if (irg->callers) DEL_ARR_F(irg->callers);
308 if (irg->callee_isbe) free(irg->callee_isbe);
309 if (irg->caller_isbe) free(irg->caller_isbe);
310 irg->callees = NULL;
311 irg->callers = NULL;
312 irg->callee_isbe = NULL;
313 irg->caller_isbe = NULL;
314 }
315 set_irp_callgraph_state(irp_callgraph_none);
316 }
317
318
/* Recursive worker for callgraph_walk(): depth-first over callee edges,
 * calling pre before and post after the descent.  Visited marks are
 * generation stamps checked against master_cg_visited. */
static void do_walk(ir_graph *irg, callgraph_walk_func *pre, callgraph_walk_func *post, void *env)
{
	size_t idx, n_callees;

	if (cg_irg_visited(irg))
		return;
	mark_cg_irg_visited(irg);

	if (pre != NULL)
		pre(irg, env);

	n_callees = get_irg_n_callees(irg);
	for (idx = 0; idx < n_callees; ++idx) {
		ir_graph *callee = get_irg_callee(irg, idx);
		do_walk(callee, pre, post, env);
	}

	if (post != NULL)
		post(irg, env);
}
339
/* Walk the callgraph of the whole program, calling pre/post on each graph
 * exactly once. */
void callgraph_walk(callgraph_walk_func *pre, callgraph_walk_func *post, void *env)
{
	size_t idx, n_irgs = get_irp_n_irgs();

	++master_cg_visited;

	/* first pass: start at the roots, i.e. methods without callers in the
	   current program */
	for (idx = 0; idx < n_irgs; ++idx) {
		ir_graph *irg = get_irp_irg(idx);
		if (get_irg_n_callers(irg) == 0)
			do_walk(irg, pre, post, env);
	}

	/* second pass: graphs only reachable through unreachable call cycles
	   were not visited yet; pick them up now */
	for (idx = 0; idx < n_irgs; ++idx) {
		ir_graph *irg = get_irp_irg(idx);
		do_walk(irg, pre, post, env);
	}
}
359
static ir_graph *outermost_ir_graph;   /**< The outermost graph the scc is computed
                                            for */
static ir_loop *current_loop;      /**< Current cfloop construction is working
                                        on. */
static size_t loop_node_cnt = 0;   /**< Counts the number of allocated cfloop nodes.
                                        Each cfloop node gets a unique number.
                                        What for? ev. remove. @@@ */
static size_t current_dfn = 1;     /**< Counter to generate depth first numbering
                                        of visited nodes. */

/** Per-graph bookkeeping for Tarjan's scc algorithm, attached to the graph
 *  via its link field (see init_scc()). */
typedef struct scc_info {
	size_t dfn;            /**< Depth first search number. */
	size_t uplink;         /**< dfn number of ancestor. */
	ir_visited_t visited;  /**< visited counter */
	int in_stack;          /**< Marks whether node is on the stack. */
} scc_info;
376
377 /**
378 * allocates a new scc_info on the obstack
379 */
new_scc_info(struct obstack * obst)380 static inline scc_info *new_scc_info(struct obstack *obst)
381 {
382 return OALLOCZ(obst, scc_info);
383 }
384
385 /**
386 * Returns non-zero if a graph was already visited.
387 */
/**
 * Returns non-zero if a graph was already visited in the current walk
 * generation (its stamp is at least master_cg_visited).
 */
static inline int cg_irg_visited(ir_graph *irg)
{
	const ir_visited_t stamp = irg->self_visited;
	return stamp >= master_cg_visited;
}
392
393 /**
394 * Marks a graph as visited.
395 */
/**
 * Marks a graph as visited by stamping it with the current walk generation.
 */
static inline void mark_cg_irg_visited(ir_graph *irg)
{
	irg->self_visited = master_cg_visited;
}
400
401 /**
402 * Set a graphs visited flag to i.
403 */
/**
 * Set a graph's visited stamp to i.  Used with 0 to force re-visiting
 * (see pop_scc_unmark_visit()).
 */
static inline void set_cg_irg_visited(ir_graph *irg, ir_visited_t i)
{
	irg->self_visited = i;
}
408
409 /**
410 * Returns the visited flag of a graph.
411 */
/**
 * Returns the raw visited stamp of a graph.
 */
static inline ir_visited_t get_cg_irg_visited(const ir_graph *irg)
{
	return irg->self_visited;
}
416
/* Flag the graph as currently lying on the scc stack. */
static inline void mark_irg_in_stack(ir_graph *irg)
{
	scc_info *si = (scc_info*) get_irg_link(irg);
	assert(si && "missing call to init_scc()");
	si->in_stack = 1;
}
423
/* Clear the on-stack flag of the graph. */
static inline void mark_irg_not_in_stack(ir_graph *irg)
{
	scc_info *si = (scc_info*) get_irg_link(irg);
	assert(si && "missing call to init_scc()");
	si->in_stack = 0;
}
430
irg_is_in_stack(const ir_graph * irg)431 static inline int irg_is_in_stack(const ir_graph *irg)
432 {
433 scc_info *info = (scc_info*) get_irg_link(irg);
434 assert(info && "missing call to init_scc()");
435 return info->in_stack;
436 }
437
/* Store the Tarjan uplink (smallest reachable dfn) of the graph. */
static inline void set_irg_uplink(ir_graph *irg, size_t uplink)
{
	scc_info *si = (scc_info*) get_irg_link(irg);
	assert(si && "missing call to init_scc()");
	si->uplink = uplink;
}
444
get_irg_uplink(const ir_graph * irg)445 static inline size_t get_irg_uplink(const ir_graph *irg)
446 {
447 const scc_info *info = (scc_info*) get_irg_link(irg);
448 assert(info && "missing call to init_scc()");
449 return info->uplink;
450 }
451
/* Store the depth-first number of the graph. */
static inline void set_irg_dfn(ir_graph *irg, size_t dfn)
{
	scc_info *si = (scc_info*) get_irg_link(irg);
	assert(si && "missing call to init_scc()");
	si->dfn = dfn;
}
458
get_irg_dfn(const ir_graph * irg)459 static inline size_t get_irg_dfn(const ir_graph *irg)
460 {
461 const scc_info *info = (scc_info*) get_irg_link(irg);
462 assert(info && "missing call to init_scc()");
463 return info->dfn;
464 }
465
/* Explicit stack of graphs for Tarjan's scc algorithm (flexible array). */
static ir_graph **stack = NULL;
static size_t tos = 0;             /**< top of stack */
468
469 /**
470 * Initialize the irg stack.
471 */
init_stack(void)472 static inline void init_stack(void)
473 {
474 if (stack) {
475 ARR_RESIZE(ir_graph *, stack, 1000);
476 } else {
477 stack = NEW_ARR_F(ir_graph *, 1000);
478 }
479 tos = 0;
480 }
481
482 /**
483 * push a graph on the irg stack
484 * @param n the graph to be pushed
485 */
/**
 * Push a graph on the irg stack, growing the stack (doubling) on demand,
 * and mark the graph as on-stack.
 *
 * @param irg  the graph to be pushed
 */
static inline void push(ir_graph *irg)
{
	size_t len = ARR_LEN(stack);
	if (tos == len)
		ARR_RESIZE(ir_graph*, stack, len * 2);
	stack[tos] = irg;
	++tos;
	mark_irg_in_stack(irg);
}
495
496 /**
497 * return the topmost graph on the stack and pop it
498 */
pop(void)499 static inline ir_graph *pop(void)
500 {
501 ir_graph *irg;
502
503 assert(tos > 0);
504 irg = stack[--tos];
505 mark_irg_not_in_stack(irg);
506 return irg;
507 }
508
509 /**
510 * The nodes up to irg belong to the current loop.
511 * Removes them from the stack and adds them to the current loop.
512 */
/**
 * The nodes from the top of stack down to (and including) irg form the
 * current loop: pop them, give each a fresh dfn (loop-local numbering),
 * append them to current_loop and point their l field at it.
 */
static inline void pop_scc_to_loop(ir_graph *irg)
{
	ir_graph *m;

	do {
		m = pop();
		++loop_node_cnt;
		set_irg_dfn(m, loop_node_cnt);
		add_loop_irg(current_loop, m);
		m->l = current_loop;
		//m->callgraph_loop_depth = current_loop->depth;
	} while (m != irg);
}
526
527 /* GL ??? my last son is my grandson??? Removes cfloops with no
528 ir_nodes in them. Such loops have only another loop as son. (Why
529 can't they have two loops as sons? Does it never get that far? ) */
/* GL ??? my last son is my grandson??? Removes cfloops with no
   ir_nodes in them. Such loops have only another loop as son. (Why
   can't they have two loops as sons? Does it never get that far? )
   NOTE(review): assumes l has at least one element; last would wrap
   around for an empty loop -- presumably guaranteed by the caller
   (cgscc closes a loop only after populating it), confirm. */
static void close_loop(ir_loop *l)
{
	size_t last = get_loop_n_elements(l) - 1;
	loop_element lelement = get_loop_element(l, last);
	ir_loop *last_son = lelement.son;

	/* collapse a degenerate last son that contains nothing but one loop:
	   re-parent the grandson directly under l */
	if (get_kind(last_son) == k_ir_loop &&
	    get_loop_n_elements(last_son) == 1) {
		ir_loop *gson;

		lelement = get_loop_element(last_son, 0);
		gson = lelement.son;
		if (get_kind(gson) == k_ir_loop) {
			loop_element new_last_son;

			gson->outer_loop = l;
			new_last_son.son = gson;
			l->children[last] = new_last_son;
		}
	}
	/* l becomes the loop under construction again */
	current_loop = l;
}
552
553 /**
554 * Removes and unmarks all nodes up to n from the stack.
555 * The nodes must be visited once more to assign them to a scc.
556 */
/**
 * Removes and unmarks all nodes up to n from the stack.
 * The nodes must be visited once more to assign them to a scc.
 */
static inline void pop_scc_unmark_visit(ir_graph *n)
{
	for (;;) {
		ir_graph *m = pop();
		set_cg_irg_visited(m, 0);
		if (m == n)
			break;
	}
}
566
567 /**
568 * Allocates a new loop as son of current_loop. Sets current_loop
569 * to the new loop and returns the father.
570 */
new_loop(void)571 static ir_loop *new_loop(void)
572 {
573 ir_loop *father = current_loop;
574 ir_loop *son = alloc_loop(father, outermost_ir_graph->obst);
575
576 current_loop = son;
577 return father;
578 }
579
580
init_scc(struct obstack * obst)581 static void init_scc(struct obstack *obst)
582 {
583 size_t i, n_irgs;
584
585 current_dfn = 1;
586 loop_node_cnt = 0;
587 init_stack();
588
589 n_irgs = get_irp_n_irgs();
590 for (i = 0; i < n_irgs; ++i) {
591 ir_graph *irg = get_irp_irg(i);
592 set_irg_link(irg, new_scc_info(obst));
593 irg->callgraph_recursion_depth = 0;
594 irg->callgraph_loop_depth = 0;
595 }
596 }
597
598 /** Returns non-zero if n is a loop header, i.e., it is a Block node
599 * and has predecessors within the cfloop and out of the cfloop.
600 *
601 * @param root: only needed for assertion.
602 */
is_head(const ir_graph * n,const ir_graph * root)603 static int is_head(const ir_graph *n, const ir_graph *root)
604 {
605 size_t i, n_callees;
606 int some_outof_loop = 0, some_in_loop = 0;
607
608 n_callees = get_irg_n_callees(n);
609 for (i = 0; i < n_callees; ++i) {
610 const ir_graph *pred = get_irg_callee(n, i);
611 if (is_irg_callee_backedge(n, i)) continue;
612 if (!irg_is_in_stack(pred)) {
613 some_outof_loop = 1;
614 } else {
615 if (get_irg_uplink(pred) < get_irg_uplink(root)) {
616 assert(get_irg_uplink(pred) >= get_irg_uplink(root));
617 }
618 some_in_loop = 1;
619 }
620 }
621
622 return some_outof_loop && some_in_loop;
623 }
624
625 /**
626 * Returns non-zero if n is possible loop head of an endless loop.
627 * I.e., it is a Block or Phi node and has only predecessors
628 * within the loop.
629 * @arg root: only needed for assertion.
630 */
is_endless_head(const ir_graph * n,const ir_graph * root)631 static int is_endless_head(const ir_graph *n, const ir_graph *root)
632 {
633 size_t i, n_calless;
634 int some_outof_loop = 0, some_in_loop = 0;
635
636 n_calless = get_irg_n_callees(n);
637 for (i = 0; i < n_calless; ++i) {
638 const ir_graph *pred = get_irg_callee(n, i);
639 assert(pred);
640 if (is_irg_callee_backedge(n, i))
641 continue;
642 if (!irg_is_in_stack(pred)) {
643 some_outof_loop = 1;
644 } else {
645 if (get_irg_uplink(pred) < get_irg_uplink(root)) {
646 assert(get_irg_uplink(pred) >= get_irg_uplink(root));
647 }
648 some_in_loop = 1;
649 }
650 }
651 return !some_outof_loop && some_in_loop;
652 }
653
654 /**
655 * Finds index of the predecessor with the smallest dfn number
656 * greater-equal than limit.
657 */
/**
 * Finds the index of the on-stack, non-backedge callee with the smallest
 * dfn number that is >= limit.
 *
 * @return true and the index in *result on success, false otherwise
 *         (*result is then 0).
 */
static bool smallest_dfn_pred(const ir_graph *n, size_t limit, size_t *result)
{
	size_t best_index = 0, best_dfn = 0;
	bool found = false;
	size_t i, n_callees = get_irg_n_callees(n);

	for (i = 0; i < n_callees; ++i) {
		const ir_graph *pred = get_irg_callee(n, i);
		size_t dfn;

		if (is_irg_callee_backedge(n, i) || !irg_is_in_stack(pred))
			continue;
		dfn = get_irg_dfn(pred);
		if (dfn < limit)
			continue;
		if (!found || dfn < best_dfn) {
			best_index = i;
			best_dfn = dfn;
			found = true;
		}
	}

	*result = best_index;
	return found;
}
678
679 /** Finds index of the predecessor with the largest dfn number. */
/** Finds the index of the on-stack, non-backedge callee with the largest
 * dfn number.
 *
 * @return true and the index in *result on success, false otherwise
 *         (*result is then 0).
 */
static bool largest_dfn_pred(const ir_graph *n, size_t *result)
{
	size_t best_index = 0, best_dfn = 0;
	bool found = false;
	size_t i, n_callees = get_irg_n_callees(n);

	for (i = 0; i < n_callees; ++i) {
		const ir_graph *pred = get_irg_callee(n, i);
		size_t dfn;

		if (is_irg_callee_backedge (n, i) || !irg_is_in_stack(pred))
			continue;
		/* Note: dfn is always > 0, so any candidate beats the initial 0 */
		dfn = get_irg_dfn(pred);
		if (dfn > best_dfn) {
			best_index = i;
			best_dfn = dfn;
			found = true;
		}
	}

	*result = best_index;
	return found;
}
701
find_tail(const ir_graph * n)702 static ir_graph *find_tail(const ir_graph *n)
703 {
704 bool found = false;
705 ir_graph *m;
706 size_t i, res_index;
707
708 /*
709 if (!icfg && rm_cyclic_phis && remove_cyclic_phis (n)) return NULL;
710 */
711 m = stack[tos - 1]; /* tos = top of stack */
712 if (is_head(m, n)) {
713 found = smallest_dfn_pred(m, 0, &res_index);
714 if (!found && /* no smallest dfn pred found. */
715 n == m)
716 return NULL;
717 } else {
718 if (m == n) return NULL; // Is this to catch Phi - self loops?
719 for (i = tos - 1; i > 0;) {
720 m = stack[--i];
721
722 if (is_head(m, n)) {
723 found = smallest_dfn_pred(m, get_irg_dfn(m) + 1, &res_index);
724 if (! found) /* no smallest dfn pred found. */
725 found = largest_dfn_pred(m, &res_index);
726
727 break;
728 }
729
730 /* We should not walk past our selves on the stack: The upcoming nodes
731 are not in this loop. We assume a loop not reachable from Start. */
732 if (m == n) {
733 found = false;
734 break;
735 }
736
737 }
738
739 if (! found) {
740 /* A dead loop not reachable from Start. */
741 for (i = tos-1; i > 0;) {
742 m = stack[--i];
743 if (is_endless_head(m, n)) {
744 found = smallest_dfn_pred(m, get_irg_dfn(m) + 1, &res_index);
745 if (!found) /* no smallest dfn pred found. */
746 found = largest_dfn_pred(m, &res_index);
747 break;
748 }
749 /* It's not an unreachable loop, either. */
750 if (m == n)
751 break;
752 }
753 //assert(0 && "no head found on stack");
754 }
755
756 }
757 assert(found);
758
759 set_irg_callee_backedge(m, res_index);
760 return get_irg_callee(m, res_index);
761 }
762
/**
 * Tarjan-style scc search on the callgraph, building the callgraph loop
 * tree as a side effect.  Assigns dfn/uplink numbers, keeps the scc
 * candidates on the global stack and, whenever a complete scc is found,
 * opens a loop, marks a backedge via find_tail() and recurses to discover
 * inner loops.
 */
static void cgscc(ir_graph *n)
{
	size_t i, n_callees;

	if (cg_irg_visited(n)) return;
	mark_cg_irg_visited(n);

	/* Initialize the node */
	set_irg_dfn(n, current_dfn);      /* Depth first number for this node */
	set_irg_uplink(n, current_dfn);   /* ... is default uplink. */
	++current_dfn;
	push(n);

	n_callees = get_irg_n_callees(n);
	for (i = 0; i < n_callees; ++i) {
		ir_graph *m;
		/* edges already marked as backedges are skipped: they were
		   temporarily removed by an earlier find_tail() */
		if (is_irg_callee_backedge(n, i)) continue;
		m = get_irg_callee(n, i);

		/** This marks the backedge, but does it guarantee a correct loop tree? */
		//if (m == n) { set_irg_callee_backedge(n, i); continue; }

		cgscc(m);
		if (irg_is_in_stack(m)) {
			/* Uplink of m is smaller if n->m is a backedge.
			   Propagate the uplink to mark the cfloop. */
			if (get_irg_uplink(m) < get_irg_uplink(n))
				set_irg_uplink(n, get_irg_uplink(m));
		}
	}

	if (get_irg_dfn(n) == get_irg_uplink(n)) {
		/* This condition holds for
		   1) the node with the incoming backedge.
		      That is: We found a cfloop!
		   2) Straight line code, because no uplink has been propagated, so the
		      uplink still is the same as the dfn.

		   But n might not be a proper cfloop head for the analysis. Proper cfloop
		   heads are Block and Phi nodes. find_tail searches the stack for
		   Block's and Phi's and takes those nodes as cfloop heads for the current
		   cfloop instead and marks the incoming edge as backedge. */

		ir_graph *tail = find_tail(n);
		if (tail) {
			/* We have a cfloop, that is no straight line code,
			   because we found a cfloop head!
			   Next actions: Open a new cfloop on the cfloop tree and
			   try to find inner cfloops */

			ir_loop *l = new_loop();

			/* Remove the cfloop from the stack ... */
			pop_scc_unmark_visit(n);

			/* The current backedge has been marked, that is temporarily eliminated,
			   by find tail. Start the scc algorithm
			   anew on the subgraph thats left (the current cfloop without the backedge)
			   in order to find more inner cfloops. */

			cgscc(tail);

			assert(cg_irg_visited(n));
			close_loop(l);
		} else {
			/* straight line code or trivial self loop: no new loop node */
			pop_scc_to_loop(n);
		}
	}
}
833
834
835 /**
836 * reset the backedge information for all callers in all irgs
837 */
reset_isbe(void)838 static void reset_isbe(void)
839 {
840 size_t i, n_irgs = get_irp_n_irgs();
841
842 for (i = 0; i < n_irgs; ++i) {
843 ir_graph *irg = get_irp_irg(i);
844
845 if (irg->caller_isbe)
846 xfree(irg->caller_isbe);
847 irg->caller_isbe = NULL;
848
849 if (irg->callee_isbe)
850 xfree(irg->callee_isbe);
851 irg->callee_isbe = NULL;
852 }
853 }
854
/* Compute the callgraph loop tree (recursions) for the whole program.
 *
 * Runs the scc algorithm starting at the main graph, then at all uncalled
 * graphs, then at any remaining unvisited ones, and finally mirrors the
 * callee backedge marks onto the caller side.  Requires a consistent
 * callgraph (irg->callees/callers arrays) to be present.
 */
void find_callgraph_recursions(void)
{
	size_t i, n_irgs;
	struct obstack temp;   /* scc_info records live only during this call */

	reset_isbe();

	/* -- compute the looptree. -- */

	/* The outermost graph. We start here. Then we start at all
	   functions in irg list that are never called, then at the remaining
	   unvisited ones. The third step is needed for functions that are not
	   reachable from the outermost graph, but call themselves in a cycle. */
	assert(get_irp_main_irg());
	outermost_ir_graph = get_irp_main_irg();
	obstack_init(&temp);
	init_scc(&temp);

	current_loop = NULL;
	new_loop();  /* sets current_loop */

	++master_cg_visited;
	cgscc(outermost_ir_graph);
	n_irgs = get_irp_n_irgs();
	for (i = 0; i < n_irgs; ++i) {
		ir_graph *irg = get_irp_irg(i);
		if (!cg_irg_visited(irg) && get_irg_n_callers(irg) == 0)
			cgscc(irg);
	}
	for (i = 0; i < n_irgs; ++i) {
		ir_graph *irg = get_irp_irg(i);
		if (!cg_irg_visited(irg))
			cgscc(irg);
	}
	obstack_free(&temp, NULL);

	irp->outermost_cg_loop = current_loop;
	mature_loops(current_loop, outermost_ir_graph->obst);

	/* -- Reverse the backedge information. -- */
	for (i = 0; i < n_irgs; ++i) {
		ir_graph *irg = get_irp_irg(i);
		size_t j, n_callees = get_irg_n_callees(irg);
		for (j = 0; j < n_callees; ++j) {
			if (is_irg_callee_backedge(irg, j))
				set_irg_caller_backedge(get_irg_callee(irg, j), irg);
		}
	}

	irp->callgraph_state = irp_callgraph_and_calltree_consistent;
}
906
get_irg_loop_depth(const ir_graph * irg)907 size_t get_irg_loop_depth(const ir_graph *irg)
908 {
909 assert(irp->callgraph_state == irp_callgraph_consistent ||
910 irp->callgraph_state == irp_callgraph_and_calltree_consistent);
911 return irg->callgraph_loop_depth;
912 }
913
/* Recursion depth of irg; requires a consistent callgraph AND calltree
 * (i.e. find_callgraph_recursions() has run). */
size_t get_irg_recursion_depth(const ir_graph *irg)
{
	assert(irp->callgraph_state == irp_callgraph_and_calltree_consistent);
	return irg->callgraph_recursion_depth;
}
919
analyse_loop_nesting_depth(void)920 void analyse_loop_nesting_depth(void)
921 {
922 /* establish preconditions. */
923 if (get_irp_callee_info_state() != irg_callee_info_consistent) {
924 ir_entity **free_methods = NULL;
925
926 cgana(&free_methods);
927 xfree(free_methods);
928 }
929
930 if (irp_callgraph_consistent != get_irp_callgraph_state()) {
931 compute_callgraph();
932 }
933
934 find_callgraph_recursions();
935
936 set_irp_loop_nesting_depth_state(loop_nesting_depth_consistent);
937 }
938
get_irp_loop_nesting_depth_state(void)939 loop_nesting_depth_state get_irp_loop_nesting_depth_state(void)
940 {
941 return irp->lnd_state;
942 }
/* Record the consistency state of the loop nesting depth information. */
void set_irp_loop_nesting_depth_state(loop_nesting_depth_state s)
{
	loop_nesting_depth_state new_state = s;
	irp->lnd_state = new_state;
}
set_irp_loop_nesting_depth_state_inconsistent(void)947 void set_irp_loop_nesting_depth_state_inconsistent(void)
948 {
949 if (irp->lnd_state == loop_nesting_depth_consistent)
950 irp->lnd_state = loop_nesting_depth_inconsistent;
951 }
952