1 /*
2 * Copyright (C) 1995-2011 University of Karlsruhe. All right reserved.
3 *
4 * This file is part of libFirm.
5 *
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
10 *
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
14 *
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE.
18 */
19
20 /**
21 * @file
22 * @brief Always available outs.
23 * @author Sebastian Hack, Michael Beck, Andreas Schoesser
24 * @date 14.1.2005
25 * @brief
26 * This are out-edges (also called def-use edges) that are dynamically
27 * updated as the graph changes.
28 */
29 #include "config.h"
30
31 #include "irnode_t.h"
32 #include "iropt_t.h"
33 #include "iredgekinds.h"
34 #include "iredges_t.h"
35 #include "irgwalk.h"
36 #include "irdump_t.h"
37 #include "irprintf.h"
38 #include "debug.h"
39 #include "set.h"
40 #include "bitset.h"
41 #include "error.h"
42 #include "irpass_t.h"
43
44 #include "iredgeset.h"
45 #include "hashptr.h"
46
47 #define DO_REHASH
48 #define SCALAR_RETURN
49 #define HashSet ir_edgeset_t
50 #define HashSetIterator ir_edgeset_iterator_t
51 #define ValueType ir_edge_t*
52 #define NullValue NULL
53 #define DeletedValue ((ir_edge_t*)-1)
54 #define Hash(this,key) (hash_ptr(key->src) ^ (key->pos * 40013))
55 #define KeysEqual(this,key1,key2) ((key1->src) == (key2->src) && (key1->pos == key2->pos))
56 #define SetRangeEmpty(ptr,size) memset(ptr, 0, (size) * sizeof((ptr)[0]))
57
58 #define hashset_init ir_edgeset_init
59 void ir_edgeset_init_size(ir_edgeset_t *self, size_t size);
60 #define hashset_init_size ir_edgeset_init_size
61 #define hashset_destroy ir_edgeset_destroy
62 #define hashset_insert ir_edgeset_insert
63 #define hashset_remove ir_edgeset_remove
64 ir_edge_t *ir_edgeset_find(const ir_edgeset_t *self, const ir_edge_t*);
65 #define hashset_find ir_edgeset_find
66 size_t ir_edgeset_size(const ir_edgeset_t *self);
67 #define hashset_size ir_edgeset_size
68 #define hashset_iterator_init ir_edgeset_iterator_init
69 #define hashset_iterator_next ir_edgeset_iterator_next
70 #define hashset_remove_iterator ir_edgeset_remove_iterator
71
72 #include "hashset.c.inl"
73
/**
 * A function that allows for setting an edge.
 * This abstraction is necessary since different edge kinds have
 * different methods of setting edges.
 */
typedef void (set_edge_func_t)(ir_node *src, int pos, ir_node *tgt);

/**
 * A function that returns the "arity" of a given edge kind
 * for a node, i.e. how many out-going slots of that kind it has.
 */
typedef int (get_edge_src_arity_func_t)(const ir_node *src);

/**
 * A function that returns the pos'th edge target of a given edge kind
 * for a node.
 */
typedef ir_node *(get_edge_src_n_func_t)(const ir_node *src, int pos);

/**
 * Additional data for an edge kind: a small dispatch record describing
 * how to enumerate and modify edges of that kind.
 */
typedef struct {
	const char                *name;      /**< name of this edge kind */
	set_edge_func_t           *set_edge;  /**< the set_edge function (NULL if edges of this kind cannot be set directly) */
	int                        first_idx; /**< index of the first possible edge */
	get_edge_src_arity_func_t *get_arity; /**< the get_arity function */
	get_edge_src_n_func_t     *get_n;     /**< the get_n function */
} ir_edge_kind_info_t;
102
103 /**
104 * Get the predecessor block.
105 */
get_block_n(const ir_node * block,int pos)106 static ir_node *get_block_n(const ir_node *block, int pos)
107 {
108 if (is_Block(block))
109 return get_Block_cfgpred_block(block, pos);
110 /* might be a Bad */
111 return NULL;
112 }
113
get_irn_safe_n(const ir_node * node,int n)114 static ir_node *get_irn_safe_n(const ir_node *node, int n)
115 {
116 if (n == -1 && is_Block(node))
117 return NULL;
118 return get_irn_n(node, n);
119 }
120
/** Per-kind dispatch table, indexed by ir_edge_kind_t. */
static const ir_edge_kind_info_t edge_kind_info[EDGE_KIND_LAST] = {
	/* "block succs" has no set_edge function: edges_reroute_kind() skips
	 * kinds whose set_edge is NULL */
	{ "normal" ,     set_irn_n,   -1, get_irn_arity, get_irn_safe_n },
	{ "block succs", NULL,         0, get_irn_arity, get_block_n },
	{ "dependency",  set_irn_dep,  0, get_irn_deps,  get_irn_dep }
};

/** Iterate i over all target positions of irn for the given edge kind (n caches the arity). */
#define foreach_tgt(irn, i, n, kind) for (i = edge_kind_info[kind].first_idx, n = edge_kind_info[kind].get_arity(irn); i < n; ++i)
/** Return the pos'th target of irn for the given edge kind. */
#define get_n(irn, pos, kind)        (edge_kind_info[kind].get_n(irn, pos))
/** Return the human-readable name of the given edge kind. */
#define get_kind_str(kind)           (edge_kind_info[kind].name)
130
DEBUG_ONLY(static firm_dbg_module_t *dbg;)

/**
 * This flag is set to 1, if the edges get initialized for an irg.
 * Then registering additional private data is forbidden.
 */
static int edges_used = 0;

/**
 * Summed size of all users' private data, registered via
 * edges_register_private_data() before the first activation.
 */

static size_t edges_private_size = 0;
/** Allocation size of one edge: the edge record plus the private area behind it. */
#define EDGE_SIZE (sizeof(ir_edge_t) + edges_private_size)

/**
 * If set to 1, the list heads are checked every time an edge is changed.
 */
static int edges_dbg = 0;
150
/**
 * Returns an ID for the given edge (its address reinterpreted as a number,
 * used only for diagnostic output).
 *
 * NOTE(review): casting a pointer to long truncates on LLP64 platforms
 * (64-bit Windows); harmless for log output, but worth confirming.
 */
static inline long edge_get_id(const ir_edge_t *e)
{
	return (long)e;
}
158
/**
 * Reserve @p n bytes of per-edge private data for the caller.
 * Must happen before any edges are initialized.
 *
 * @return byte offset of the reserved area inside the private block
 */
size_t edges_register_private_data(size_t n)
{
	assert(!edges_used && "you cannot register private edge data, if edges have been initialized");

	size_t offset = edges_private_size;
	edges_private_size += n;
	return offset;
}
168
/**
 * Zero out @p size bytes at byte offset @p offset inside the private data
 * area of every normal out edge of @p irg.
 */
void edges_reset_private_data(ir_graph *irg, int offset, unsigned size)
{
	irg_edge_info_t       *info = get_irg_edge_info(irg, EDGE_KIND_NORMAL);
	ir_edge_t             *edge;
	ir_edgeset_iterator_t  iter;

	foreach_ir_edgeset(&info->edges, edge, iter) {
		/* The private area starts directly behind the edge record (see the
		 * matching memset(edge + 1, ...) in edges_notify_edge_kind()).
		 * The arithmetic must be byte-granular: the previous code computed
		 * "edge + sizeof(*edge) + offset", which advances the ir_edge_t
		 * pointer by sizeof(*edge) ELEMENTS, i.e. sizeof(*edge)^2 bytes,
		 * clobbering unrelated memory. */
		memset((char*)(edge + 1) + offset, 0, size);
	}
}
179
180 #define TIMES37(x) (((x) << 5) + ((x) << 2) + (x))
181
182 #define get_irn_out_list_head(irn) (&get_irn_out_info(irn)->outs)
183
184 #define edge_hash(edge) (TIMES37((edge)->pos) + hash_ptr((edge)->src))
185
/**
 * (Re)initialize the edge storage of @p irg for the given kind:
 * fresh obstack, empty free list, and an edge set sized either from the
 * estimated node count or (on reinitialization) from the old set's size.
 */
void edges_init_graph_kind(ir_graph *irg, ir_edge_kind_t kind)
{
	if (!edges_activated_kind(irg, kind))
		return;

	irg_edge_info_t *info   = get_irg_edge_info(irg, kind);
	size_t           amount = irg->estimated_node_count * 2;

	edges_used = 1;
	if (info->allocated) {
		/* reinitialization: keep the previous capacity, drop old storage */
		amount = ir_edgeset_size(&info->edges);
		ir_edgeset_destroy(&info->edges);
		obstack_free(&info->edges_obst, NULL);
	}
	obstack_init(&info->edges_obst);
	INIT_LIST_HEAD(&info->free_edges);
	ir_edgeset_init_size(&info->edges, amount);
	info->allocated = 1;
}
204
get_irn_edge_kind(const ir_node * src,int pos,ir_edge_kind_t kind)205 const ir_edge_t *get_irn_edge_kind(const ir_node *src, int pos, ir_edge_kind_t kind)
206 {
207 ir_graph *irg = get_irn_irg(src);
208 if (edges_activated_kind(irg, kind)) {
209 irg_edge_info_t *info = get_irg_edge_info(irg, kind);
210 ir_edge_t key;
211
212 key.src = (ir_node *)src;
213 key.pos = pos;
214
215 return ir_edgeset_find(&info->edges, &key);
216 }
217
218 return NULL;
219 }
220
221 /**
222 * Change the out count
223 *
224 * @param tgt the edge target
225 * @param kind the kind of the edge
226 */
edge_change_cnt(ir_node * tgt,ir_edge_kind_t kind,int ofs)227 static inline void edge_change_cnt(ir_node *tgt, ir_edge_kind_t kind, int ofs)
228 {
229 irn_edge_info_t *info = get_irn_edge_info(tgt, kind);
230 info->out_count += ofs;
231 }
232
/**
 * Verify the edge list of a node, ie. ensure it's a loop:
 * head -> e_1 -> ... -> e_n -> head
 *
 * Walks the out-edge list of @p irn, recording each visited list entry in
 * a pointer set; encountering the same entry twice before the walk returns
 * to the head means the list is corrupt.  Diagnostics are written to
 * stderr; the function asserts at the end if corruption was found.
 */
static inline void verify_list_head(ir_node *irn, ir_edge_kind_t kind)
{
	int err = 0;
	int num = 0;
	pset *lh_set = pset_new_ptr(16);
	const struct list_head *head = &get_irn_edge_info(irn, kind)->outs_head;
	const struct list_head *pos;

	list_for_each(pos, head) {
		if (pset_find_ptr(lh_set, pos)) {
			/* this list entry was already visited: broken cycle */
			const ir_edge_t *edge = list_entry(pos, ir_edge_t, list);

			ir_fprintf(stderr, "EDGE Verifier: edge list broken (self loop not to head) for %+F:\n", irn);
			fprintf(stderr, "- at list entry %d\n", num);
			if (edge->invalid)
				fprintf(stderr, "- edge(%ld) is invalid\n", edge_get_id(edge));
			if (edge->src)
				ir_fprintf(stderr, "- edge(%ld) %+F(%d)\n", edge_get_id(edge), edge->src, edge->pos);
			err = 1;
			break;
		}
		num++;
		pset_insert_ptr(lh_set, pos);
	}

	del_pset(lh_set);

	assert(err == 0);
}
266
/**
 * Dump all edges of the given kind of @p irg to stdout:
 * one "source pos invalid-flag" triple per line.
 */
void edges_dump_kind(ir_graph *irg, ir_edge_kind_t kind)
{
	if (!edges_activated_kind(irg, kind))
		return;

	ir_edgeset_t          *edges = &get_irg_edge_info(irg, kind)->edges;
	ir_edge_t             *e;
	ir_edgeset_iterator_t  iter;

	foreach_ir_edgeset(edges, e, iter) {
		ir_printf("%+F %d %d\n", e->src, e->pos, e->invalid);
	}
}
283
/**
 * Notify the edge subsystem that the @p pos'th input of @p src changed
 * from @p old_tgt to @p tgt for the given edge kind.
 *
 * Three cases:
 *  - tgt == NULL: the edge is deleted (moved to the free list, marked invalid),
 *  - old_tgt != NULL: the existing edge is moved to tgt's out list,
 *  - old_tgt == NULL: a new edge record is allocated (or recycled) and added.
 *
 * @param src     the source node of the edge
 * @param pos     position of the edge at src
 * @param tgt     the new target (NULL deletes the edge)
 * @param old_tgt the previous target (NULL means the edge is new)
 * @param kind    the edge kind
 * @param irg     the graph of src
 */
void edges_notify_edge_kind(ir_node *src, int pos, ir_node *tgt,
                            ir_node *old_tgt, ir_edge_kind_t kind,
                            ir_graph *irg)
{
	const char      *msg = "";
	irg_edge_info_t *info;
	ir_edgeset_t    *edges;
	ir_edge_t        templ;

	assert(edges_activated_kind(irg, kind));

	/*
	 * Only do something, if the old and new target differ.
	 */
	if (tgt == old_tgt)
		return;

	info  = get_irg_edge_info(irg, kind);
	edges = &info->edges;

	/* Initialize the edge template to search in the set. */
	templ.src = src;
	templ.pos = pos;

	/*
	 * If the target is NULL, the edge shall be deleted.
	 */
	if (tgt == NULL) {
		/* search the edge in the set. */
		ir_edge_t *edge = ir_edgeset_find(edges, &templ);

		/* mark the edge invalid if it was found */
		if (edge) {
			msg = "deleting";
			list_del(&edge->list);
			ir_edgeset_remove(edges, edge);
			/* keep the record on the free list for later reuse */
			list_add(&edge->list, &info->free_edges);
			edge->invalid = 1;
			edge->pos = -2;
			edge->src = NULL;
			edge_change_cnt(old_tgt, kind, -1);
		} else {
			/* If the edge was not found issue a warning on the debug stream */
			msg = "edge to delete not found!\n";
		}
	} else {
		/*
		 * The target is not NULL and the old target differs
		 * from the new target, the edge shall be moved (if the
		 * old target was != NULL) or added (if the old target was
		 * NULL).
		 */
		struct list_head *head = &get_irn_edge_info(tgt, kind)->outs_head;

		assert(head->next && head->prev &&
		       "target list head must have been initialized");

		/* If the old target is not null, the edge is moved. */
		if (old_tgt) {
			ir_edge_t *edge = ir_edgeset_find(edges, &templ);
			assert(edge && "edge to redirect not found!");
			assert(! edge->invalid && "Invalid edge encountered");

			msg = "redirecting";

			list_move(&edge->list, head);
			edge_change_cnt(old_tgt, kind, -1);
		} else {
			/* The old target was NULL, thus, the edge is newly created. */
			ir_edge_t *new_edge;
			ir_edge_t *edge;

			if (list_empty(&info->free_edges)) {
				edge = (ir_edge_t*)obstack_alloc(&info->edges_obst, EDGE_SIZE);
			} else {
				edge = list_entry(info->free_edges.next, ir_edge_t, list);
				list_del(&edge->list);
			}

			edge->src       = src;
			edge->pos       = pos;
			edge->invalid   = 0;
			edge->present   = 0;
			edge->kind      = kind;
			edge->list.next = NULL;
			edge->list.prev = NULL;
			/* clear the users' private data area behind the record */
			memset(edge + 1, 0, edges_private_size);

			new_edge = ir_edgeset_insert(edges, edge);
			if (new_edge != edge) {
				panic("new edge exists already");
			}

			msg = "adding";
			list_add(&edge->list, head);
		}

		edge_change_cnt(tgt, kind, +1);
	}

/* BUGFIX: was "#ifndef DEBUG_libfirm", which ran the expensive list-head
 * verification only in RELEASE builds and never in debug builds — the
 * inverse of the intent of the edges_dbg debugging flag. */
#ifdef DEBUG_libfirm
	/* verify list heads */
	if (edges_dbg) {
		if (tgt)
			verify_list_head(tgt, kind);
		if (old_tgt)
			verify_list_head(old_tgt, kind);
	}
#endif

	DBG((dbg, LEVEL_5, "announce out edge: %+F %d-> %+F(%+F): %s\n", src, pos, tgt, old_tgt, msg));
}
396
/**
 * Notify the edge subsystem that the @p pos'th input of @p src changed
 * from @p old_tgt to @p tgt.  Updates the normal out edges and keeps the
 * block-successor edges consistent with the change.
 */
void edges_notify_edge(ir_node *src, int pos, ir_node *tgt, ir_node *old_tgt,
                       ir_graph *irg)
{
	if (edges_activated_kind(irg, EDGE_KIND_NORMAL)) {
		edges_notify_edge_kind(src, pos, tgt, old_tgt, EDGE_KIND_NORMAL, irg);
	}

	if (edges_activated_kind(irg, EDGE_KIND_BLOCK)) {
		if (is_Block(src)) {
			/* a control-flow predecessor of a Block changed: translate the
			 * old/new targets to their blocks (Bad stays as-is) and update
			 * the corresponding block-successor edge */
			ir_node *bl_old = old_tgt ? get_nodes_block(old_tgt) : NULL;
			ir_node *bl_tgt = NULL;

			if (tgt)
				bl_tgt = is_Bad(tgt) ? tgt : get_nodes_block(tgt);

			edges_notify_edge_kind(src, pos, bl_tgt, bl_old, EDGE_KIND_BLOCK, irg);
		} else if (get_irn_mode(src) == mode_X && old_tgt != NULL && is_Block(old_tgt)) {
			/* moving a jump node from one block to another: every block
			 * successor edge fed by this jump must be redirected as well */
			foreach_out_edge_kind_safe(old_tgt, edge, EDGE_KIND_BLOCK) {
				ir_node *succ = get_edge_src_irn(edge);
				int succ_pos = get_edge_src_pos(edge);
				ir_node *block_pred = get_Block_cfgpred(succ, succ_pos);
				if (block_pred != src)
					continue;
				edges_notify_edge_kind(succ, succ_pos, tgt, old_tgt,
				                       EDGE_KIND_BLOCK, irg);
			}
		}
	}
}
427
428 /**
429 * Delete all in edges of a given kind from the node old.
430 *
431 * @param old the node
432 * @param kind the kind of edges to remove
433 * @param irg the irg of the old node
434 */
edges_node_deleted_kind(ir_node * old,ir_edge_kind_t kind)435 static void edges_node_deleted_kind(ir_node *old, ir_edge_kind_t kind)
436 {
437 int i, n;
438 ir_graph *irg = get_irn_irg(old);
439
440 if (!edges_activated_kind(irg, kind))
441 return;
442
443 DBG((dbg, LEVEL_5, "node deleted (kind: %s): %+F\n", get_kind_str(kind), old));
444
445 foreach_tgt(old, i, n, kind) {
446 ir_node *old_tgt = get_n(old, i, kind);
447 edges_notify_edge_kind(old, i, NULL, old_tgt, kind, irg);
448 }
449 }
450
451 /**
452 * A node might be revivaled by CSE. Assure its edges.
453 *
454 * @param irn the node
455 * @param kind the kind of edges to remove
456 * @param irg the irg of the old node
457 */
edges_node_revival_kind(ir_node * irn,ir_edge_kind_t kind)458 static void edges_node_revival_kind(ir_node *irn, ir_edge_kind_t kind)
459 {
460 irn_edge_info_t *info;
461 int i, n;
462 ir_graph *irg = get_irn_irg(irn);
463
464 if (!edges_activated_kind(irg, kind))
465 return;
466
467 info = get_irn_edge_info(irn, kind);
468 if (info->edges_built)
469 return;
470
471 DBG((dbg, LEVEL_5, "node revivaled (kind: %s): %+F\n", get_kind_str(kind), irn));
472
473 foreach_tgt(irn, i, n, kind) {
474 ir_node *tgt = get_n(irn, i, kind);
475 edges_notify_edge_kind(irn, i, tgt, NULL, kind, irg);
476 }
477 info->edges_built = 1;
478 }
479
/** Walker environment shared by the edge building and verification walkers. */
typedef struct build_walker {
	ir_edge_kind_t kind;           /**< the edge kind being processed */
	bitset_t       *reachable;     /**< node indices reached by the verification walk */
	unsigned       problem_found;  /**< set to 1 when verification finds an inconsistency */
} build_walker;
485
486 /**
487 * Post-Walker: notify all edges
488 */
build_edges_walker(ir_node * irn,void * data)489 static void build_edges_walker(ir_node *irn, void *data)
490 {
491 build_walker *w = (build_walker*)data;
492 int i, n;
493 ir_edge_kind_t kind = w->kind;
494 ir_graph *irg = get_irn_irg(irn);
495
496 foreach_tgt(irn, i, n, kind) {
497 ir_node *pred = get_n(irn, i, kind);
498 edges_notify_edge_kind(irn, i, pred, NULL, kind, irg);
499 }
500 get_irn_edge_info(irn, kind)->edges_built = 1;
501 }
502
503 /**
504 * Pre-Walker: initializes the list-heads and set the out-count
505 * of all nodes to 0.
506 */
init_lh_walker(ir_node * irn,void * data)507 static void init_lh_walker(ir_node *irn, void *data)
508 {
509 build_walker *w = (build_walker*)data;
510 ir_edge_kind_t kind = w->kind;
511 list_head *head = &get_irn_edge_info(irn, kind)->outs_head;
512 INIT_LIST_HEAD(head);
513 get_irn_edge_info(irn, kind)->edges_built = 0;
514 get_irn_edge_info(irn, kind)->out_count = 0;
515 }
516
517 /**
518 * Pre-Walker: initializes the list-heads and set the out-count
519 * of all nodes to 0.
520 *
521 * Additionally touches DEP nodes, as they might be DEAD.
522 * THIS IS UGLY, but I don't find a better way until we
523 *
524 * a) ensure that dead nodes are not used as input
525 * b) it might be sufficient to add those stupid NO_REG nodes
526 * to the anchor
527 */
init_lh_walker_dep(ir_node * irn,void * data)528 static void init_lh_walker_dep(ir_node *irn, void *data)
529 {
530 build_walker *w = (build_walker*)data;
531 ir_edge_kind_t kind = w->kind;
532 list_head *head = &get_irn_edge_info(irn, kind)->outs_head;
533 int i;
534
535 INIT_LIST_HEAD(head);
536 get_irn_edge_info(irn, kind)->edges_built = 0;
537 get_irn_edge_info(irn, kind)->out_count = 0;
538
539 for (i = get_irn_deps(irn) - 1; i >= 0; --i) {
540 ir_node *dep = get_irn_dep(irn, i);
541
542 head = &get_irn_edge_info(dep, kind)->outs_head;
543
544 INIT_LIST_HEAD(head);
545 get_irn_edge_info(dep, kind)->edges_built = 0;
546 get_irn_edge_info(dep, kind)->out_count = 0;
547 }
548 }
549
/** Environment for visitor(): the wrapped walker callback and its data. */
typedef struct visitor_info_t {
	irg_walk_func *visit;  /**< callback invoked for each not-yet-visited node */
	void *data;            /**< user data forwarded to the callback */
} visitor_info_t;
554
555 /**
556 * Visitor: initializes the list-heads and set the out-count
557 * of all nodes to 0 of nodes that are not seen so far.
558 */
visitor(ir_node * irn,void * data)559 static void visitor(ir_node *irn, void *data)
560 {
561 visitor_info_t *info = (visitor_info_t*)data;
562
563 if (is_Deleted(irn))
564 return;
565 if (!is_Block(irn) && is_Deleted(get_nodes_block(irn)))
566 return;
567
568 if (!irn_visited_else_mark(irn)) {
569 info->visit(irn, info->data);
570 }
571 }
572
/**
 * Activate the edges of the given kind for @p irg: allocate the edge
 * storage and build the initial edge set by walking the graph (anchors)
 * and the identities root set.
 */
void edges_activate_kind(ir_graph *irg, ir_edge_kind_t kind)
{
	/*
	 * Build the initial edge set.
	 * Beware, this is not a simple task because it suffers from two
	 * difficulties:
	 * - the anchor set allows access to Nodes that may not be reachable from
	 *   the End node
	 * - the identities add nodes to the "root set" that are not yet reachable
	 *   from End. However, after some transformations, the CSE may revival these
	 *   nodes
	 *
	 * These problems can be fixed using different strategies:
	 * - Add an age flag to every node. Whenever the edge of a node is older
	 *   then the current edge, invalidate the edges of this node.
	 *   While this would help for revivaled nodes, it increases memory and runtime.
	 * - Delete the identities set.
	 *   Solves the revival problem, but may increase the memory consumption, as
	 *   nodes cannot be revivaled at all.
	 * - Manually iterate over the identities root set. This did not consume more memory
	 *   but increase the computation time because the |identities| >= |V|
	 *
	 * Currently, we use the last option.
	 */
	struct build_walker w;
	irg_edge_info_t *info = get_irg_edge_info(irg, kind);
	visitor_info_t visit;

	w.kind = kind;

	visit.data = &w;

	assert(!info->activated);

	info->activated = 1;
	edges_init_graph_kind(irg, kind);
	if (kind == EDGE_KIND_DEP) {
		irg_walk_anchors(irg, init_lh_walker_dep, NULL, &w);
		/* Argh: Dep nodes might be dead, so we MUST visit identities first */
		visit.visit = init_lh_walker_dep;
		visit_all_identities(irg, visitor, &visit);
		irg_walk_anchors(irg, NULL, build_edges_walker, &w);
	} else {
		visit.visit = init_lh_walker;
		visit_all_identities(irg, visitor, &visit);
		irg_walk_anchors(irg, init_lh_walker, build_edges_walker, &w);
	}
}
621
/**
 * Deactivate the edges of the given kind for @p irg, releasing all edge
 * storage and clearing the consistent-out-edges property.
 */
void edges_deactivate_kind(ir_graph *irg, ir_edge_kind_t kind)
{
	irg_edge_info_t *info = get_irg_edge_info(irg, kind);

	info->activated = 0;
	if (info->allocated) {
		info->allocated = 0;
		obstack_free(&info->edges_obst, NULL);
		ir_edgeset_destroy(&info->edges);
	}
	clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUT_EDGES);
}
634
/* Out-of-line instance of edges_activated_kind(); the parentheses around
 * the name suppress a possible function-like macro expansion. */
int (edges_activated_kind)(const ir_graph *irg, ir_edge_kind_t kind)
{
	return edges_activated_kind_(irg, kind);
}
639
/**
 * Redirect all users of @p from (for the given edge kind) to use @p to
 * instead.  Does nothing for kinds without a set_edge function or when
 * edges of the kind are not activated.
 */
void edges_reroute_kind(ir_node *from, ir_node *to, ir_edge_kind_t kind)
{
	ir_graph *irg = get_irn_irg(from);
	set_edge_func_t *set_edge = edge_kind_info[kind].set_edge;

	if (set_edge && edges_activated_kind(irg, kind)) {
		struct list_head *head = &get_irn_edge_info(from, kind)->outs_head;

		DBG((dbg, LEVEL_5, "reroute from %+F to %+F\n", from, to));

		/* Each set_edge() call feeds back into the edge machinery, which
		 * unlinks the head edge from this list, so the loop terminates
		 * once the list is empty.  NOTE(review): this relies on the edge
		 * actually leaving `from`'s list, i.e. `to != from` — confirm at
		 * the call sites. */
		while (head != head->next) {
			ir_edge_t *edge = list_entry(head->next, ir_edge_t, list);
			assert(edge->pos >= -1);
			set_edge(edge->src, edge->pos, to);
		}
	}
}
657
/**
 * Redirect all users of @p from to @p to, except for the user node
 * @p exception which keeps its reference.
 */
void edges_reroute_except(ir_node *from, ir_node *to, ir_node *exception)
{
	foreach_out_edge_safe(from, edge) {
		ir_node *user = get_edge_src_irn(edge);
		if (user != exception)
			set_irn_n(user, edge->pos, to);
	}
}
667
/**
 * Walker: check that every in-edge of the visited node has a matching
 * entry in the edge set; found entries are marked present, missing ones
 * flag a problem.
 */
static void verify_set_presence(ir_node *irn, void *data)
{
	build_walker *w     = (build_walker*)data;
	ir_edgeset_t *edges = &get_irg_edge_info(get_irn_irg(irn), w->kind)->edges;
	int           pos, arity;

	foreach_tgt(irn, pos, arity, w->kind) {
		ir_edge_t templ;
		templ.src = irn;
		templ.pos = pos;

		ir_edge_t *e = ir_edgeset_find(edges, &templ);
		if (e == NULL) {
			w->problem_found = 1;
		} else {
			e->present = 1;
		}
	}
}
689
/**
 * Walker: record the node as reachable, verify its out list structure,
 * and check that every listed out edge really points back at this node.
 */
static void verify_list_presence(ir_node *irn, void *data)
{
	build_walker *w = (build_walker*)data;

	bitset_set(w->reachable, get_irn_idx(irn));

	/* check list heads */
	verify_list_head(irn, w->kind);

	foreach_out_edge_kind(irn, e, w->kind) {
		/* a stale edge may index past the current arity */
		if (w->kind == EDGE_KIND_NORMAL && get_irn_arity(e->src) <= e->pos) {
			w->problem_found = 1;
			continue;
		}

		ir_node *tgt = get_n(e->src, e->pos, w->kind);
		if (tgt != irn)
			w->problem_found = 1;
	}
}
714
/**
 * Verify the out edges of the given kind in @p irg.
 *
 * Cross-checks the edge set against the graph: every in-edge of every
 * walked node must have a set entry (verify_set_presence), every listed
 * edge must point back correctly (verify_list_presence), and any valid,
 * unmatched set entry with a reachable source is reported as superfluous.
 *
 * @return 1 if a problem was found, 0 otherwise
 */
int edges_verify_kind(ir_graph *irg, ir_edge_kind_t kind)
{
	struct build_walker w;
	ir_edgeset_t *edges = &get_irg_edge_info(irg, kind)->edges;
	ir_edge_t *e;
	ir_edgeset_iterator_t iter;

	w.kind = kind;
	w.reachable = bitset_alloca(get_irg_last_idx(irg));
	w.problem_found = 0;

	/* Clear the present bit in all edges available. */
	foreach_ir_edgeset(edges, e, iter) {
		e->present = 0;
	}

	irg_walk_graph(irg, verify_set_presence, verify_list_presence, &w);

	/*
	 * Dump all edges which are not invalid and not present.
	 * These edges are superfluous and their presence in the
	 * edge set is wrong.
	 */
	foreach_ir_edgeset(edges, e, iter) {
		if (! e->invalid && ! e->present && bitset_is_set(w.reachable, get_irn_idx(e->src))) {
			w.problem_found = 1;
			ir_fprintf(stderr, "Edge Verifier: edge(%ld) %+F,%d is superfluous\n", edge_get_id(e), e->src, e->pos);
		}
	}

	return w.problem_found;
}
747
748 #define IGNORE_NODE(irn) (is_Bad((irn)) || is_Block((irn)))
749
750 /**
751 * Clear link field of all nodes.
752 */
clear_links(ir_node * irn,void * env)753 static void clear_links(ir_node *irn, void *env)
754 {
755 bitset_t *bs;
756 ir_graph *irg;
757 (void) env;
758
759 if (IGNORE_NODE(irn)) {
760 set_irn_link(irn, NULL);
761 return;
762 }
763
764 irg = get_irn_irg(irn);
765 bs = bitset_malloc(get_irg_last_idx(irg));
766 set_irn_link(irn, bs);
767 }
768
769 /**
770 * Increases count (stored in link field) for all operands of a node.
771 */
count_user(ir_node * irn,void * env)772 static void count_user(ir_node *irn, void *env)
773 {
774 int i;
775 int first;
776 (void) env;
777
778 first = is_Block(irn) ? 0 : -1;
779 for (i = get_irn_arity(irn) - 1; i >= first; --i) {
780 ir_node *op = get_irn_n(irn, i);
781 bitset_t *bs = (bitset_t*)get_irn_link(op);
782
783 if (bs)
784 bitset_set(bs, get_irn_idx(irn));
785 }
786 }
787
/**
 * Verifies if collected count, number of edges in list and stored edge count are in sync.
 *
 * Compares three independent numbers for each node:
 *  - out_count stored in the node's edge info,
 *  - the length of the node's out list,
 *  - the number of actual in-references found by scanning all users
 *    collected by count_user() (via the link-field bitset).
 * Mismatches are reported on stderr and flagged in the walker env.
 * The node's bitset (allocated by clear_links()) is freed here.
 */
static void verify_edge_counter(ir_node *irn, void *env)
{
	build_walker *w = (build_walker*)env;
	if (IGNORE_NODE(irn))
		return;

	/* user bitset attached by clear_links()/filled by count_user() */
	bitset_t *bs       = (bitset_t*)get_irn_link(irn);
	int list_cnt       = 0;
	int edge_cnt       = get_irn_edge_info(irn, EDGE_KIND_NORMAL)->out_count;
	const struct list_head *head
		= &get_irn_edge_info(irn, EDGE_KIND_NORMAL)->outs_head;

	/* We can iterate safely here, list heads have already been verified. */
	const struct list_head *pos;
	list_for_each(pos, head) {
		++list_cnt;
	}

	/* check all nodes that reference us and count edges that point number
	 * of ins that actually point to us */
	ir_graph *irg = get_irn_irg(irn);
	int ref_cnt = 0;
	bitset_foreach(bs, idx) {
		int i, arity;
		ir_node *src = get_idx_irn(irg, idx);

		arity = get_irn_arity(src);
		for (i = 0; i < arity; ++i) {
			ir_node *in = get_irn_n(src, i);
			if (in == irn)
				++ref_cnt;
		}
	}

	if (edge_cnt != list_cnt) {
		w->problem_found = 1;
		ir_fprintf(stderr, "Edge Verifier: edge count is %d, but %d edge(s) are recorded in list at %+F\n",
			edge_cnt, list_cnt, irn);
	}

	if (ref_cnt != list_cnt) {
		w->problem_found = 1;
		ir_fprintf(stderr, "Edge Verifier: %+F reachable by %d node(s), but the list contains %d edge(s)\n",
			irn, ref_cnt, list_cnt);
	}

	/* release the per-node bitset allocated in clear_links() */
	bitset_free(bs);
}
839
/**
 * Verify the normal out edges of @p irg: first the edge set itself,
 * then the per-node out counters against an independently computed
 * user count.
 *
 * @return 1 if any problem was found, 0 otherwise
 */
int edges_verify(ir_graph *irg)
{
	/* verify normal edges only */
	int set_problems = edges_verify_kind(irg, EDGE_KIND_NORMAL);

	struct build_walker w;
	w.kind          = EDGE_KIND_NORMAL;
	w.problem_found = 0;

	/* verify counter */
	irg_walk_anchors(irg, clear_links, count_user, &w);
	irg_walk_anchors(irg, NULL, verify_edge_counter, &w);

	return set_problems != 0 ? 1 : w.problem_found;
}
857
/** Environment of the edges-verify graph pass. */
typedef struct pass_t {
	ir_graph_pass_t pass;              /**< common graph pass data */
	unsigned        assert_on_problem; /**< if non-zero, assert when verification finds problems */
} pass_t;
862
863 /**
864 * Wrapper to edges_verify to be run as an ir_graph pass.
865 */
edges_verify_wrapper(ir_graph * irg,void * context)866 static int edges_verify_wrapper(ir_graph *irg, void *context)
867 {
868 pass_t *pass = (pass_t*)context;
869 int problems_found = edges_verify(irg);
870 /* do NOT rerun the pass if verify is ok :-) */
871 assert(problems_found && pass->assert_on_problem);
872 return 0;
873 }
874
irg_verify_edges_pass(const char * name,unsigned assert_on_problem)875 ir_graph_pass_t *irg_verify_edges_pass(const char *name, unsigned assert_on_problem)
876 {
877 pass_t *pass = XMALLOCZ(pass_t);
878
879 def_graph_pass_constructor(
880 &pass->pass, name ? name : "edges_verify", edges_verify_wrapper);
881
882 /* neither dump nor verify */
883 pass->pass.dump_irg = (DUMP_ON_IRG_FUNC)ir_prog_no_dump;
884 pass->pass.verify_irg = (RUN_ON_IRG_FUNC)ir_prog_no_verify;
885
886 pass->assert_on_problem = assert_on_problem;
887 return &pass->pass;
888 }
889
/** One-time module initialization: register the debug module. */
void init_edges(void)
{
	FIRM_DBG_REGISTER(dbg, DBG_EDGES);
}

/** Enable/disable list-head checking on every edge change (see edges_dbg). */
void edges_init_dbg(int do_dbg)
{
	edges_dbg = do_dbg;
}
899
/** Activate all edge kinds for @p irg. */
void edges_activate(ir_graph *irg)
{
	edges_activate_kind(irg, EDGE_KIND_NORMAL);
	edges_activate_kind(irg, EDGE_KIND_BLOCK);
	edges_activate_kind(irg, EDGE_KIND_DEP);
}

/** Deactivate all edge kinds for @p irg (reverse order of activation). */
void edges_deactivate(ir_graph *irg)
{
	edges_deactivate_kind(irg, EDGE_KIND_DEP);
	edges_deactivate_kind(irg, EDGE_KIND_BLOCK);
	edges_deactivate_kind(irg, EDGE_KIND_NORMAL);
}

/** Ensure all edge kinds are available and mark the out edges consistent. */
void assure_edges(ir_graph *irg)
{
	assure_edges_kind(irg, EDGE_KIND_BLOCK);
	assure_edges_kind(irg, EDGE_KIND_NORMAL);
	assure_edges_kind(irg, EDGE_KIND_DEP);
	add_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUT_EDGES);
}

/** Activate the given edge kind unless it is already active. */
void assure_edges_kind(ir_graph *irg, ir_edge_kind_t kind)
{
	if (!edges_activated_kind(irg, kind))
		edges_activate_kind(irg, kind);
}

/** Notify all edge kinds that @p irn is being deleted. */
void edges_node_deleted(ir_node *irn)
{
	edges_node_deleted_kind(irn, EDGE_KIND_NORMAL);
	edges_node_deleted_kind(irn, EDGE_KIND_BLOCK);
	edges_node_deleted_kind(irn, EDGE_KIND_DEP);
}

/* Rebuild edges of a node revived by CSE.
 * NOTE(review): EDGE_KIND_DEP is not revived here, unlike deletion above —
 * presumably intentional, but worth confirming. */
void edges_node_revival(ir_node *irn)
{
	edges_node_revival_kind(irn, EDGE_KIND_NORMAL);
	edges_node_revival_kind(irn, EDGE_KIND_BLOCK);
}
940
/* Out-of-line instances of the corresponding inline accessors; the
 * parentheses around each name suppress function-like macro expansion. */

const ir_edge_t *(get_irn_out_edge_first_kind)(const ir_node *irn, ir_edge_kind_t kind)
{
	return get_irn_out_edge_first_kind_(irn, kind);
}

const ir_edge_t *(get_irn_out_edge_next)(const ir_node *irn, const ir_edge_t *last)
{
	return get_irn_out_edge_next_(irn, last);
}

ir_node *(get_edge_src_irn)(const ir_edge_t *edge)
{
	return get_edge_src_irn_(edge);
}

int (get_edge_src_pos)(const ir_edge_t *edge)
{
	return get_edge_src_pos_(edge);
}

int (get_irn_n_edges_kind)(const ir_node *irn, ir_edge_kind_t kind)
{
	return get_irn_n_edges_kind_(irn, kind);
}
965
/**
 * Recursive helper of irg_walk_edges(): depth-first walk along the
 * normal out edges, calling pre before and post after the descent.
 */
static void irg_walk_edges2(ir_node *node, irg_walk_func *pre,
                            irg_walk_func *post, void *env)
{
	if (irn_visited_else_mark(node))
		return;

	if (pre)
		pre(node, env);

	/* descend into all users of this node */
	foreach_out_edge_kind_safe(node, edge, EDGE_KIND_NORMAL) {
		irg_walk_edges2(get_edge_src_irn(edge), pre, post, env);
	}

	if (post)
		post(node, env);
}
984
/**
 * Walk the graph along the normal out edges, starting at the Block
 * @p node.  Requires activated edges.
 */
void irg_walk_edges(ir_node *node, irg_walk_func *pre, irg_walk_func *post,
                    void *env)
{
	ir_graph *irg = get_irn_irg(node);

	assert(edges_activated(irg));
	assert(is_Block(node));

	ir_reserve_resources(irg, IR_RESOURCE_IRN_VISITED);
	inc_irg_visited(irg);

	irg_walk_edges2(node, pre, post, env);

	ir_free_resources(irg, IR_RESOURCE_IRN_VISITED);
}
1000
/**
 * Recursive helper of irg_block_edges_walk(): depth-first walk along the
 * block-successor edges.
 */
static void irg_block_edges_walk2(ir_node *bl, irg_walk_func *pre,
                                  irg_walk_func *post, void *env)
{
	if (Block_block_visited(bl))
		return;
	mark_Block_block_visited(bl);

	if (pre != NULL)
		pre(bl, env);

	foreach_out_edge_kind_safe(bl, edge, EDGE_KIND_BLOCK) {
		/* find the corresponding successor block. */
		irg_block_edges_walk2(get_edge_src_irn(edge), pre, post, env);
	}

	if (post != NULL)
		post(bl, env);
}
1020
/**
 * Walk the block graph along the block-successor edges, starting at the
 * Block @p node.  Requires activated edges.
 */
void irg_block_edges_walk(ir_node *node, irg_walk_func *pre,
                          irg_walk_func *post, void *env)
{
	ir_graph *irg = get_irn_irg(node);

	assert(edges_activated(irg));
	assert(is_Block(node));

	ir_reserve_resources(irg, IR_RESOURCE_BLOCK_VISITED);
	inc_irg_block_visited(irg);

	irg_block_edges_walk2(node, pre, post, env);

	ir_free_resources(irg, IR_RESOURCE_BLOCK_VISITED);
}
1036