1 /*
2  * Copyright (C) 1995-2008 University of Karlsruhe.  All right reserved.
3  *
4  * This file is part of libFirm.
5  *
6  * This file may be distributed and/or modified under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation and appearing in the file LICENSE.GPL included in the
9  * packaging of this file.
10  *
11  * Licensees holding valid libFirm Professional Edition licenses may use
12  * this file in accordance with the libFirm Commercial License.
13  * Agreement provided with the Software.
14  *
15  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17  * PURPOSE.
18  */
19 
20 /**
21  * @file
22  * @brief   Representation of an intermediate operation.
23  * @author  Martin Trapp, Christian Schaefer, Goetz Lindenmaier, Michael Beck
24  */
25 #include "config.h"
26 
27 #include <string.h>
28 
29 #include "pset_new.h"
30 #include "ident.h"
31 #include "irnode_t.h"
32 #include "irgraph_t.h"
33 #include "irmode_t.h"
34 #include "irbackedge_t.h"
35 #include "irdump.h"
36 #include "irop_t.h"
37 #include "irprog_t.h"
38 #include "iredgekinds.h"
39 #include "iredges_t.h"
40 #include "ircons.h"
41 #include "error.h"
42 
43 #include "irhooks.h"
44 #include "irtools.h"
45 #include "util.h"
46 
47 #include "beinfo.h"
48 
49 /* some constants fixing the positions of nodes predecessors
50    in the in array */
51 #define CALL_PARAM_OFFSET     (n_Call_max+1)
52 #define BUILTIN_PARAM_OFFSET  (n_Builtin_max+1)
53 #define ASM_PARAM_OFFSET      (n_ASM_max+1)
54 #define SEL_INDEX_OFFSET      (n_Sel_max+1)
55 #define RETURN_RESULT_OFFSET  (n_Return_max+1)
56 #define END_KEEPALIVE_OFFSET  0
57 
/* Human-readable names for ir_relation values, indexed directly by the
 * relation's numeric value (see get_relation_string()). The order must
 * match the ir_relation enum: bit0=equal, bit1=less, bit2=greater,
 * bit3=unordered -- do not reorder. */
static const char *relation_names [] = {
	"false",
	"equal",
	"less",
	"less_equal",
	"greater",
	"greater_equal",
	"less_greater",
	"less_equal_greater",
	"unordered",
	"unordered_equal",
	"unordered_less",
	"unordered_less_equal",
	"unordered_greater",
	"unordered_greater_equal",
	"not_equal",
	"true"
};
76 
/* Returns the printable name of a relation (see relation_names table). */
const char *get_relation_string(ir_relation relation)
{
	size_t const index = (size_t)relation;
	assert(index < ARRAY_SIZE(relation_names));
	return relation_names[index];
}
82 
/* Returns the negation of a relation: flipping all relation bits inside
 * the ir_relation_true mask yields the logical complement. */
ir_relation get_negated_relation(ir_relation relation)
{
	ir_relation const negated = relation ^ ir_relation_true;
	return negated;
}
87 
/* Returns the inverse of a relation (the relation that holds when the
 * operands are swapped): less and greater trade places, all other bits
 * (equal/unordered) are kept as they are. */
ir_relation get_inversed_relation(ir_relation relation)
{
	ir_relation result = relation & ~(ir_relation_less|ir_relation_greater);
	if (relation & ir_relation_less)
		result |= ir_relation_greater;
	if (relation & ir_relation_greater)
		result |= ir_relation_less;
	return result;
}
97 
/**
 * Allocates and initializes a new ir_node on the graph's obstack.
 *
 * @param db     debug info attached to the node (may be NULL)
 * @param irg    the graph the node belongs to
 * @param block  the block the node lives in (stored as in[0])
 * @param op     opcode descriptor; its attr_size determines node size
 * @param mode   the node's mode
 * @param arity  number of predecessors; negative requests dynamic arity
 * @param in     the predecessor array (only read when arity >= 0)
 * @return the freshly initialized node
 */
ir_node *new_ir_node(dbg_info *db, ir_graph *irg, ir_node *block, ir_op *op,
                     ir_mode *mode, int arity, ir_node *const *in)
{
	int i;

	assert(irg);
	assert(op);
	assert(mode);

	/* node = fixed header plus opcode-specific attribute area */
	size_t   const node_size = offsetof(ir_node, attr) + op->attr_size;
	ir_node *const res       = (ir_node*)OALLOCNZ(irg->obst, char, node_size);

	res->kind     = k_ir_node;
	res->op       = op;
	res->mode     = mode;
	res->visited  = 0;
	res->node_idx = irg_register_node_idx(irg, res);
	res->link     = NULL;
	res->deps     = NULL;

	if (arity < 0) {
		res->in = NEW_ARR_F(ir_node *, 1);  /* 1: space for block */
	} else {
		/* not nice but necessary: End and Sync must always have a flexible array */
		if (op == op_End || op == op_Sync)
			res->in = NEW_ARR_F(ir_node *, (arity+1));
		else
			res->in = NEW_ARR_D(ir_node *, irg->obst, (arity+1));
		memcpy(&res->in[1], in, sizeof(ir_node *) * arity);
	}

	/* in[0] is reserved for the block predecessor */
	res->in[0]   = block;
	set_irn_dbg_info(res, db);
	res->node_nr = get_irp_new_node_nr();

	for (i = 0; i < EDGE_KIND_LAST; ++i) {
		INIT_LIST_HEAD(&res->edge_info[i].outs_head);
		/* edges will be build immediately */
		res->edge_info[i].edges_built = 1;
		res->edge_info[i].out_count = 0;
	}

	/* don't put this into the for loop, arity is -1 for some nodes! */
	edges_notify_edge(res, -1, res->in[0], NULL, irg);
	for (i = 1; i <= arity; ++i)
		edges_notify_edge(res, i - 1, res->in[i], NULL, irg);

	hook_new_node(irg, res);
	if (irg_is_constrained(irg, IR_GRAPH_CONSTRAINT_BACKEND)) {
		/* in the backend phase nodes additionally carry be_info */
		be_info_new_node(irg, res);
	}

	return res;
}
152 
/* Out-of-line version of the inline helper; the parentheses around the
 * function name suppress a possible function-like macro expansion. */
int (is_ir_node)(const void *thing)
{
	return is_ir_node_(thing);
}

/* Returns the number of predecessors, not counting the block input. */
int (get_irn_arity)(const ir_node *node)
{
	return get_irn_arity_(node);
}
162 
/* Returns the raw in array: in[0] is the block, in[1..arity] the
 * predecessors. Callers must not free it. */
ir_node **get_irn_in(const ir_node *node)
{
	return node->in;
}
167 
/**
 * Replaces the complete predecessor array of a node.
 *
 * Notifies the edge subsystem about every changed, added or removed edge,
 * reallocates the in array on the graph's obstack when the arity changes
 * (preserving the block input at in[0]) and invalidates the graph's out
 * and loop information.
 *
 * @param node   the node whose predecessors are replaced
 * @param arity  number of new predecessors (must be >= 0)
 * @param in     array with the new predecessors
 */
void set_irn_in(ir_node *node, int arity, ir_node **in)
{
	int i;
	ir_node *** pOld_in;
	ir_graph *irg = get_irn_irg(node);

	pOld_in = &node->in;

#ifndef NDEBUG
	assert(node != NULL && node->kind == k_ir_node);
	assert(arity >= 0);
	for (i = 0; i < arity; ++i) {
		/* bugfix: validate in[i], the old code always re-checked in[0] */
		assert(in[i] != NULL && in[i]->kind == k_ir_node);
	}
#endif

	/* notify edges that are replaced or newly created ... */
	for (i = 0; i < arity; i++) {
		if (i < (int)ARR_LEN(*pOld_in)-1)
			edges_notify_edge(node, i, in[i], (*pOld_in)[i+1], irg);
		else
			edges_notify_edge(node, i, in[i], NULL,            irg);
	}
	/* ... and edges that vanish because the arity shrinks */
	for (;i < (int)ARR_LEN(*pOld_in)-1; i++) {
		edges_notify_edge(node, i, NULL, (*pOld_in)[i+1], irg);
	}

	if (arity != (int)ARR_LEN(*pOld_in) - 1) {
		ir_node * block = (*pOld_in)[0];  /* keep the block input */
		*pOld_in = NEW_ARR_D(ir_node *, irg->obst, arity + 1);
		(*pOld_in)[0] = block;
	}
	fix_backedges(irg->obst, node);

	memcpy((*pOld_in) + 1, in, sizeof(ir_node *) * arity);

	/* update irg flags */
	clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUTS | IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
}
206 
/* Returns the n-th predecessor of a node; n == -1 addresses the block. */
ir_node *(get_irn_n)(const ir_node *node, int n)
{
	return get_irn_n_(node, n);
}
211 
/**
 * Sets the n-th predecessor of a node (n == -1 sets the block input),
 * firing the set_irn_n hook and the edge notification, and invalidating
 * the graph's out/loop information.
 */
void set_irn_n(ir_node *node, int n, ir_node *in)
{
	ir_graph *irg = get_irn_irg(node);
	assert(node && node->kind == k_ir_node);
	assert(-1 <= n);
	assert(n < get_irn_arity(node));
	assert(in && in->kind == k_ir_node);

	ir_node *const old = node->in[n + 1];

	/* Call the hook */
	hook_set_irn_n(node, n, in, old);

	/* Here, we rely on src and tgt being in the current ir graph */
	edges_notify_edge(node, n, in, old, irg);

	node->in[n + 1] = in;

	/* update irg flags */
	clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUTS | IR_GRAPH_PROPERTY_CONSISTENT_LOOPINFO);
}
231 
/**
 * Appends a predecessor to a node with dynamic arity and returns the
 * position of the new input.
 */
int add_irn_n(ir_node *node, ir_node *in)
{
	ir_graph *irg = get_irn_irg(node);

	assert(node->op->opar == oparity_dynamic);

	int const pos = ARR_LEN(node->in) - 1;
	ARR_APP1(ir_node *, node->in, in);
	edges_notify_edge(node, pos, node->in[pos + 1], NULL, irg);

	/* Call the hook */
	hook_set_irn_n(node, pos, node->in[pos + 1], NULL);

	return pos;
}
247 
/* Removes the i-th predecessor of a Sync by moving the last predecessor
 * into slot i and shrinking the in array by one (order is not kept). */
void del_Sync_n(ir_node *n, int i)
{
	int      arity     = get_Sync_n_preds(n);
	ir_node *last_pred = get_Sync_pred(n, arity - 1);
	/* set_Sync_pred already notifies the edge change for slot i */
	set_Sync_pred(n, i, last_pred);
	edges_notify_edge(n, arity - 1, NULL, last_pred, get_irn_irg(n));
	/* in[] holds arity+1 entries (block + preds); shrink drops the last */
	ARR_SHRINKLEN(get_irn_in(n), arity);
}
256 
/* Returns the number of explicit dependency edges of a node. */
int (get_irn_deps)(const ir_node *node)
{
	return get_irn_deps_(node);
}

/* Returns the pos-th dependency of a node. */
ir_node *(get_irn_dep)(const ir_node *node, int pos)
{
	return get_irn_dep_(node, pos);
}
266 
/**
 * Replaces the dependency at position pos with dep and notifies the
 * dependency-edge subsystem if dependency edges are activated.
 *
 * @param node  node whose dependency array is modified (must exist)
 * @param pos   index into the dependency array
 * @param dep   the new dependency (must not be NULL)
 */
void set_irn_dep(ir_node *node, int pos, ir_node *dep)
{
	ir_node *old;
	ir_graph *irg;

	/* fixed assertion message typo: "node yet" -> "not yet" */
	assert(node->deps && "dependency array not yet allocated. use add_irn_dep()");
	assert(pos >= 0 && pos < (int)ARR_LEN(node->deps) && "dependency index out of range");
	assert(dep != NULL);
	old = node->deps[pos];
	node->deps[pos] = dep;
	irg = get_irn_irg(node);
	if (edges_activated_kind(irg, EDGE_KIND_DEP))
		edges_notify_edge_kind(node, pos, dep, old, EDGE_KIND_DEP, irg);
}
281 
/**
 * Appends a dependency to a node, lazily creating the dependency array
 * on first use, and notifies dependency edges when activated.
 */
void add_irn_dep(ir_node *node, ir_node *dep)
{
	assert(dep != NULL);
	if (node->deps == NULL)
		node->deps = NEW_ARR_F(ir_node *, 0);

	ARR_APP1(ir_node*, node->deps, dep);

	ir_graph *const irg = get_irn_irg(node);
	if (edges_activated_kind(irg, EDGE_KIND_DEP)) {
		size_t const pos = ARR_LEN(node->deps) - 1;
		edges_notify_edge_kind(node, pos, dep, NULL, EDGE_KIND_DEP, irg);
	}
}
294 
/* Removes the first occurrence of dep from node's dependency array by
 * overwriting its slot with the last entry and shrinking the array. */
void delete_irn_dep(ir_node *node, ir_node *dep)
{
	size_t i;
	size_t n_deps;
	if (node->deps == NULL)
		return;

	n_deps = ARR_LEN(node->deps);
	for (i = 0; i < n_deps; ++i) {
		if (node->deps[i] == dep) {
			/* swap the last dependency into slot i (notifies DEP edges) */
			set_irn_dep(node, i, node->deps[n_deps-1]);
			/* NOTE(review): this notification uses the normal edge kind
			 * while set_irn_dep uses EDGE_KIND_DEP — confirm intended */
			edges_notify_edge(node, i, NULL, dep, get_irn_irg(node));
			ARR_SHRINKLEN(node->deps, n_deps-1);
			break;
		}
	}
}
312 
/* Copies all dependencies of src onto tgt (appending, not replacing). */
void add_irn_deps(ir_node *tgt, ir_node *src)
{
	int const n_deps = get_irn_deps(src);
	for (int idx = 0; idx < n_deps; ++idx)
		add_irn_dep(tgt, get_irn_dep(src, idx));
}
320 
321 
/* Out-of-line accessors for a node's mode, op and opcode. */
ir_mode *(get_irn_mode)(const ir_node *node)
{
	return get_irn_mode_(node);
}

void (set_irn_mode)(ir_node *node, ir_mode *mode)
{
	set_irn_mode_(node, mode);
}

ir_op *(get_irn_op)(const ir_node *node)
{
	return get_irn_op_(node);
}

void (set_irn_op)(ir_node *node, ir_op *op)
{
	set_irn_op_(node, op);
}

unsigned (get_irn_opcode)(const ir_node *node)
{
	return get_irn_opcode_(node);
}

/* Returns the opcode's name as a C string. */
const char *get_irn_opname(const ir_node *node)
{
	return get_id_str(node->op->name);
}

/* Returns the opcode's name as an ident. */
ident *get_irn_opident(const ir_node *node)
{
	assert(node);
	return node->op->name;
}
357 
/* Out-of-line accessors for the per-node visited counter, link pointer
 * and pin state. */
ir_visited_t (get_irn_visited)(const ir_node *node)
{
	return get_irn_visited_(node);
}

void (set_irn_visited)(ir_node *node, ir_visited_t visited)
{
	set_irn_visited_(node, visited);
}

void (mark_irn_visited)(ir_node *node)
{
	mark_irn_visited_(node);
}

int (irn_visited)(const ir_node *node)
{
	return irn_visited_(node);
}

/* Tests and marks in one step: returns whether already visited. */
int (irn_visited_else_mark)(ir_node *node)
{
	return irn_visited_else_mark_(node);
}

void (set_irn_link)(ir_node *node, void *link)
{
	set_irn_link_(node, link);
}

void *(get_irn_link)(const ir_node *node)
{
	return get_irn_link_(node);
}

op_pin_state (get_irn_pinned)(const ir_node *node)
{
	return get_irn_pinned_(node);
}

op_pin_state (is_irn_pinned_in_irg) (const ir_node *node)
{
	return is_irn_pinned_in_irg_(node);
}
402 
/* Sets the pin state of a node; Tuple nodes are silently ignored since
 * an optimization may have rewritten the original node into a Tuple. */
void set_irn_pinned(ir_node *node, op_pin_state state)
{
	/* due to optimization an opt may be turned into a Tuple */
	if (is_Tuple(node))
		return;

	/* only ops whose pin state is instance-specific may be changed */
	assert(node && get_op_pinned(get_irn_op(node)) >= op_pin_state_exc_pinned);
	assert(state == op_pin_state_pinned || state == op_pin_state_floats);

	node->attr.except.pin_state = state;
}
414 
get_irn_node_nr(const ir_node * node)415 long get_irn_node_nr(const ir_node *node)
416 {
417 	assert(node);
418 	return node->node_nr;
419 }
420 
/* Accessors for the opcode-specific attribute area, node index and
 * containing block. */
void *(get_irn_generic_attr)(ir_node *node)
{
	assert(is_ir_node(node));
	return get_irn_generic_attr_(node);
}

const void *(get_irn_generic_attr_const)(const ir_node *node)
{
	assert(is_ir_node(node));
	return get_irn_generic_attr_const_(node);
}

unsigned (get_irn_idx)(const ir_node *node)
{
	assert(is_ir_node(node));
	return get_irn_idx_(node);
}

ir_node *(get_nodes_block)(const ir_node *node)
{
	return get_nodes_block_(node);
}
442 }
443 
/* Moves a (non-Block) node into another block; predecessor -1 is the
 * block input by convention. */
void set_nodes_block(ir_node *node, ir_node *block)
{
	assert(!is_Block(node));
	set_irn_n(node, -1, block);
}
449 
is_frame_pointer(const ir_node * n)450 ir_type *is_frame_pointer(const ir_node *n)
451 {
452 	if (is_Proj(n) && (get_Proj_proj(n) == pn_Start_P_frame_base)) {
453 		ir_node *start = get_Proj_pred(n);
454 		if (is_Start(start)) {
455 			return get_irg_frame_type(get_irn_irg(start));
456 		}
457 	}
458 	return NULL;
459 }
460 
/* Returns the control-flow predecessor array of a block, skipping the
 * in[0] slot. */
ir_node **get_Block_cfgpred_arr(ir_node *node)
{
	assert(is_Block(node));
	ir_node **const in = get_irn_in(node);
	return in + 1;
}
466 
/* Out-of-line accessors for a block's control-flow predecessors. */
int (get_Block_n_cfgpreds)(const ir_node *node)
{
	return get_Block_n_cfgpreds_(node);
}

ir_node *(get_Block_cfgpred)(const ir_node *node, int pos)
{
	return get_Block_cfgpred_(node, pos);
}
476 
/* Replaces the pos-th control-flow predecessor of a block. */
void set_Block_cfgpred(ir_node *node, int pos, ir_node *pred)
{
	assert(is_Block(node));
	set_irn_n(node, pos, pred);
}
482 
/**
 * Returns the input position at which pred is a control-flow predecessor
 * block of block, or -1 when it is none. Like the original, the search
 * runs from the highest index downwards, so with multiple matches the
 * highest position wins.
 */
int get_Block_cfgpred_pos(const ir_node *block, const ir_node *pred)
{
	int pos = get_Block_n_cfgpreds(block);
	while (pos-- > 0) {
		if (get_Block_cfgpred_block(block, pos) == pred)
			return pos;
	}
	return -1;
}
493 
/* Returns the block of the pos-th control-flow predecessor. */
ir_node *(get_Block_cfgpred_block)(const ir_node *node, int pos)
{
	return get_Block_cfgpred_block_(node, pos);
}

/* A matured block has all its control-flow predecessors fixed. */
int get_Block_matured(const ir_node *node)
{
	assert(is_Block(node));
	return (int)node->attr.block.is_matured;
}

void set_Block_matured(ir_node *node, int matured)
{
	assert(is_Block(node));
	node->attr.block.is_matured = matured;
}

/* Out-of-line accessors for the block-local visited counter. */
ir_visited_t (get_Block_block_visited)(const ir_node *node)
{
	return get_Block_block_visited_(node);
}

void (set_Block_block_visited)(ir_node *node, ir_visited_t visit)
{
	set_Block_block_visited_(node, visit);
}

void (mark_Block_block_visited)(ir_node *node)
{
	mark_Block_block_visited_(node);
}

int (Block_block_visited)(const ir_node *node)
{
	return Block_block_visited_(node);
}

/* Returns the graph a block belongs to. */
ir_graph *(get_Block_irg)(const ir_node *block)
{
	return get_Block_irg_(block);
}
535 
create_Block_entity(ir_node * block)536 ir_entity *create_Block_entity(ir_node *block)
537 {
538 	ir_entity *entity;
539 	assert(is_Block(block));
540 
541 	entity = block->attr.block.entity;
542 	if (entity == NULL) {
543 		ir_label_t nr = get_irp_next_label_nr();
544 		entity = new_label_entity(nr);
545 		set_entity_visibility(entity, ir_visibility_local);
546 		set_entity_linkage(entity, IR_LINKAGE_CONSTANT);
547 		set_entity_compiler_generated(entity, 1);
548 
549 		block->attr.block.entity = entity;
550 	}
551 	return entity;
552 }
553 
/* Accessors for the block-local Phi list, the block mark bit and the
 * keep-alive inputs of the End node. */
ir_node *(get_Block_phis)(const ir_node *block)
{
	return get_Block_phis_(block);
}

void (set_Block_phis)(ir_node *block, ir_node *phi)
{
	set_Block_phis_(block, phi);
}

void (add_Block_phi)(ir_node *block, ir_node *phi)
{
	add_Block_phi_(block, phi);
}

unsigned (get_Block_mark)(const ir_node *block)
{
	return get_Block_mark_(block);
}

void (set_Block_mark)(ir_node *block, unsigned mark)
{
	set_Block_mark_(block, mark);
}

/* Number of keep-alive edges of the End node. */
int get_End_n_keepalives(const ir_node *end)
{
	assert(is_End(end));
	return (get_irn_arity(end) - END_KEEPALIVE_OFFSET);
}

ir_node *get_End_keepalive(const ir_node *end, int pos)
{
	assert(is_End(end));
	return get_irn_n(end, pos + END_KEEPALIVE_OFFSET);
}

/* Appends a keep-alive edge (End has dynamic arity). */
void add_End_keepalive(ir_node *end, ir_node *ka)
{
	assert(is_End(end));
	add_irn_n(end, ka);
}

void set_End_keepalive(ir_node *end, int pos, ir_node *ka)
{
	assert(is_End(end));
	set_irn_n(end, pos + END_KEEPALIVE_OFFSET, ka);
}
602 
/* Replaces all keep-alive edges of the End node with the n nodes in in[],
 * keeping the edge subsystem informed about every removal and addition. */
void set_End_keepalives(ir_node *end, int n, ir_node *in[])
{
	size_t e;
	int    i;
	ir_graph *irg = get_irn_irg(end);

	/* notify that edges are deleted */
	for (e = END_KEEPALIVE_OFFSET; e < ARR_LEN(end->in) - 1; ++e) {
		edges_notify_edge(end, e, NULL, end->in[e + 1], irg);
	}
	/* End has a flexible in array, so it can be resized in place */
	ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);

	for (i = 0; i < n; ++i) {
		end->in[1 + END_KEEPALIVE_OFFSET + i] = in[i];
		edges_notify_edge(end, END_KEEPALIVE_OFFSET + i, end->in[1 + END_KEEPALIVE_OFFSET + i], NULL, irg);
	}

	/* update irg flags */
	clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUTS);
}
623 
/* Removes one occurrence of irn from the End node's keep-alive edges by
 * swapping the last keep-alive into its slot; a no-op when irn is not
 * kept alive. */
void remove_End_keepalive(ir_node *end, ir_node *irn)
{
	int      n = get_End_n_keepalives(end);
	ir_graph *irg;

	/* search backwards for irn; return without change if absent */
	int idx = -1;
	for (int i = n;;) {
		if (i-- == 0)
			return;

		ir_node *old_ka = end->in[1 + END_KEEPALIVE_OFFSET + i];

		/* find irn */
		if (old_ka == irn) {
			idx = i;
			break;
		}
	}
	irg = get_irn_irg(end);

	/* remove the edge */
	edges_notify_edge(end, idx, NULL, irn, irg);

	if (idx != n - 1) {
		/* exchange with the last one */
		ir_node *old = end->in[1 + END_KEEPALIVE_OFFSET + n - 1];
		edges_notify_edge(end, n - 1, NULL, old, irg);
		end->in[1 + END_KEEPALIVE_OFFSET + idx] = old;
		edges_notify_edge(end, idx, old, NULL, irg);
	}
	/* now n - 1 keeps, 1 block input */
	ARR_RESIZE(ir_node *, end->in, (n - 1) + 1 + END_KEEPALIVE_OFFSET);

	/* update irg flags */
	clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUTS);
}
660 
/* Drops Bad, NoMem and duplicate keep-alive edges from the End node,
 * using a pset to detect duplicates. Iterates backwards so the
 * swap-with-last removal does not disturb unvisited indices. */
void remove_End_Bads_and_doublets(ir_node *end)
{
	pset_new_t keeps;
	int        idx, n = get_End_n_keepalives(end);
	ir_graph   *irg;
	bool       changed = false;

	if (n <= 0)
		return;

	irg = get_irn_irg(end);
	pset_new_init(&keeps);

	for (idx = n - 1; idx >= 0; --idx) {
		ir_node *ka = get_End_keepalive(end, idx);

		if (is_Bad(ka) || is_NoMem(ka) || pset_new_contains(&keeps, ka)) {
			changed = true;
			/* remove the edge */
			edges_notify_edge(end, idx, NULL, ka, irg);

			if (idx != n - 1) {
				/* exchange with the last one */
				ir_node *old = end->in[1 + END_KEEPALIVE_OFFSET + n - 1];
				edges_notify_edge(end, n - 1, NULL, old, irg);
				end->in[1 + END_KEEPALIVE_OFFSET + idx] = old;
				edges_notify_edge(end, idx, old, NULL, irg);
			}
			--n;
		} else {
			pset_new_insert(&keeps, ka);
		}
	}
	/* n keeps, 1 block input */
	ARR_RESIZE(ir_node *, end->in, n + 1 + END_KEEPALIVE_OFFSET);

	pset_new_destroy(&keeps);

	if (changed) {
		clear_irg_properties(irg, IR_GRAPH_PROPERTY_CONSISTENT_OUTS);
	}
}
703 
/* Invalidates an End node: marks it as bad, frees its flexible in array
 * and NULLs the pointer so later accesses fault loudly. */
void free_End(ir_node *end)
{
	assert(is_End(end));
	end->kind = k_BAD;
	DEL_ARR_F(end->in);
	/* make sure we get an error if the in array is used afterwards */
	end->in = NULL;
}
712 
/* Number of result values of a Return node. */
size_t get_Return_n_ress(const ir_node *node)
{
	assert(is_Return(node));
	return (size_t)(get_irn_arity(node) - RETURN_RESULT_OFFSET);
}

/* Returns the result array of a Return, or NULL when it has none. */
ir_node **get_Return_res_arr(ir_node *node)
{
	assert(is_Return(node));
	if (get_Return_n_ress(node) > 0)
		return (ir_node **)&(get_irn_in(node)[1 + RETURN_RESULT_OFFSET]);
	else
		return NULL;
}

ir_node *get_Return_res(const ir_node *node, int pos)
{
	assert(is_Return(node));
	assert(pos >= 0);
	assert(get_Return_n_ress(node) > (size_t)pos);
	return get_irn_n(node, pos + RETURN_RESULT_OFFSET);
}

void set_Return_res(ir_node *node, int pos, ir_node *res)
{
	assert(is_Return(node));
	set_irn_n(node, pos + RETURN_RESULT_OFFSET, res);
}
741 
/* Out-of-line predicates for Const nodes with special values. */
int (is_Const_null)(const ir_node *node)
{
	return is_Const_null_(node);
}

int (is_Const_one)(const ir_node *node)
{
	return is_Const_one_(node);
}

int (is_Const_all_one)(const ir_node *node)
{
	return is_Const_all_one_(node);
}
756 
757 
758 
/* Accessors for SymConst nodes. The symbol is a union; which member is
 * valid depends on the SymConst kind (SYMCONST_HAS_* predicates). */
symconst_kind get_SymConst_kind(const ir_node *node)
{
	assert(is_SymConst(node));
	return node->attr.symc.kind;
}

void set_SymConst_kind(ir_node *node, symconst_kind kind)
{
	assert(is_SymConst(node));
	node->attr.symc.kind = kind;
}

ir_type *get_SymConst_type(const ir_node *node)
{
	/* the cast here is annoying, but we have to compensate for
	   the skip_tip() */
	ir_node *irn = (ir_node *)node;
	assert(is_SymConst(node) &&
	       (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
	return irn->attr.symc.sym.type_p;
}

void set_SymConst_type(ir_node *node, ir_type *tp)
{
	assert(is_SymConst(node) &&
	       (SYMCONST_HAS_TYPE(get_SymConst_kind(node))));
	node->attr.symc.sym.type_p = tp;
}

ir_entity *get_SymConst_entity(const ir_node *node)
{
	assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
	return node->attr.symc.sym.entity_p;
}

void set_SymConst_entity(ir_node *node, ir_entity *ent)
{
	assert(is_SymConst(node) && SYMCONST_HAS_ENT(get_SymConst_kind(node)));
	node->attr.symc.sym.entity_p  = ent;
}

ir_enum_const *get_SymConst_enum(const ir_node *node)
{
	assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
	return node->attr.symc.sym.enum_p;
}

void set_SymConst_enum(ir_node *node, ir_enum_const *ec)
{
	assert(is_SymConst(node) && SYMCONST_HAS_ENUM(get_SymConst_kind(node)));
	node->attr.symc.sym.enum_p  = ec;
}

/* Raw access to the whole symbol union, regardless of kind. */
union symconst_symbol
get_SymConst_symbol(const ir_node *node)
{
	assert(is_SymConst(node));
	return node->attr.symc.sym;
}

void set_SymConst_symbol(ir_node *node, union symconst_symbol sym)
{
	assert(is_SymConst(node));
	node->attr.symc.sym = sym;
}
824 
/* Accessors for the index inputs of a Sel node (inputs beyond
 * SEL_INDEX_OFFSET). */
int get_Sel_n_indexs(const ir_node *node)
{
	assert(is_Sel(node));
	return (get_irn_arity(node) - SEL_INDEX_OFFSET);
}

/* Returns the index array of a Sel, or NULL when it has no indices. */
ir_node **get_Sel_index_arr(ir_node *node)
{
	assert(is_Sel(node));
	if (get_Sel_n_indexs(node) > 0)
		return (ir_node **)& get_irn_in(node)[SEL_INDEX_OFFSET + 1];
	else
		return NULL;
}

ir_node *get_Sel_index(const ir_node *node, int pos)
{
	assert(is_Sel(node));
	return get_irn_n(node, pos + SEL_INDEX_OFFSET);
}

void set_Sel_index(ir_node *node, int pos, ir_node *index)
{
	assert(is_Sel(node));
	set_irn_n(node, pos + SEL_INDEX_OFFSET, index);
}
851 
/* Accessors for the parameter inputs of a Call node (inputs beyond
 * CALL_PARAM_OFFSET). */
ir_node **get_Call_param_arr(ir_node *node)
{
	assert(is_Call(node));
	return &get_irn_in(node)[CALL_PARAM_OFFSET + 1];
}

int get_Call_n_params(const ir_node *node)
{
	assert(is_Call(node));
	return get_irn_arity(node) - CALL_PARAM_OFFSET;
}

ir_node *get_Call_param(const ir_node *node, int pos)
{
	assert(is_Call(node));
	return get_irn_n(node, pos + CALL_PARAM_OFFSET);
}

void set_Call_param(ir_node *node, int pos, ir_node *param)
{
	assert(is_Call(node));
	set_irn_n(node, pos + CALL_PARAM_OFFSET, param);
}
875 
/* Accessors for the parameter inputs of a Builtin node (inputs beyond
 * BUILTIN_PARAM_OFFSET). */
ir_node **get_Builtin_param_arr(ir_node *node)
{
	assert(is_Builtin(node));
	return &get_irn_in(node)[BUILTIN_PARAM_OFFSET + 1];
}

int get_Builtin_n_params(const ir_node *node)
{
	assert(is_Builtin(node));
	return (get_irn_arity(node) - BUILTIN_PARAM_OFFSET);
}

ir_node *get_Builtin_param(const ir_node *node, int pos)
{
	assert(is_Builtin(node));
	return get_irn_n(node, pos + BUILTIN_PARAM_OFFSET);
}

void set_Builtin_param(ir_node *node, int pos, ir_node *param)
{
	assert(is_Builtin(node));
	set_irn_n(node, pos + BUILTIN_PARAM_OFFSET, param);
}
899 
/* Returns the enum constant's name for a builtin kind, or "<unknown>"
 * for values outside the enum. */
const char *get_builtin_kind_name(ir_builtin_kind kind)
{
	switch (kind) {
	case ir_bk_trap:             return "ir_bk_trap";
	case ir_bk_debugbreak:       return "ir_bk_debugbreak";
	case ir_bk_return_address:   return "ir_bk_return_address";
	case ir_bk_frame_address:    return "ir_bk_frame_address";
	case ir_bk_prefetch:         return "ir_bk_prefetch";
	case ir_bk_ffs:              return "ir_bk_ffs";
	case ir_bk_clz:              return "ir_bk_clz";
	case ir_bk_ctz:              return "ir_bk_ctz";
	case ir_bk_popcount:         return "ir_bk_popcount";
	case ir_bk_parity:           return "ir_bk_parity";
	case ir_bk_bswap:            return "ir_bk_bswap";
	case ir_bk_inport:           return "ir_bk_inport";
	case ir_bk_outport:          return "ir_bk_outport";
	case ir_bk_inner_trampoline: return "ir_bk_inner_trampoline";
	}
	return "<unknown>";
}
922 
923 
/* A Call has callee information iff the graph's callee-info analysis ran
 * and a callee array was attached to the node. */
int Call_has_callees(const ir_node *node)
{
	assert(is_Call(node));
	return ((get_irg_callee_info_state(get_irn_irg(node)) != irg_callee_info_none) &&
	        (node->attr.call.callee_arr != NULL));
}
930 
get_Call_n_callees(const ir_node * node)931 size_t get_Call_n_callees(const ir_node *node)
932 {
933   assert(is_Call(node) && node->attr.call.callee_arr);
934   return ARR_LEN(node->attr.call.callee_arr);
935 }
936 
/* Returns the pos-th possible callee of a Call. */
ir_entity *get_Call_callee(const ir_node *node, size_t pos)
{
	assert(pos < get_Call_n_callees(node));
	return node->attr.call.callee_arr[pos];
}

/* Installs a callee array, reusing the old obstack allocation when the
 * size is unchanged. */
void set_Call_callee_arr(ir_node *node, size_t n, ir_entity ** arr)
{
	ir_graph *irg = get_irn_irg(node);

	assert(is_Call(node));
	if (node->attr.call.callee_arr == NULL || get_Call_n_callees(node) != n) {
		node->attr.call.callee_arr = NEW_ARR_D(ir_entity *, irg->obst, n);
	}
	memcpy(node->attr.call.callee_arr, arr, n * sizeof(ir_entity *));
}

/* Drops the callee array (the obstack memory is not reclaimed). */
void remove_Call_callee_arr(ir_node *node)
{
	assert(is_Call(node));
	node->attr.call.callee_arr = NULL;
}
959 
/**
 * Tests whether a Cast is an upcast: after stripping matching pointer
 * levels, the destination must be a class type and the source a subclass
 * of it. Requires consistent type information on the graph.
 */
int is_Cast_upcast(ir_node *node)
{
	ir_type *dst = get_Cast_type(node);
	ir_type *src = get_irn_typeinfo_type(get_Cast_op(node));

	assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
	assert(src);

	/* peel off pairwise pointer levels */
	while (is_Pointer_type(dst) && is_Pointer_type(src)) {
		dst = get_pointer_points_to_type(dst);
		src = get_pointer_points_to_type(src);
	}

	assert(src);

	if (!is_Class_type(dst))
		return 0;
	return is_SubClass_of(src, dst);
}
978 
/**
 * Tests whether a Cast is a downcast: after stripping matching pointer
 * levels, the destination must be a class type that is a subclass of the
 * source. Requires consistent type information on the graph.
 */
int is_Cast_downcast(ir_node *node)
{
	ir_type *dst = get_Cast_type(node);
	ir_type *src = get_irn_typeinfo_type(get_Cast_op(node));

	assert(get_irg_typeinfo_state(get_irn_irg(node)) == ir_typeinfo_consistent);
	assert(src);

	/* peel off pairwise pointer levels */
	while (is_Pointer_type(dst) && is_Pointer_type(src)) {
		dst = get_pointer_points_to_type(dst);
		src = get_pointer_points_to_type(src);
	}

	assert(src);

	if (!is_Class_type(dst))
		return 0;
	return is_SubClass_of(dst, src);
}
997 
/* Generic accessors for unary and binary operations; op_index names the
 * first data input in the node's in array. */
int (is_unop)(const ir_node *node)
{
	return is_unop_(node);
}

/* Returns the single operand of a unary op; NULL (after assert) for
 * non-unary ops. */
ir_node *get_unop_op(const ir_node *node)
{
	if (node->op->opar == oparity_unary)
		return get_irn_n(node, node->op->op_index);

	assert(node->op->opar == oparity_unary);
	return NULL;
}

void set_unop_op(ir_node *node, ir_node *op)
{
	if (node->op->opar == oparity_unary)
		set_irn_n(node, node->op->op_index, op);

	assert(node->op->opar == oparity_unary);
}

int (is_binop)(const ir_node *node)
{
	return is_binop_(node);
}

/* Left operand is at op_index, right operand at op_index + 1. */
ir_node *get_binop_left(const ir_node *node)
{
	assert(node->op->opar == oparity_binary);
	return get_irn_n(node, node->op->op_index);
}

void set_binop_left(ir_node *node, ir_node *left)
{
	assert(node->op->opar == oparity_binary);
	set_irn_n(node, node->op->op_index, left);
}

ir_node *get_binop_right(const ir_node *node)
{
	assert(node->op->opar == oparity_binary);
	return get_irn_n(node, node->op->op_index + 1);
}

void set_binop_right(ir_node *node, ir_node *right)
{
	assert(node->op->opar == oparity_binary);
	set_irn_n(node, node->op->op_index + 1, right);
}
1048 
/* Returns the predecessor array of a Phi, skipping the in[0] block slot. */
ir_node **get_Phi_preds_arr(ir_node *node)
{
	assert(is_Phi(node));
	return &get_irn_in(node)[1];
}
1054 
/** Returns the number of predecessors (the arity) of a Phi node. */
int get_Phi_n_preds(const ir_node *node)
{
	assert(is_Phi(node));
	return get_irn_arity(node);
}

/** Returns the predecessor of a Phi node at position @p pos. */
ir_node *get_Phi_pred(const ir_node *node, int pos)
{
	assert(is_Phi(node));
	return get_irn_n(node, pos);
}

/** Sets the predecessor of a Phi node at position @p pos. */
void set_Phi_pred(ir_node *node, int pos, ir_node *pred)
{
	assert(is_Phi(node));
	set_irn_n(node, pos, pred);
}
1072 
/* Out-of-line wrappers around get_Phi_next_()/set_Phi_next_(); the
 * parenthesized names suppress macro expansion. */
ir_node *(get_Phi_next)(const ir_node *phi)
{
	return get_Phi_next_(phi);
}

void (set_Phi_next)(ir_node *phi, ir_node *next)
{
	set_Phi_next_(phi, next);
}
1082 
/** Returns true if @p node's opcode is flagged as memory-using
 *  (see is_op_uses_memory()). */
int is_memop(const ir_node *node)
{
	return is_op_uses_memory(get_irn_op(node));
}
1087 
get_memop_mem(const ir_node * node)1088 ir_node *get_memop_mem(const ir_node *node)
1089 {
1090 	const ir_op *op = get_irn_op(node);
1091 	assert(is_memop(node));
1092 	return get_irn_n(node, op->memory_index);
1093 }
1094 
set_memop_mem(ir_node * node,ir_node * mem)1095 void set_memop_mem(ir_node *node, ir_node *mem)
1096 {
1097 	const ir_op *op = get_irn_op(node);
1098 	assert(is_memop(node));
1099 	set_irn_n(node, op->memory_index, mem);
1100 }
1101 
/** Returns the raw predecessor array of a Sync node (skips in[0]). */
ir_node **get_Sync_preds_arr(ir_node *node)
{
	assert(is_Sync(node));
	return (ir_node **)&(get_irn_in(node)[1]);
}
1107 
/** Returns the number of predecessors of a Sync node. */
int get_Sync_n_preds(const ir_node *node)
{
	assert(is_Sync(node));
	return get_irn_arity(node);
}
1113 
/** Returns the predecessor of a Sync node at position @p pos. */
ir_node *get_Sync_pred(const ir_node *node, int pos)
{
	assert(is_Sync(node));
	return get_irn_n(node, pos);
}

/** Sets the predecessor of a Sync node at position @p pos. */
void set_Sync_pred(ir_node *node, int pos, ir_node *pred)
{
	assert(is_Sync(node));
	set_irn_n(node, pos, pred);
}

/** Appends @p pred as an additional predecessor of a Sync node. */
void add_Sync_pred(ir_node *node, ir_node *pred)
{
	assert(is_Sync(node));
	add_irn_n(node, pred);
}
1131 
/* Out-of-line version of is_arg_Proj_(); parentheses suppress macro
 * expansion. */
int (is_arg_Proj)(const ir_node *node)
{
	return is_arg_Proj_(node);
}
1136 
is_x_except_Proj(const ir_node * node)1137 int is_x_except_Proj(const ir_node *node)
1138 {
1139 	ir_node *pred;
1140 	if (!is_Proj(node))
1141 		return false;
1142 	pred = get_Proj_pred(node);
1143 	if (!is_fragile_op(pred))
1144 		return false;
1145 	return get_Proj_proj(node) == pred->op->pn_x_except;
1146 }
1147 
is_x_regular_Proj(const ir_node * node)1148 int is_x_regular_Proj(const ir_node *node)
1149 {
1150 	ir_node *pred;
1151 	if (!is_Proj(node))
1152 		return false;
1153 	pred = get_Proj_pred(node);
1154 	if (!is_fragile_op(pred))
1155 		return false;
1156 	return get_Proj_proj(node) == pred->op->pn_x_regular;
1157 }
1158 
ir_set_throws_exception(ir_node * node,int throws_exception)1159 void ir_set_throws_exception(ir_node *node, int throws_exception)
1160 {
1161 	except_attr *attr = &node->attr.except;
1162 	assert(is_fragile_op(node));
1163 	attr->throws_exception = throws_exception;
1164 }
1165 
ir_throws_exception(const ir_node * node)1166 int ir_throws_exception(const ir_node *node)
1167 {
1168 	const except_attr *attr = &node->attr.except;
1169 	assert(is_fragile_op(node));
1170 	return attr->throws_exception;
1171 }
1172 
/** Returns the raw predecessor array of a Tuple node (skips in[0]). */
ir_node **get_Tuple_preds_arr(ir_node *node)
{
	assert(is_Tuple(node));
	return (ir_node **)&(get_irn_in(node)[1]);
}

/** Returns the number of predecessors (the arity) of a Tuple node. */
int get_Tuple_n_preds(const ir_node *node)
{
	assert(is_Tuple(node));
	return get_irn_arity(node);
}
1184 
/** Returns the predecessor of a Tuple node at position @p pos.
 * Fix: reindented with tabs for consistency with the rest of the file,
 * which uses tab indentation throughout. */
ir_node *get_Tuple_pred(const ir_node *node, int pos)
{
	assert(is_Tuple(node));
	return get_irn_n(node, pos);
}
1190 
/** Sets the predecessor of a Tuple node at position @p pos. */
void set_Tuple_pred(ir_node *node, int pos, ir_node *pred)
{
	assert(is_Tuple(node));
	set_irn_n(node, pos, pred);
}
1196 
/** Returns the number of data inputs of an ASM node: the total arity
 *  minus the fixed operands located before ASM_PARAM_OFFSET. */
int get_ASM_n_inputs(const ir_node *node)
{
	assert(is_ASM(node));
	return get_irn_arity(node) - ASM_PARAM_OFFSET;
}
1202 
get_ASM_input(const ir_node * node,int pos)1203 ir_node *get_ASM_input(const ir_node *node, int pos)
1204 {
1205 	return get_irn_n(node, ASM_PARAM_OFFSET + pos);
1206 }
1207 
/** Returns the number of output constraints of an ASM node. */
size_t get_ASM_n_output_constraints(const ir_node *node)
{
	assert(is_ASM(node));
	return ARR_LEN(node->attr.assem.output_constraints);
}

/** Returns the number of clobber entries of an ASM node. */
size_t get_ASM_n_clobbers(const ir_node *node)
{
	assert(is_ASM(node));
	return ARR_LEN(node->attr.assem.clobbers);
}
1219 
/* Out-of-line version of get_irn_irg_(); parentheses suppress macro
 * expansion. */
ir_graph *(get_irn_irg)(const ir_node *node)
{
	return get_irn_irg_(node);
}
1224 
/** Returns the predecessor of a Proj node, any other node unchanged.
 *  NULL is passed through deliberately (no assert). */
ir_node *skip_Proj(ir_node *node)
{
	if (node != NULL && is_Proj(node))
		node = get_Proj_pred(node);
	return node;
}
1236 
1237 const ir_node *
skip_Proj_const(const ir_node * node)1238 skip_Proj_const(const ir_node *node)
1239 {
1240 	/* don't assert node !!! */
1241 	if (node == NULL)
1242 		return NULL;
1243 
1244 	if (is_Proj(node))
1245 		node = get_Proj_pred(node);
1246 
1247 	return node;
1248 }
1249 
/** Follows Proj-of-Tuple chains: while @p node is a Proj whose
 *  (Tuple-skipped) predecessor is a Tuple, replace it by the Tuple
 *  entry the Proj selects.  Nested Proj chains are resolved
 *  recursively before the Tuple test. */
ir_node *skip_Tuple(ir_node *node)
{
	while (is_Proj(node)) {
		ir_node *pred = get_Proj_pred(node);

		if (is_Proj(pred)) /* nested Tuple? resolve the inner chain first */
			pred = skip_Tuple(pred);
		if (!is_Tuple(pred))
			break;
		node = get_Tuple_pred(pred, get_Proj_proj(node));
	}
	return node;
}
1272 
/** Returns the operand of a Cast node, any other node unchanged. */
ir_node *skip_Cast(ir_node *node)
{
	return is_Cast(node) ? get_Cast_op(node) : node;
}

/** Const variant of skip_Cast(). */
const ir_node *skip_Cast_const(const ir_node *node)
{
	return is_Cast(node) ? get_Cast_op(node) : node;
}
1286 
/** Returns the operand of a Pin node, any other node unchanged. */
ir_node *skip_Pin(ir_node *node)
{
	return is_Pin(node) ? get_Pin_op(node) : node;
}

/** Returns the value of a Confirm node, any other node unchanged. */
ir_node *skip_Confirm(ir_node *node)
{
	return is_Confirm(node) ? get_Confirm_value(node) : node;
}
1300 
/** Walks down over operations whose opcode is flagged highlevel,
 *  following their first operand (in position 0) each time. */
ir_node *skip_HighLevel_ops(ir_node *node)
{
	for (;;) {
		if (!is_op_highlevel(get_irn_op(node)))
			return node;
		node = get_irn_n(node, 0);
	}
}
1308 
1309 
/**
 * Returns the node a (chain of) Id node(s) ultimately refers to; any
 * non-Id node is returned unchanged.  Performs destructive path
 * compression on Id chains as a side effect: every Id visited is
 * rewritten to point at the chain's end, and Id cycles are collapsed
 * into self-loops.  NULL is passed through.
 */
ir_node *skip_Id(ir_node *node)
{
	/* This should compact Id-cycles to self-cycles. It has the same (or less?) complexity
	 * than any other approach, as Id chains are resolved and all point to the real node, or
	 * all id's are self loops.
	 *
	 * Note: This function takes 10% of mostly ANY the compiler run, so it's
	 * a little bit "hand optimized".
	 */
	ir_node *pred;
	/* don't assert node !!! */

	if (!node || (node->op != op_Id)) return node;

	/* Don't use get_Id_pred():  We get into an endless loop for
	   self-referencing Ids. */
	pred = node->in[0+1];

	if (pred->op != op_Id) return pred;

	if (node != pred) {  /* not a self referencing Id. Resolve Id chain. */
		ir_node *rem_pred, *res;

		/* NOTE(review): this check is unreachable — the same condition
		 * already returned above; kept byte-identical on purpose. */
		if (pred->op != op_Id) return pred; /* shortcut */
		rem_pred = pred;

		assert(get_irn_arity (node) > 0);

		/* Temporarily self-loop so a cycle terminates the recursion below. */
		node->in[0+1] = node;   /* turn us into a self referencing Id:  shorten Id cycles. */
		res = skip_Id(rem_pred);
		/* If the recursion came back with an Id, we are inside a cycle:
		 * stay a self-loop and return ourselves. */
		if (is_Id(res)) /* self-loop */ return node;

		node->in[0+1] = res;    /* Turn Id chain into Ids all referencing the chain end. */
		return res;
	} else {
		return node;
	}
}
1348 
/* Out-of-line version of is_SymConst_addr_ent_(); parentheses suppress
 * macro expansion. */
int (is_SymConst_addr_ent)(const ir_node *node)
{
	return is_SymConst_addr_ent_(node);
}
1353 
/** Returns true if @p node is a control-flow operation.  A fragile
 *  operation marked as throwing counts as control flow too, in addition
 *  to opcodes flagged as cfopcode. */
int is_cfop(const ir_node *node)
{
	if (is_fragile_op(node) && ir_throws_exception(node))
		return true;

	return is_op_cfopcode(get_irn_op(node));
}
1361 
/** Returns true if @p node's opcode is flagged as an unknown jump. */
int is_unknown_jump(const ir_node *node)
{
	return is_op_unknown_jump(get_irn_op(node));
}

/** Returns true if @p node's opcode is flagged fragile (may raise an
 *  exception). */
int is_fragile_op(const ir_node *node)
{
	return is_op_fragile(get_irn_op(node));
}
1371 
/* Out-of-line versions of the corresponding underscore-suffixed
 * inline/macro implementations; the parenthesized names suppress macro
 * expansion so a real function symbol is emitted. */
int (is_irn_forking)(const ir_node *node)
{
	return is_irn_forking_(node);
}

void (copy_node_attr)(ir_graph *irg, const ir_node *old_node, ir_node *new_node)
{
	copy_node_attr_(irg, old_node, new_node);
}

ir_type *(get_irn_type_attr)(ir_node *node)
{
	return get_irn_type_attr_(node);
}

ir_entity *(get_irn_entity_attr)(ir_node *node)
{
	return get_irn_entity_attr_(node);
}

int (is_irn_constlike)(const ir_node *node)
{
	return is_irn_constlike_(node);
}

int (is_irn_keep)(const ir_node *node)
{
	return is_irn_keep_(node);
}

int (is_irn_start_block_placed)(const ir_node *node)
{
	return is_irn_start_block_placed_(node);
}

int (is_irn_cse_neutral)(const ir_node *node)
{
	return is_irn_cse_neutral_(node);
}
1411 
/** Returns the name of the given jump-prediction value as a string,
 *  "<unknown>" for out-of-range values. */
const char *get_cond_jmp_predicate_name(cond_jmp_predicate pred)
{
	switch (pred) {
	case COND_JMP_PRED_NONE:  return "COND_JMP_PRED_NONE";
	case COND_JMP_PRED_TRUE:  return "COND_JMP_PRED_TRUE";
	case COND_JMP_PRED_FALSE: return "COND_JMP_PRED_FALSE";
	}
	return "<unknown>";
}
1423 
1424 /** Return the attribute type of a SymConst node if exists */
get_SymConst_attr_type(const ir_node * self)1425 static ir_type *get_SymConst_attr_type(const ir_node *self)
1426 {
1427 	symconst_kind kind = get_SymConst_kind(self);
1428 	if (SYMCONST_HAS_TYPE(kind))
1429 		return get_SymConst_type(self);
1430 	return NULL;
1431 }
1432 
1433 /** Return the attribute entity of a SymConst node if exists */
get_SymConst_attr_entity(const ir_node * self)1434 static ir_entity *get_SymConst_attr_entity(const ir_node *self)
1435 {
1436 	symconst_kind kind = get_SymConst_kind(self);
1437 	if (SYMCONST_HAS_ENT(kind))
1438 		return get_SymConst_entity(self);
1439 	return NULL;
1440 }
1441 
/** Installs @p func as the get_type_attr callback of opcode @p op. */
static void register_get_type_func(ir_op *op, get_type_attr_func func)
{
	op->ops.get_type_attr = func;
}

/** Installs @p func as the get_entity_attr callback of opcode @p op. */
static void register_get_entity_func(ir_op *op, get_entity_attr_func func)
{
	op->ops.get_entity_attr = func;
}
1451 
/** Registers the type- and entity-attribute getter callbacks for all
 *  opcodes that carry such attributes. */
void ir_register_getter_ops(void)
{
	register_get_type_func(op_Alloc,    get_Alloc_type);
	register_get_type_func(op_Builtin,  get_Builtin_type);
	register_get_type_func(op_Call,     get_Call_type);
	register_get_type_func(op_Cast,     get_Cast_type);
	register_get_type_func(op_CopyB,    get_CopyB_type);
	register_get_type_func(op_Free,     get_Free_type);
	register_get_type_func(op_InstOf,   get_InstOf_type);
	register_get_type_func(op_SymConst, get_SymConst_attr_type);

	register_get_entity_func(op_SymConst, get_SymConst_attr_entity);
	register_get_entity_func(op_Sel,      get_Sel_entity);
	register_get_entity_func(op_Block,    get_Block_entity);
}
1467 
/* Out-of-line debug-info accessors; parenthesized names suppress macro
 * expansion. */
void (set_irn_dbg_info)(ir_node *n, dbg_info *db)
{
	set_irn_dbg_info_(n, db);
}

dbg_info *(get_irn_dbg_info)(const ir_node *n)
{
	return get_irn_dbg_info_(n);
}
1477 
ir_new_switch_table(ir_graph * irg,size_t n_entries)1478 ir_switch_table *ir_new_switch_table(ir_graph *irg, size_t n_entries)
1479 {
1480 	struct obstack *obst = get_irg_obstack(irg);
1481 	ir_switch_table *res = OALLOCFZ(obst, ir_switch_table, entries, n_entries);
1482 	res->n_entries = n_entries;
1483 	return res;
1484 }
1485 
/** Fills entry @p n of @p table with the case range [@p min, @p max]
 *  and the Proj number @p pn it maps to. */
void ir_switch_table_set(ir_switch_table *table, size_t n,
                         ir_tarval *min, ir_tarval *max, long pn)
{
	ir_switch_table_entry *entry = ir_switch_table_get_entry(table, n);
	entry->min = min;
	entry->max = max;
	entry->pn  = pn;
}
1494 
/* Out-of-line version; parentheses suppress macro expansion. */
size_t (ir_switch_table_get_n_entries)(const ir_switch_table *table)
{
	return ir_switch_table_get_n_entries_(table);
}

/** Returns the upper bound of table entry @p e. */
ir_tarval *ir_switch_table_get_max(const ir_switch_table *table, size_t e)
{
	return ir_switch_table_get_entry_const(table, e)->max;
}

/** Returns the lower bound of table entry @p e. */
ir_tarval *ir_switch_table_get_min(const ir_switch_table *table, size_t e)
{
	return ir_switch_table_get_entry_const(table, e)->min;
}

/** Returns the Proj number of table entry @p e. */
long ir_switch_table_get_pn(const ir_switch_table *table, size_t e)
{
	return ir_switch_table_get_entry_const(table, e)->pn;
}
1514 
/** Creates a copy of @p table on the obstack of @p irg, duplicating
 *  every entry. */
ir_switch_table *ir_switch_table_duplicate(ir_graph *irg,
                                           const ir_switch_table *table)
{
	size_t const     n_entries = ir_switch_table_get_n_entries(table);
	ir_switch_table *res       = ir_new_switch_table(irg, n_entries);
	for (size_t i = 0; i < n_entries; ++i) {
		*ir_switch_table_get_entry(res, i)
			= *ir_switch_table_get_entry_const(table, i);
	}
	return res;
}
1529 
only_used_by_keepalive(const ir_node * node)1530 bool only_used_by_keepalive(const ir_node *node)
1531 {
1532 	foreach_out_edge(node, edge) {
1533 		ir_node *succ = get_edge_src_irn(edge);
1534 		if (is_End(succ))
1535 			continue;
1536 		if (is_Proj(succ) && only_used_by_keepalive(succ))
1537 			return true;
1538 		/* found a real user */
1539 		return false;
1540 	}
1541 	return true;
1542 }
1543 
1544 /* include generated code */
1545 #include "gen_irnode.c.inl"
1546