1 /*
2  * Copyright (C) 1995-2011 University of Karlsruhe.  All rights reserved.
3  *
4  * This file is part of libFirm.
5  *
6  * This file may be distributed and/or modified under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation and appearing in the file LICENSE.GPL included in the
9  * packaging of this file.
10  *
11  * Licensees holding valid libFirm Professional Edition licenses may use
12  * this file in accordance with the libFirm Commercial License
13  * Agreement provided with the Software.
14  *
15  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17  * PURPOSE.
18  */
19 
20 /**
21  * @file
22  * @brief       Backend node support for generic backend nodes.
23  * @author      Sebastian Hack
24  * @date        17.05.2005
25  *
26  * Backend node support for generic backend nodes.
27  * This file provides Perm, Copy, Spill and Reload nodes.
28  */
29 #include "config.h"
30 
31 #include <stdlib.h>
32 
33 #include "obst.h"
34 #include "set.h"
35 #include "pmap.h"
36 #include "util.h"
37 #include "debug.h"
38 #include "fourcc.h"
39 #include "bitfiddle.h"
40 #include "raw_bitset.h"
41 #include "error.h"
42 #include "array_t.h"
43 
44 #include "irop_t.h"
45 #include "irmode_t.h"
46 #include "irnode_t.h"
47 #include "ircons_t.h"
48 #include "irprintf.h"
49 #include "irgwalk.h"
50 #include "iropt_t.h"
51 #include "irbackedge_t.h"
52 #include "irverify_t.h"
53 
54 #include "be_t.h"
55 #include "belive_t.h"
56 #include "besched.h"
57 #include "benode.h"
58 #include "bearch.h"
59 
60 #include "beirgmod.h"
61 
62 typedef struct be_node_attr_t {
63 	except_attr  exc;
64 } be_node_attr_t;
65 
66 /** The be_Return node attribute type. */
67 typedef struct {
68 	be_node_attr_t base;
69 	int            num_ret_vals; /**< number of return values */
70 	unsigned       pop;          /**< number of bytes that should be popped */
71 	int            emit_pop;     /**< if set, emit pop bytes, even if pop = 0 */
72 } be_return_attr_t;
73 
74 /** The be_IncSP attribute type. */
75 typedef struct {
76 	be_node_attr_t base;
77 	int            offset;    /**< The offset by which the stack shall be
78  	                               expanded/shrunk. */
79 	int            align;     /**< whether stack should be aligned after the
80 	                               IncSP */
81 } be_incsp_attr_t;
82 
83 /** The be_Frame attribute type. */
84 typedef struct {
85 	be_node_attr_t  base;
86 	ir_entity      *ent;
87 	int             offset;
88 } be_frame_attr_t;
89 
90 /** The be_Call attribute type. */
91 typedef struct {
92 	be_node_attr_t  base;
93 	ir_entity      *ent;        /**< called entity if this is a static call. */
94 	unsigned        pop;
95 	ir_type        *call_tp;    /**< call type, copied from the original Call */
96 } be_call_attr_t;
97 
98 typedef struct {
99 	be_node_attr_t base;
100 	ir_entity    **in_entities;
101 	ir_entity    **out_entities;
102 } be_memperm_attr_t;
103 
104 ir_op *op_be_Spill;
105 ir_op *op_be_Reload;
106 ir_op *op_be_Perm;
107 ir_op *op_be_MemPerm;
108 ir_op *op_be_Copy;
109 ir_op *op_be_Keep;
110 ir_op *op_be_CopyKeep;
111 ir_op *op_be_Call;
112 ir_op *op_be_Return;
113 ir_op *op_be_IncSP;
114 ir_op *op_be_AddSP;
115 ir_op *op_be_SubSP;
116 ir_op *op_be_Start;
117 ir_op *op_be_FrameAddr;
118 
119 /**
120  * Compare the attributes of two be_FrameAddr nodes.
121  *
122  * @return zero if both nodes have identical attributes
123  */
124 static int FrameAddr_cmp_attr(const ir_node *a, const ir_node *b)
125 {
126 	const be_frame_attr_t *a_attr = (const be_frame_attr_t*)get_irn_generic_attr_const(a);
127 	const be_frame_attr_t *b_attr = (const be_frame_attr_t*)get_irn_generic_attr_const(b);
128 
129 	if (a_attr->ent != b_attr->ent || a_attr->offset != b_attr->offset)
130 		return 1;
131 
132 	return be_nodes_equal(a, b);
133 }
134 
135 /**
136  * Compare the attributes of two be_Return nodes.
137  *
138  * @return zero if both nodes have identical attributes
139  */
140 static int Return_cmp_attr(const ir_node *a, const ir_node *b)
141 {
142 	const be_return_attr_t *a_attr = (const be_return_attr_t*)get_irn_generic_attr_const(a);
143 	const be_return_attr_t *b_attr = (const be_return_attr_t*)get_irn_generic_attr_const(b);
144 
145 	if (a_attr->num_ret_vals != b_attr->num_ret_vals)
146 		return 1;
147 	if (a_attr->pop != b_attr->pop)
148 		return 1;
149 	if (a_attr->emit_pop != b_attr->emit_pop)
150 		return 1;
151 
152 	return be_nodes_equal(a, b);
153 }
154 
155 /**
156  * Compare the attributes of two be_IncSP nodes.
157  *
158  * @return zero if both nodes have identical attributes
159  */
160 static int IncSP_cmp_attr(const ir_node *a, const ir_node *b)
161 {
162 	const be_incsp_attr_t *a_attr = (const be_incsp_attr_t*)get_irn_generic_attr_const(a);
163 	const be_incsp_attr_t *b_attr = (const be_incsp_attr_t*)get_irn_generic_attr_const(b);
164 
165 	if (a_attr->offset != b_attr->offset)
166 		return 1;
167 
168 	return be_nodes_equal(a, b);
169 }
170 
171 /**
172  * Compare the attributes of two be_Call nodes.
173  *
174  * @return zero if both nodes have identical attributes
175  */
176 static int Call_cmp_attr(const ir_node *a, const ir_node *b)
177 {
178 	const be_call_attr_t *a_attr = (const be_call_attr_t*)get_irn_generic_attr_const(a);
179 	const be_call_attr_t *b_attr = (const be_call_attr_t*)get_irn_generic_attr_const(b);
180 
181 	if (a_attr->ent != b_attr->ent ||
182 		a_attr->call_tp != b_attr->call_tp)
183 		return 1;
184 
185 	return be_nodes_equal(a, b);
186 }
187 
188 static arch_register_req_t *allocate_reg_req(const ir_node *node)
189 {
190 	ir_graph       *irg  = get_irn_irg(node);
191 	struct obstack *obst = be_get_be_obst(irg);
192 
193 	arch_register_req_t *req = OALLOCZ(obst, arch_register_req_t);
194 	return req;
195 }
196 
197 void be_set_constr_in(ir_node *node, int pos, const arch_register_req_t *req)
198 {
199 	backend_info_t *info = be_get_info(node);
200 	assert(pos < get_irn_arity(node));
201 	info->in_reqs[pos] = req;
202 }
203 
204 void be_set_constr_out(ir_node *node, int pos, const arch_register_req_t *req)
205 {
206 	backend_info_t *info = be_get_info(node);
207 	info->out_infos[pos].req = req;
208 }
209 
210 /**
211  * Initializes the generic attributes of a be node.
212  */
213 static void init_node_attr(ir_node *node, int n_inputs, int n_outputs)
214 {
215 	ir_graph       *irg  = get_irn_irg(node);
216 	struct obstack *obst = be_get_be_obst(irg);
217 	backend_info_t *info = be_get_info(node);
218 	const arch_register_req_t **in_reqs;
219 
220 	if (n_inputs >= 0) {
221 		int i;
222 		assert(n_inputs == get_irn_arity(node));
223 		in_reqs = OALLOCN(obst, const arch_register_req_t*, n_inputs);
224 		for (i = 0; i < n_inputs; ++i) {
225 			in_reqs[i] = arch_no_register_req;
226 		}
227 	} else {
228 		in_reqs = NEW_ARR_F(const arch_register_req_t*, 0);
229 	}
230 	info->in_reqs = in_reqs;
231 
232 	if (n_outputs >= 0) {
233 		int i;
234 		info->out_infos = NEW_ARR_D(reg_out_info_t, obst, n_outputs);
235 		memset(info->out_infos, 0, n_outputs * sizeof(info->out_infos[0]));
236 		for (i = 0; i < n_outputs; ++i) {
237 			info->out_infos[i].req = arch_no_register_req;
238 		}
239 	} else {
240 		info->out_infos = NEW_ARR_F(reg_out_info_t, 0);
241 	}
242 }
243 
244 static void add_register_req_in(ir_node *node, const arch_register_req_t *req)
245 {
246 	backend_info_t *info = be_get_info(node);
247 	ARR_APP1(const arch_register_req_t*, info->in_reqs, req);
248 }
249 
250 ir_node *be_new_Spill(const arch_register_class_t *cls,
251 		const arch_register_class_t *cls_frame, ir_node *bl,
252 		ir_node *frame, ir_node *to_spill)
253 {
254 	be_frame_attr_t *a;
255 	ir_node         *in[2];
256 	ir_node         *res;
257 	ir_graph        *irg = get_Block_irg(bl);
258 
259 	in[0]     = frame;
260 	in[1]     = to_spill;
261 	res       = new_ir_node(NULL, irg, bl, op_be_Spill, mode_M, 2, in);
262 	init_node_attr(res, 2, 1);
263 	a         = (be_frame_attr_t*) get_irn_generic_attr(res);
264 	a->ent    = NULL;
265 	a->offset = 0;
266 	a->base.exc.pin_state = op_pin_state_pinned;
267 
268 	be_node_set_reg_class_in(res, n_be_Spill_frame, cls_frame);
269 	be_node_set_reg_class_in(res, n_be_Spill_val, cls);
270 	arch_set_irn_register_req_out(res, 0, arch_no_register_req);
271 
272 	return res;
273 }
274 
275 ir_node *be_new_Reload(const arch_register_class_t *cls,
276 		const arch_register_class_t *cls_frame, ir_node *block,
277 		ir_node *frame, ir_node *mem, ir_mode *mode)
278 {
279 	ir_node  *in[2];
280 	ir_node  *res;
281 	ir_graph *irg = get_Block_irg(block);
282 	be_frame_attr_t *a;
283 
284 	in[0] = frame;
285 	in[1] = mem;
286 	res   = new_ir_node(NULL, irg, block, op_be_Reload, mode, 2, in);
287 
288 	init_node_attr(res, 2, 1);
289 	be_node_set_reg_class_out(res, 0, cls);
290 
291 	be_node_set_reg_class_in(res, n_be_Reload_frame, cls_frame);
292 	arch_set_irn_flags(res, arch_irn_flags_rematerializable);
293 
294 	a         = (be_frame_attr_t*) get_irn_generic_attr(res);
295 	a->ent    = NULL;
296 	a->offset = 0;
297 	a->base.exc.pin_state = op_pin_state_pinned;
298 
299 	return res;
300 }
301 
302 ir_node *be_get_Reload_mem(const ir_node *irn)
303 {
304 	assert(be_is_Reload(irn));
305 	return get_irn_n(irn, n_be_Reload_mem);
306 }
307 
308 ir_node *be_get_Reload_frame(const ir_node *irn)
309 {
310 	assert(be_is_Reload(irn));
311 	return get_irn_n(irn, n_be_Reload_frame);
312 }
313 
314 ir_node *be_get_Spill_val(const ir_node *irn)
315 {
316 	assert(be_is_Spill(irn));
317 	return get_irn_n(irn, n_be_Spill_val);
318 }
319 
320 ir_node *be_get_Spill_frame(const ir_node *irn)
321 {
322 	assert(be_is_Spill(irn));
323 	return get_irn_n(irn, n_be_Spill_frame);
324 }
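
/*
 * Illustrative sketch (not part of the original file): a minimal example of
 * how a spiller could use the constructors above to spill a value and reload
 * it in front of a user.  The register classes "cls" and "cls_frame", the
 * nodes "value" and "before", and the use of sched_add_after()/
 * sched_add_before() from besched.h are assumptions for illustration only.
 */
#if 0
static ir_node *example_spill_reload(const arch_register_class_t *cls,
                                     const arch_register_class_t *cls_frame,
                                     ir_node *value, ir_node *before)
{
	ir_graph *irg    = get_irn_irg(value);
	ir_node  *frame  = get_irg_frame(irg);
	/* the Spill produces a mode_M value representing the spilled contents */
	ir_node  *spill  = be_new_Spill(cls, cls_frame, get_nodes_block(value),
	                                frame, value);
	/* the Reload consumes the Spill's memory value and recreates the value */
	ir_node  *reload = be_new_Reload(cls, cls_frame, get_nodes_block(before),
	                                 frame, spill, get_irn_mode(value));
	sched_add_after(value, spill);
	sched_add_before(before, reload);
	return reload;
}
#endif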
325 
326 ir_node *be_new_Perm(const arch_register_class_t *cls, ir_node *block,
327                      int n, ir_node *in[])
328 {
329 	int            i;
330 	ir_graph       *irg = get_Block_irg(block);
331 	be_node_attr_t *attr;
332 
333 	ir_node *irn = new_ir_node(NULL, irg, block, op_be_Perm, mode_T, n, in);
334 	init_node_attr(irn, n, n);
335 	attr                = (be_node_attr_t*) get_irn_generic_attr(irn);
336 	attr->exc.pin_state = op_pin_state_pinned;
337 	for (i = 0; i < n; ++i) {
338 		const ir_node             *input = in[i];
339 		const arch_register_req_t *req   = arch_get_irn_register_req(input);
340 		if (req->width == 1) {
341 			be_set_constr_in(irn, i, cls->class_req);
342 			be_set_constr_out(irn, i, cls->class_req);
343 		} else {
344 			arch_register_req_t *new_req = allocate_reg_req(irn);
345 			new_req->cls   = cls;
346 			new_req->type  = (req->type & arch_register_req_type_aligned);
347 			new_req->width = req->width;
348 			be_set_constr_in(irn, i, new_req);
349 			be_set_constr_out(irn, i, new_req);
350 		}
351 	}
352 
353 	return irn;
354 }
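
/*
 * Illustrative sketch (assumption, not original code): creating a Perm over
 * two values of the same register class.  Proj number i of the Perm
 * corresponds to input number i; the register allocator may then place the
 * outputs in permuted registers.  "v0" and "v1" are hypothetical nodes.
 */
#if 0
static void example_make_perm(const arch_register_class_t *cls,
                              ir_node *block, ir_node *v0, ir_node *v1)
{
	ir_node *in[]  = { v0, v1 };
	ir_node *perm  = be_new_Perm(cls, block, 2, in);
	ir_node *proj0 = new_r_Proj(perm, get_irn_mode(v0), 0);
	ir_node *proj1 = new_r_Proj(perm, get_irn_mode(v1), 1);
	(void)proj0;
	(void)proj1;
	/* users behind the Perm would be rewired to proj0/proj1 (not shown) */
}
#endif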
355 
356 void be_Perm_reduce(ir_node *perm, int new_size, int *map)
357 {
358 	int             arity      = get_irn_arity(perm);
359 	const arch_register_req_t **old_in_reqs
360 		= ALLOCAN(const arch_register_req_t*, arity);
361 	reg_out_info_t  *old_infos = ALLOCAN(reg_out_info_t, arity);
362 	backend_info_t  *info      = be_get_info(perm);
363 	ir_node        **new_in;
364 	int              i;
365 
366 	assert(be_is_Perm(perm));
367 	assert(new_size <= arity);
368 
369 	new_in = ALLOCAN(ir_node*, new_size);
370 
371 	/* save the old register data */
372 	memcpy(old_in_reqs, info->in_reqs, arity * sizeof(old_in_reqs[0]));
373 	memcpy(old_infos, info->out_infos, arity * sizeof(old_infos[0]));
374 
375 	/* compose the new in array and set the new register data directly */
376 	for (i = 0; i < new_size; ++i) {
377 		int idx = map[i];
378 		new_in[i]          = get_irn_n(perm, idx);
379 		info->in_reqs[i]   = old_in_reqs[idx];
380 		info->out_infos[i] = old_infos[idx];
381 	}
382 
383 	set_irn_in(perm, new_size, new_in);
384 }
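
/*
 * Illustrative sketch (assumption): be_Perm_reduce() keeps only the inputs
 * selected by "map"; map[i] names the old input index that becomes new
 * input/output position i.  Shrinking a 3-way Perm to its former inputs 0
 * and 2 could look like this; note that the function does not touch Proj
 * users, so any renumbering of surviving Projs is up to the caller.
 */
#if 0
static void example_reduce_perm(ir_node *perm)
{
	int map[2] = { 0, 2 };   /* new position 0 <- old 0, new position 1 <- old 2 */
	be_Perm_reduce(perm, 2, map);
}
#endif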
385 
386 ir_node *be_new_MemPerm(ir_node *block, int n, ir_node *in[])
387 {
388 	ir_graph                     *irg       = get_Block_irg(block);
389 	const arch_env_t             *arch_env  = be_get_irg_arch_env(irg);
390 	ir_node                      *frame     = get_irg_frame(irg);
391 	const arch_register_t        *sp        = arch_env->sp;
392 	ir_node                      *irn;
393 	be_memperm_attr_t            *attr;
394 	ir_node                     **real_in;
395 
396 	real_in = ALLOCAN(ir_node*, n + 1);
397 	real_in[0] = frame;
398 	memcpy(&real_in[1], in, n * sizeof(real_in[0]));
399 
400 	irn = new_ir_node(NULL, irg, block, op_be_MemPerm, mode_T, n+1, real_in);
401 
402 	init_node_attr(irn, n + 1, n);
403 	be_node_set_reg_class_in(irn, 0, sp->reg_class);
404 
405 	attr               = (be_memperm_attr_t*)get_irn_generic_attr(irn);
406 	attr->in_entities  = OALLOCNZ(irg->obst, ir_entity*, n);
407 	attr->out_entities = OALLOCNZ(irg->obst, ir_entity*, n);
408 
409 	return irn;
410 }
411 
412 ir_node *be_new_Copy(ir_node *bl, ir_node *op)
413 {
414 	ir_node *in[1];
415 	ir_node *res;
416 	arch_register_req_t *req;
417 	be_node_attr_t *attr;
418 	ir_graph *irg = get_Block_irg(bl);
419 	const arch_register_req_t   *in_req = arch_get_irn_register_req(op);
420 	const arch_register_class_t *cls    = in_req->cls;
421 
422 	in[0] = op;
423 	res   = new_ir_node(NULL, irg, bl, op_be_Copy, get_irn_mode(op), 1, in);
424 	init_node_attr(res, 1, 1);
425 	attr = (be_node_attr_t*) get_irn_generic_attr(res);
426 	attr->exc.pin_state = op_pin_state_floats;
427 	be_node_set_reg_class_in(res, 0, cls);
428 	be_node_set_reg_class_out(res, 0, cls);
429 
430 	req = allocate_reg_req(res);
431 	req->cls        = cls;
432 	req->type       = arch_register_req_type_should_be_same
433 		| (in_req->type & arch_register_req_type_aligned);
434 	req->other_same = 1U << 0;
435 	req->width      = in_req->width;
436 	be_set_constr_out(res, 0, req);
437 
438 	return res;
439 }
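
/*
 * Illustrative sketch (assumption): inserting a Copy of a value right after
 * its definition, e.g. to help satisfy a register constraint.  The Copy's
 * output carries a should_be_same hint towards its input, as set up above.
 * "value" and the scheduling call are hypothetical.
 */
#if 0
static ir_node *example_insert_copy(ir_node *value)
{
	ir_node *copy = be_new_Copy(get_nodes_block(value), value);
	sched_add_after(value, copy);
	return copy;
}
#endif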
440 
441 ir_node *be_get_Copy_op(const ir_node *cpy)
442 {
443 	return get_irn_n(cpy, n_be_Copy_op);
444 }
445 
446 ir_node *be_new_Keep(ir_node *block, int n, ir_node *in[])
447 {
448 	int i;
449 	ir_node *res;
450 	ir_graph *irg = get_Block_irg(block);
451 	be_node_attr_t *attr;
452 
453 	res = new_ir_node(NULL, irg, block, op_be_Keep, mode_ANY, -1, NULL);
454 	init_node_attr(res, -1, 1);
455 	attr = (be_node_attr_t*) get_irn_generic_attr(res);
456 	attr->exc.pin_state = op_pin_state_pinned;
457 
458 	for (i = 0; i < n; ++i) {
459 		ir_node *pred = in[i];
460 		add_irn_n(res, pred);
461 		const arch_register_req_t *req = arch_get_irn_register_req(pred);
462 		req = req->cls != NULL ? req->cls->class_req : arch_no_register_req;
463 		add_register_req_in(res, req);
464 	}
465 	keep_alive(res);
466 
467 	return res;
468 }
469 
470 void be_Keep_add_node(ir_node *keep, const arch_register_class_t *cls, ir_node *node)
471 {
472 	assert(be_is_Keep(keep));
473 	add_irn_n(keep, node);
474 	add_register_req_in(keep, cls->class_req);
475 }
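
/*
 * Illustrative sketch (assumption): keeping otherwise unused values alive
 * across register allocation.  A Keep can be created with an initial set of
 * operands and extended later via be_Keep_add_node(); "block", "value",
 * "other" and "cls" are hypothetical.
 */
#if 0
static void example_keep_values(ir_node *block, ir_node *value,
                                ir_node *other,
                                const arch_register_class_t *cls)
{
	ir_node *in[]  = { value };
	ir_node *keep  = be_new_Keep(block, 1, in);
	/* attach a further operand, constrained to the given class */
	be_Keep_add_node(keep, cls, other);
}
#endif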
476 
477 ir_node *be_new_Call(dbg_info *dbg, ir_graph *irg, ir_node *bl, ir_node *mem,
478 		const arch_register_req_t *sp_req, ir_node *sp,
479 		const arch_register_req_t *ptr_req, ir_node *ptr,
480 		int n_outs, int n, ir_node *in[], ir_type *call_tp)
481 {
482 	be_call_attr_t *a;
483 	int real_n = n_be_Call_first_arg + n;
484 	ir_node *irn;
485 	ir_node **real_in;
486 
487 	NEW_ARR_A(ir_node *, real_in, real_n);
488 	real_in[n_be_Call_mem] = mem;
489 	real_in[n_be_Call_sp]  = sp;
490 	real_in[n_be_Call_ptr] = ptr;
491 	memcpy(&real_in[n_be_Call_first_arg], in, n * sizeof(in[0]));
492 
493 	irn = new_ir_node(dbg, irg, bl, op_be_Call, mode_T, real_n, real_in);
494 	init_node_attr(irn, real_n, n_outs);
495 	a                     = (be_call_attr_t*)get_irn_generic_attr(irn);
496 	a->ent                = NULL;
497 	a->call_tp            = call_tp;
498 	a->pop                = 0;
499 	a->base.exc.pin_state = op_pin_state_pinned;
500 	be_set_constr_in(irn, n_be_Call_sp, sp_req);
501 	be_set_constr_in(irn, n_be_Call_ptr, ptr_req);
502 	return irn;
503 }
504 
505 ir_entity *be_Call_get_entity(const ir_node *call)
506 {
507 	const be_call_attr_t *a = (const be_call_attr_t*)get_irn_generic_attr_const(call);
508 	assert(be_is_Call(call));
509 	return a->ent;
510 }
511 
512 void be_Call_set_entity(ir_node *call, ir_entity *ent)
513 {
514 	be_call_attr_t *a = (be_call_attr_t*)get_irn_generic_attr(call);
515 	assert(be_is_Call(call));
516 	a->ent = ent;
517 }
518 
519 ir_type *be_Call_get_type(ir_node *call)
520 {
521 	const be_call_attr_t *a = (const be_call_attr_t*)get_irn_generic_attr_const(call);
522 	assert(be_is_Call(call));
523 	return a->call_tp;
524 }
525 
526 void be_Call_set_type(ir_node *call, ir_type *call_tp)
527 {
528 	be_call_attr_t *a = (be_call_attr_t*)get_irn_generic_attr(call);
529 	assert(be_is_Call(call));
530 	a->call_tp = call_tp;
531 }
532 
533 void be_Call_set_pop(ir_node *call, unsigned pop)
534 {
535 	be_call_attr_t *a = (be_call_attr_t*)get_irn_generic_attr(call);
536 	a->pop = pop;
537 }
538 
539 unsigned be_Call_get_pop(const ir_node *call)
540 {
541 	const be_call_attr_t *a = (const be_call_attr_t*)get_irn_generic_attr_const(call);
542 	return a->pop;
543 }
544 
545 ir_node *be_new_Return(dbg_info *dbg, ir_graph *irg, ir_node *block, int n_res,
546                        unsigned pop, int n, ir_node *in[])
547 {
548 	be_return_attr_t *a;
549 	ir_node *res;
550 
551 	res = new_ir_node(dbg, irg, block, op_be_Return, mode_X, n, in);
552 	init_node_attr(res, n, 1);
553 	be_set_constr_out(res, 0, arch_no_register_req);
554 
555 	a = (be_return_attr_t*)get_irn_generic_attr(res);
556 	a->num_ret_vals       = n_res;
557 	a->pop                = pop;
558 	a->emit_pop           = 0;
559 	a->base.exc.pin_state = op_pin_state_pinned;
560 
561 	return res;
562 }
563 
564 int be_Return_get_n_rets(const ir_node *ret)
565 {
566 	const be_return_attr_t *a = (const be_return_attr_t*)get_irn_generic_attr_const(ret);
567 	return a->num_ret_vals;
568 }
569 
570 unsigned be_Return_get_pop(const ir_node *ret)
571 {
572 	const be_return_attr_t *a = (const be_return_attr_t*)get_irn_generic_attr_const(ret);
573 	return a->pop;
574 }
575 
576 int be_Return_get_emit_pop(const ir_node *ret)
577 {
578 	const be_return_attr_t *a = (const be_return_attr_t*)get_irn_generic_attr_const(ret);
579 	return a->emit_pop;
580 }
581 
582 void be_Return_set_emit_pop(ir_node *ret, int emit_pop)
583 {
584 	be_return_attr_t *a = (be_return_attr_t*)get_irn_generic_attr(ret);
585 	a->emit_pop = emit_pop;
586 }
587 
588 ir_node *be_new_IncSP(const arch_register_t *sp, ir_node *bl,
589                       ir_node *old_sp, int offset, int align)
590 {
591 	be_incsp_attr_t *a;
592 	ir_node *irn;
593 	ir_node *in[1];
594 	ir_graph *irg = get_Block_irg(bl);
595 
596 	in[0]     = old_sp;
597 	irn       = new_ir_node(NULL, irg, bl, op_be_IncSP, sp->reg_class->mode,
598 	                        ARRAY_SIZE(in), in);
599 	init_node_attr(irn, 1, 1);
600 	a                     = (be_incsp_attr_t*)get_irn_generic_attr(irn);
601 	a->offset             = offset;
602 	a->align              = align;
603 	a->base.exc.pin_state = op_pin_state_pinned;
604 
605 	/* Set output constraint to stack register. */
606 	be_node_set_reg_class_in(irn, 0, sp->reg_class);
607 	be_set_constr_single_reg_out(irn, 0, sp, arch_register_req_type_produces_sp);
608 
609 	return irn;
610 }
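
/*
 * Illustrative sketch (assumption): reserving and later releasing stack space
 * with a pair of IncSP nodes.  The sign convention of "offset" is left to the
 * backend; this sketch only shows the data dependency through the stack
 * pointer value.  "sp_reg" and "old_sp" are hypothetical.
 */
#if 0
static ir_node *example_adjust_sp(const arch_register_t *sp_reg,
                                  ir_node *block, ir_node *old_sp)
{
	/* reserve 16 bytes ... */
	ir_node *reserve = be_new_IncSP(sp_reg, block, old_sp, 16, 0);
	/* ... and give them back, continuing from the new stack pointer value */
	ir_node *release = be_new_IncSP(sp_reg, block, reserve, -16, 0);
	return release;
}
#endif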
611 
612 ir_node *be_new_AddSP(const arch_register_t *sp, ir_node *bl, ir_node *old_sp,
613 		ir_node *sz)
614 {
615 	ir_node *irn;
616 	ir_node *in[n_be_AddSP_last];
617 	ir_graph *irg;
618 	be_node_attr_t *attr;
619 
620 	in[n_be_AddSP_old_sp] = old_sp;
621 	in[n_be_AddSP_size]   = sz;
622 
623 	irg = get_Block_irg(bl);
624 	irn = new_ir_node(NULL, irg, bl, op_be_AddSP, mode_T, n_be_AddSP_last, in);
625 	init_node_attr(irn, n_be_AddSP_last, pn_be_AddSP_last);
626 	attr = (be_node_attr_t*) get_irn_generic_attr(irn);
627 	attr->exc.pin_state = op_pin_state_pinned;
628 
629 	/* Set output constraint to stack register. */
630 	be_set_constr_single_reg_in(irn, n_be_AddSP_old_sp, sp,
631 	                            arch_register_req_type_none);
632 	be_node_set_reg_class_in(irn, n_be_AddSP_size, sp->reg_class);
633 	be_set_constr_single_reg_out(irn, pn_be_AddSP_sp, sp,
634 	                             arch_register_req_type_produces_sp);
635 
636 	return irn;
637 }
638 
639 ir_node *be_new_SubSP(const arch_register_t *sp, ir_node *bl, ir_node *old_sp, ir_node *sz)
640 {
641 	ir_node *irn;
642 	ir_node *in[n_be_SubSP_last];
643 	ir_graph *irg;
644 	be_node_attr_t *attr;
645 
646 	in[n_be_SubSP_old_sp] = old_sp;
647 	in[n_be_SubSP_size]   = sz;
648 
649 	irg = get_Block_irg(bl);
650 	irn = new_ir_node(NULL, irg, bl, op_be_SubSP, mode_T, n_be_SubSP_last, in);
651 	init_node_attr(irn, n_be_SubSP_last, pn_be_SubSP_last);
652 	attr = (be_node_attr_t*) get_irn_generic_attr(irn);
653 	attr->exc.pin_state = op_pin_state_pinned;
654 
655 	/* Set output constraint to stack register. */
656 	be_set_constr_single_reg_in(irn, n_be_SubSP_old_sp, sp,
657 	                            arch_register_req_type_none);
658 	be_node_set_reg_class_in(irn, n_be_SubSP_size, sp->reg_class);
659 	be_set_constr_single_reg_out(irn, pn_be_SubSP_sp, sp, arch_register_req_type_produces_sp);
660 
661 	return irn;
662 }
663 
664 ir_node *be_new_Start(dbg_info *dbgi, ir_node *bl, int n_outs)
665 {
666 	ir_node *res;
667 	ir_graph *irg = get_Block_irg(bl);
668 	be_node_attr_t *attr;
669 
670 	res = new_ir_node(dbgi, irg, bl, op_be_Start, mode_T, 0, NULL);
671 	init_node_attr(res, 0, n_outs);
672 	attr = (be_node_attr_t*) get_irn_generic_attr(res);
673 	attr->exc.pin_state = op_pin_state_pinned;
674 
675 	return res;
676 }
677 
678 ir_node *be_new_FrameAddr(const arch_register_class_t *cls_frame, ir_node *bl, ir_node *frame, ir_entity *ent)
679 {
680 	be_frame_attr_t *a;
681 	ir_node *irn;
682 	ir_node *in[1];
683 	ir_graph *irg = get_Block_irg(bl);
684 
685 	in[0]  = frame;
686 	irn    = new_ir_node(NULL, irg, bl, op_be_FrameAddr, get_irn_mode(frame), 1, in);
687 	init_node_attr(irn, 1, 1);
688 	a                     = (be_frame_attr_t*)get_irn_generic_attr(irn);
689 	a->ent                = ent;
690 	a->offset             = 0;
691 	a->base.exc.pin_state = op_pin_state_floats;
692 	be_node_set_reg_class_in(irn, 0, cls_frame);
693 	be_node_set_reg_class_out(irn, 0, cls_frame);
694 
695 	return optimize_node(irn);
696 }
697 
698 ir_node *be_get_FrameAddr_frame(const ir_node *node)
699 {
700 	assert(be_is_FrameAddr(node));
701 	return get_irn_n(node, n_be_FrameAddr_ptr);
702 }
703 
704 ir_entity *be_get_FrameAddr_entity(const ir_node *node)
705 {
706 	const be_frame_attr_t *attr = (const be_frame_attr_t*)get_irn_generic_attr_const(node);
707 	return attr->ent;
708 }
709 
710 ir_node *be_new_CopyKeep(ir_node *bl, ir_node *src, int n, ir_node *in_keep[])
711 {
712 	ir_node  *irn;
713 	ir_node **in = ALLOCAN(ir_node*, n + 1);
714 	ir_graph *irg = get_Block_irg(bl);
715 	const arch_register_req_t   *req  = arch_get_irn_register_req(src);
716 	const arch_register_class_t *cls  = req->cls;
717 	ir_mode                     *mode = get_irn_mode(src);
718 	be_node_attr_t *attr;
719 
720 	in[0] = src;
721 	memcpy(&in[1], in_keep, n * sizeof(in[0]));
722 	irn   = new_ir_node(NULL, irg, bl, op_be_CopyKeep, mode, n + 1, in);
723 	init_node_attr(irn, n + 1, 1);
724 	attr = (be_node_attr_t*) get_irn_generic_attr(irn);
725 	attr->exc.pin_state = op_pin_state_floats;
726 	be_node_set_reg_class_in(irn, 0, cls);
727 	be_node_set_reg_class_out(irn, 0, cls);
728 	for (int i = 0; i < n; ++i) {
729 		ir_node *pred = in_keep[i];
730 		const arch_register_req_t *req = arch_get_irn_register_req(pred);
731 		req = req->cls != NULL ? req->cls->class_req : arch_no_register_req;
732 		be_set_constr_in(irn, i+1, req);
733 	}
734 
735 	return irn;
736 }
737 
738 ir_node *be_new_CopyKeep_single(ir_node *bl, ir_node *src, ir_node *keep)
739 {
740 	return be_new_CopyKeep(bl, src, 1, &keep);
741 }
742 
743 ir_node *be_get_CopyKeep_op(const ir_node *cpy)
744 {
745 	return get_irn_n(cpy, n_be_CopyKeep_op);
746 }
747 
748 void be_set_CopyKeep_op(ir_node *cpy, ir_node *op)
749 {
750 	set_irn_n(cpy, n_be_CopyKeep_op, op);
751 }
752 
753 static bool be_has_frame_entity(const ir_node *irn)
754 {
755 	switch (get_irn_opcode(irn)) {
756 	case beo_Spill:
757 	case beo_Reload:
758 	case beo_FrameAddr:
759 		return true;
760 	default:
761 		return false;
762 	}
763 }
764 
765 ir_entity *be_get_frame_entity(const ir_node *irn)
766 {
767 	if (be_has_frame_entity(irn)) {
768 		const be_frame_attr_t *a = (const be_frame_attr_t*)get_irn_generic_attr_const(irn);
769 		return a->ent;
770 	}
771 	return NULL;
772 }
773 
774 int be_get_frame_offset(const ir_node *irn)
775 {
776 	assert(is_be_node(irn));
777 	if (be_has_frame_entity(irn)) {
778 		const be_frame_attr_t *a = (const be_frame_attr_t*)get_irn_generic_attr_const(irn);
779 		return a->offset;
780 	}
781 	return 0;
782 }
783 
784 void be_set_MemPerm_in_entity(const ir_node *irn, int n, ir_entity *ent)
785 {
786 	const be_memperm_attr_t *attr = (const be_memperm_attr_t*)get_irn_generic_attr_const(irn);
787 
788 	assert(be_is_MemPerm(irn));
789 	assert(n < be_get_MemPerm_entity_arity(irn));
790 
791 	attr->in_entities[n] = ent;
792 }
793 
794 ir_entity* be_get_MemPerm_in_entity(const ir_node* irn, int n)
795 {
796 	const be_memperm_attr_t *attr = (const be_memperm_attr_t*)get_irn_generic_attr_const(irn);
797 
798 	assert(be_is_MemPerm(irn));
799 	assert(n < be_get_MemPerm_entity_arity(irn));
800 
801 	return attr->in_entities[n];
802 }
803 
804 void be_set_MemPerm_out_entity(const ir_node *irn, int n, ir_entity *ent)
805 {
806 	const be_memperm_attr_t *attr = (const be_memperm_attr_t*)get_irn_generic_attr_const(irn);
807 
808 	assert(be_is_MemPerm(irn));
809 	assert(n < be_get_MemPerm_entity_arity(irn));
810 
811 	attr->out_entities[n] = ent;
812 }
813 
814 ir_entity* be_get_MemPerm_out_entity(const ir_node* irn, int n)
815 {
816 	const be_memperm_attr_t *attr = (const be_memperm_attr_t*)get_irn_generic_attr_const(irn);
817 
818 	assert(be_is_MemPerm(irn));
819 	assert(n < be_get_MemPerm_entity_arity(irn));
820 
821 	return attr->out_entities[n];
822 }
823 
824 int be_get_MemPerm_entity_arity(const ir_node *irn)
825 {
826 	return get_irn_arity(irn) - 1;
827 }
828 
829 const arch_register_req_t *be_create_reg_req(struct obstack *obst,
830 		const arch_register_t *reg, arch_register_req_type_t additional_types)
831 {
832 	arch_register_req_t         *req = OALLOC(obst, arch_register_req_t);
833 	const arch_register_class_t *cls = reg->reg_class;
834 	unsigned                    *limited_bitset;
835 
836 	limited_bitset = rbitset_obstack_alloc(obst, arch_register_class_n_regs(cls));
837 	rbitset_set(limited_bitset, reg->index);
838 
839 	req->type    = arch_register_req_type_limited | additional_types;
840 	req->cls     = cls;
841 	req->limited = limited_bitset;
842 	req->width   = 1;
843 	return req;
844 }
845 
846 void be_set_constr_single_reg_in(ir_node *node, int pos,
847 		const arch_register_t *reg, arch_register_req_type_t additional_types)
848 {
849 	const arch_register_req_t *req;
850 
851 	if (additional_types == 0) {
852 		req = reg->single_req;
853 	} else {
854 		ir_graph       *irg  = get_irn_irg(node);
855 		struct obstack *obst = be_get_be_obst(irg);
856 		req = be_create_reg_req(obst, reg, additional_types);
857 	}
858 	be_set_constr_in(node, pos, req);
859 }
860 
861 void be_set_constr_single_reg_out(ir_node *node, int pos,
862 		const arch_register_t *reg, arch_register_req_type_t additional_types)
863 {
864 	ir_graph                  *irg  = get_irn_irg(node);
865 	be_irg_t                  *birg = be_birg_from_irg(irg);
866 	const arch_register_req_t *req;
867 
868 	/* if we have an ignore register, add ignore flag and just assign it */
869 	if (!rbitset_is_set(birg->allocatable_regs, reg->global_index)) {
870 		additional_types |= arch_register_req_type_ignore;
871 	}
872 
873 	if (additional_types == 0) {
874 		req = reg->single_req;
875 	} else {
876 		struct obstack *obst = be_get_be_obst(irg);
877 		req = be_create_reg_req(obst, reg, additional_types);
878 	}
879 
880 	arch_set_irn_register_out(node, pos, reg);
881 	be_set_constr_out(node, pos, req);
882 }
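
/*
 * Illustrative sketch (assumption): pinning the first result of a node to one
 * particular register, as a backend might do for a fixed result register.
 * "node" and "reg" are hypothetical.
 */
#if 0
static void example_pin_result(ir_node *node, const arch_register_t *reg)
{
	be_set_constr_single_reg_out(node, 0, reg, arch_register_req_type_none);
}
#endif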
883 
884 void be_node_set_reg_class_in(ir_node *irn, int pos,
885                               const arch_register_class_t *cls)
886 {
887 	be_set_constr_in(irn, pos, cls->class_req);
888 }
889 
890 void be_node_set_reg_class_out(ir_node *irn, int pos,
891                                const arch_register_class_t *cls)
892 {
893 	be_set_constr_out(irn, pos, cls->class_req);
894 }
895 
896 ir_node *be_get_IncSP_pred(ir_node *irn)
897 {
898 	assert(be_is_IncSP(irn));
899 	return get_irn_n(irn, 0);
900 }
901 
902 void be_set_IncSP_pred(ir_node *incsp, ir_node *pred)
903 {
904 	assert(be_is_IncSP(incsp));
905 	set_irn_n(incsp, 0, pred);
906 }
907 
908 void be_set_IncSP_offset(ir_node *irn, int offset)
909 {
910 	be_incsp_attr_t *a = (be_incsp_attr_t*)get_irn_generic_attr(irn);
911 	assert(be_is_IncSP(irn));
912 	a->offset = offset;
913 }
914 
915 int be_get_IncSP_offset(const ir_node *irn)
916 {
917 	const be_incsp_attr_t *a = (const be_incsp_attr_t*)get_irn_generic_attr_const(irn);
918 	assert(be_is_IncSP(irn));
919 	return a->offset;
920 }
921 
922 int be_get_IncSP_align(const ir_node *irn)
923 {
924 	const be_incsp_attr_t *a = (const be_incsp_attr_t*)get_irn_generic_attr_const(irn);
925 	assert(be_is_IncSP(irn));
926 	return a->align;
927 }
928 
929 static ir_entity *be_node_get_frame_entity(const ir_node *irn)
930 {
931 	return be_get_frame_entity(irn);
932 }
933 
934 void be_node_set_frame_entity(ir_node *irn, ir_entity *ent)
935 {
936 	be_frame_attr_t *a;
937 
938 	assert(be_has_frame_entity(irn));
939 
940 	a = (be_frame_attr_t*)get_irn_generic_attr(irn);
941 	a->ent = ent;
942 }
943 
944 static void be_node_set_frame_offset(ir_node *irn, int offset)
945 {
946 	be_frame_attr_t *a;
947 
948 	if (!be_has_frame_entity(irn))
949 		return;
950 
951 	a = (be_frame_attr_t*)get_irn_generic_attr(irn);
952 	a->offset = offset;
953 }
954 
955 static int be_node_get_sp_bias(const ir_node *irn)
956 {
957 	if (be_is_IncSP(irn))
958 		return be_get_IncSP_offset(irn);
959 	if (be_is_Call(irn))
960 		return -(int)be_Call_get_pop(irn);
961 
962 	return 0;
963 }
964 
965 
966 
967 /* for be nodes */
968 static const arch_irn_ops_t be_node_irn_ops = {
969 	be_node_get_frame_entity,
970 	be_node_set_frame_offset,
971 	be_node_get_sp_bias,
972 	NULL,    /* get_inverse             */
973 	NULL,    /* get_op_estimated_cost   */
974 	NULL,    /* possible_memory_operand */
975 	NULL,    /* perform_memory_operand  */
976 };
977 
978 static int get_start_reg_index(ir_graph *irg, const arch_register_t *reg)
979 {
980 	ir_node *start  = get_irg_start(irg);
981 	unsigned n_outs = arch_get_irn_n_outs(start);
982 	int      i;
983 
984 	/* do a naive linear search... */
985 	for (i = 0; i < (int)n_outs; ++i) {
986 		const arch_register_req_t *out_req
987 			= arch_get_irn_register_req_out(start, i);
988 		if (! (out_req->type & arch_register_req_type_limited))
989 			continue;
990 		if (out_req->cls != reg->reg_class)
991 			continue;
992 		if (!rbitset_is_set(out_req->limited, reg->index))
993 			continue;
994 		return i;
995 	}
996 	panic("Tried querying undefined register '%s' at Start", reg->name);
997 }
998 
999 ir_node *be_get_initial_reg_value(ir_graph *irg, const arch_register_t *reg)
1000 {
1001 	int      i     = get_start_reg_index(irg, reg);
1002 	ir_node *start = get_irg_start(irg);
1003 	ir_mode *mode  = arch_register_class_mode(reg->reg_class);
1004 
1005 	foreach_out_edge(start, edge) {
1006 		ir_node *proj = get_edge_src_irn(edge);
1007 		if (!is_Proj(proj)) // maybe End/Anchor
1008 			continue;
1009 		if (get_Proj_proj(proj) == i) {
1010 			return proj;
1011 		}
1012 	}
1013 	return new_r_Proj(start, mode, i);
1014 }
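
/*
 * Illustrative sketch (assumption): fetching the initial stack pointer value
 * of a graph, e.g. as the starting point for building IncSP chains.  The
 * arch_env access mirrors the one used in be_new_MemPerm() above.
 */
#if 0
static ir_node *example_initial_sp(ir_graph *irg)
{
	const arch_env_t *arch_env = be_get_irg_arch_env(irg);
	return be_get_initial_reg_value(irg, arch_env->sp);
}
#endif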
1015 
1016 int be_find_return_reg_input(ir_node *ret, const arch_register_t *reg)
1017 {
1018 	int arity = get_irn_arity(ret);
1019 	int i;
1020 	/* do a naive linear search... */
1021 	for (i = 0; i < arity; ++i) {
1022 		const arch_register_req_t *req = arch_get_irn_register_req_in(ret, i);
1023 		if (! (req->type & arch_register_req_type_limited))
1024 			continue;
1025 		if (req->cls != reg->reg_class)
1026 			continue;
1027 		if (!rbitset_is_set(req->limited, reg->index))
1028 			continue;
1029 		return i;
1030 	}
1031 	panic("Tried querying undefined register '%s' at Return", reg->name);
1032 }
1033 
1034 static ir_entity* dummy_get_frame_entity(const ir_node *node)
1035 {
1036 	(void) node;
1037 	return NULL;
1038 }
1039 
1040 static void dummy_set_frame_offset(ir_node *node, int bias)
1041 {
1042 	(void) node;
1043 	(void) bias;
1044 	panic("should not be called");
1045 }
1046 
1047 static int dummy_get_sp_bias(const ir_node *node)
1048 {
1049 	(void) node;
1050 	return 0;
1051 }
1052 
1053 /* for "middleend" nodes */
1054 static const arch_irn_ops_t dummy_be_irn_ops = {
1055 	dummy_get_frame_entity,
1056 	dummy_set_frame_offset,
1057 	dummy_get_sp_bias,
1058 	NULL,      /* get_inverse           */
1059 	NULL,      /* get_op_estimated_cost */
1060 	NULL,      /* possible_memory_operand */
1061 	NULL,      /* perform_memory_operand */
1062 };
1063 
1064 
1065 
1066 ir_node *be_new_Phi(ir_node *block, int n_ins, ir_node **ins, ir_mode *mode,
1067                     const arch_register_req_t *req)
1068 {
1069 	ir_graph       *irg  = get_irn_irg(block);
1070 	struct obstack *obst = be_get_be_obst(irg);
1071 	backend_info_t *info;
1072 	int             i;
1073 
1074 	ir_node *phi = new_ir_node(NULL, irg, block, op_Phi, mode, n_ins, ins);
1075 	phi->attr.phi.u.backedge = new_backedge_arr(irg->obst, n_ins);
1076 	info = be_get_info(phi);
1077 	info->out_infos = NEW_ARR_D(reg_out_info_t, obst, 1);
1078 	memset(info->out_infos, 0, 1 * sizeof(info->out_infos[0]));
1079 	info->in_reqs = OALLOCN(obst, const arch_register_req_t*, n_ins);
1080 
1081 	info->out_infos[0].req = req;
1082 	for (i = 0; i < n_ins; ++i) {
1083 		info->in_reqs[i] = req;
1084 	}
1085 	irn_verify_irg(phi, irg);
1086 	phi = optimize_node(phi);
1087 	return phi;
1088 }
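
/*
 * Illustrative sketch (assumption): creating a backend Phi whose inputs and
 * output all share one register requirement, e.g. the class-wide requirement
 * of a register class.  "block", "v0" and "v1" are hypothetical.
 */
#if 0
static ir_node *example_backend_phi(ir_node *block, ir_node *v0, ir_node *v1,
                                    const arch_register_class_t *cls)
{
	ir_node *ins[] = { v0, v1 };
	return be_new_Phi(block, 2, ins, get_irn_mode(v0), cls->class_req);
}
#endif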
1089 
1090 void be_set_phi_reg_req(ir_node *node, const arch_register_req_t *req)
1091 {
1092 	int arity = get_irn_arity(node);
1093 	int i;
1094 
1095 	backend_info_t *info = be_get_info(node);
1096 	info->out_infos[0].req = req;
1097 	for (i = 0; i < arity; ++i) {
1098 		info->in_reqs[i] = req;
1099 	}
1100 
1101 	assert(mode_is_datab(get_irn_mode(node)));
1102 }
1103 
1104 void be_dump_phi_reg_reqs(FILE *F, const ir_node *node, dump_reason_t reason)
1105 {
1106 	switch (reason) {
1107 	case dump_node_opcode_txt:
1108 		fputs(get_op_name(get_irn_op(node)), F);
1109 		break;
1110 	case dump_node_mode_txt:
1111 		fprintf(F, "%s", get_mode_name(get_irn_mode(node)));
1112 		break;
1113 	case dump_node_nodeattr_txt:
1114 		break;
1115 	case dump_node_info_txt:
1116 	{
1117 		backend_info_t *info = be_get_info(node);
1118 		if (info != NULL && info->out_infos[0].req != NULL) {
1119 			arch_dump_reqs_and_registers(F, node);
1120 		}
1121 		break;
1122 	}
1123 
1124 	default:
1125 		break;
1126 	}
1127 }
1128 
1129 static const arch_irn_ops_t phi_irn_ops = {
1130 	dummy_get_frame_entity,
1131 	dummy_set_frame_offset,
1132 	dummy_get_sp_bias,
1133 	NULL,    /* get_inverse             */
1134 	NULL,    /* get_op_estimated_cost   */
1135 	NULL,    /* possible_memory_operand */
1136 	NULL,    /* perform_memory_operand  */
1137 };
1138 
1139 
1140 
1141 /**
1142  * ir_op-Operation: dump a be node to file
1143  */
1144 static void dump_node(FILE *f, const ir_node *irn, dump_reason_t reason)
1145 {
1146 	assert(is_be_node(irn));
1147 
1148 	switch (reason) {
1149 		case dump_node_opcode_txt:
1150 			fputs(get_op_name(get_irn_op(irn)), f);
1151 			break;
1152 		case dump_node_mode_txt:
1153 			if (be_is_Copy(irn) || be_is_CopyKeep(irn)) {
1154 				fprintf(f, "%s", get_mode_name(get_irn_mode(irn)));
1155 			}
1156 			break;
1157 		case dump_node_nodeattr_txt:
1158 			if (be_is_Call(irn)) {
1159 				const be_call_attr_t *a = (const be_call_attr_t*)get_irn_generic_attr_const(irn);
1160 				if (a->ent)
1161 					fprintf(f, " [%s] ", get_entity_name(a->ent));
1162 			}
1163 			if (be_is_IncSP(irn)) {
1164 				const be_incsp_attr_t *attr = (const be_incsp_attr_t*)get_irn_generic_attr_const(irn);
1165 				fprintf(f, " [%d] ", attr->offset);
1166 			}
1167 			break;
1168 		case dump_node_info_txt:
1169 			arch_dump_reqs_and_registers(f, irn);
1170 
1171 			if (be_has_frame_entity(irn)) {
1172 				const be_frame_attr_t *a = (const be_frame_attr_t*)get_irn_generic_attr_const(irn);
1173 				if (a->ent) {
1174 					unsigned size = get_type_size_bytes(get_entity_type(a->ent));
1175 					ir_fprintf(f, "frame entity: %+F, offset 0x%x (%d), size 0x%x (%d) bytes\n",
1176 					  a->ent, a->offset, a->offset, size, size);
1177 				}
1178 
1179 			}
1180 
1181 			switch (get_irn_opcode(irn)) {
1182 			case beo_IncSP: {
1183 				const be_incsp_attr_t *a = (const be_incsp_attr_t*)get_irn_generic_attr_const(irn);
1184 				fprintf(f, "align: %d\n", a->align);
1185 				fprintf(f, "offset: %d\n", a->offset);
1186 				break;
1187 			}
1188 			case beo_Call: {
1189 				const be_call_attr_t *a = (const be_call_attr_t*)get_irn_generic_attr_const(irn);
1190 
1191 				if (a->ent)
1192 					fprintf(f, "\ncalling: %s\n", get_entity_name(a->ent));
1193 				break;
1194 			}
1195 			case beo_MemPerm: {
1196 				int i;
1197 				for (i = 0; i < be_get_MemPerm_entity_arity(irn); ++i) {
1198 					ir_entity *in, *out;
1199 					in = be_get_MemPerm_in_entity(irn, i);
1200 					out = be_get_MemPerm_out_entity(irn, i);
1201 					if (in) {
1202 						fprintf(f, "\nin[%d]: %s\n", i, get_entity_name(in));
1203 					}
1204 					if (out) {
1205 						fprintf(f, "\nout[%d]: %s\n", i, get_entity_name(out));
1206 					}
1207 				}
1208 				break;
1209 			}
1210 
1211 			default:
1212 				break;
1213 			}
1214 	}
1215 }
1216 
1217 /**
1218  * ir_op-Operation:
1219  * Copies the backend specific attributes from old node to new node.
1220  */
1221 static void copy_attr(ir_graph *irg, const ir_node *old_node, ir_node *new_node)
1222 {
1223 	const void     *old_attr = get_irn_generic_attr_const(old_node);
1224 	void           *new_attr = get_irn_generic_attr(new_node);
1225 	struct obstack *obst     = be_get_be_obst(irg);
1226 	backend_info_t *old_info = be_get_info(old_node);
1227 	backend_info_t *new_info = be_get_info(new_node);
1228 
1229 	assert(is_be_node(old_node));
1230 	assert(is_be_node(new_node));
1231 
1232 	memcpy(new_attr, old_attr, get_op_attr_size(get_irn_op(old_node)));
1233 
1234 	new_info->flags = old_info->flags;
1235 	if (old_info->out_infos != NULL) {
1236 		size_t n_outs = ARR_LEN(old_info->out_infos);
1237 		/* need dynamic out infos? */
1238 		if (be_is_Perm(new_node)) {
1239 			new_info->out_infos = NEW_ARR_F(reg_out_info_t, n_outs);
1240 		} else {
1241 			new_info->out_infos = NEW_ARR_D(reg_out_info_t, obst, n_outs);
1242 		}
1243 		memcpy(new_info->out_infos, old_info->out_infos,
1244 			   n_outs * sizeof(new_info->out_infos[0]));
1245 	} else {
1246 		new_info->out_infos = NULL;
1247 	}
1248 
1249 	/* input infos */
1250 	if (old_info->in_reqs != NULL) {
1251 		unsigned n_ins = get_irn_arity(old_node);
1252 		/* need dynamic in infos? */
1253 		if (get_irn_op(old_node)->opar == oparity_dynamic) {
1254 			new_info->in_reqs = NEW_ARR_F(const arch_register_req_t*, n_ins);
1255 		} else {
1256 			new_info->in_reqs = OALLOCN(obst,const arch_register_req_t*, n_ins);
1257 		}
1258 		memcpy(new_info->in_reqs, old_info->in_reqs,
1259 		       n_ins * sizeof(new_info->in_reqs[0]));
1260 	} else {
1261 		new_info->in_reqs = NULL;
1262 	}
1263 }
1264 
1265 int is_be_node(const ir_node *irn)
1266 {
1267 	return get_op_ops(get_irn_op(irn))->be_ops == &be_node_irn_ops;
1268 }
1269 
1270 static ir_op *new_be_op(unsigned code, const char *name, op_pin_state p,
1271                         irop_flags flags, op_arity opar, size_t attr_size)
1272 {
1273 	ir_op *res = new_ir_op(code, name, p, flags, opar, 0, attr_size);
1274 	res->ops.dump_node = dump_node;
1275 	res->ops.copy_attr = copy_attr;
1276 	res->ops.be_ops    = &be_node_irn_ops;
1277 	return res;
1278 }
1279 
1280 void be_init_op(void)
1281 {
1282 	unsigned opc;
1283 
1284 	assert(op_be_Spill == NULL);
1285 
1286 	/* Acquire all needed opcodes. */
1287 	op_be_Spill     = new_be_op(beo_Spill,     "be_Spill",     op_pin_state_exc_pinned, irop_flag_none,                          oparity_unary,    sizeof(be_frame_attr_t));
1288 	op_be_Reload    = new_be_op(beo_Reload,    "be_Reload",    op_pin_state_exc_pinned, irop_flag_none,                          oparity_zero,     sizeof(be_frame_attr_t));
1289 	op_be_Perm      = new_be_op(beo_Perm,      "be_Perm",      op_pin_state_exc_pinned, irop_flag_none,                          oparity_variable, sizeof(be_node_attr_t));
1290 	op_be_MemPerm   = new_be_op(beo_MemPerm,   "be_MemPerm",   op_pin_state_exc_pinned, irop_flag_none,                          oparity_variable, sizeof(be_memperm_attr_t));
1291 	op_be_Copy      = new_be_op(beo_Copy,      "be_Copy",      op_pin_state_exc_pinned, irop_flag_none,                          oparity_unary,    sizeof(be_node_attr_t));
1292 	op_be_Keep      = new_be_op(beo_Keep,      "be_Keep",      op_pin_state_exc_pinned, irop_flag_keep,                          oparity_dynamic,  sizeof(be_node_attr_t));
1293 	op_be_CopyKeep  = new_be_op(beo_CopyKeep,  "be_CopyKeep",  op_pin_state_exc_pinned, irop_flag_keep,                          oparity_variable, sizeof(be_node_attr_t));
1294 	op_be_Call      = new_be_op(beo_Call,      "be_Call",      op_pin_state_exc_pinned, irop_flag_fragile|irop_flag_uses_memory, oparity_variable, sizeof(be_call_attr_t));
1295 	ir_op_set_memory_index(op_be_Call, n_be_Call_mem);
1296 	ir_op_set_fragile_indices(op_be_Call, pn_be_Call_X_regular, pn_be_Call_X_except);
1297 	op_be_Return    = new_be_op(beo_Return,    "be_Return",    op_pin_state_exc_pinned, irop_flag_cfopcode,                      oparity_variable, sizeof(be_return_attr_t));
1298 	op_be_AddSP     = new_be_op(beo_AddSP,     "be_AddSP",     op_pin_state_exc_pinned, irop_flag_none,                          oparity_unary,    sizeof(be_node_attr_t));
1299 	op_be_SubSP     = new_be_op(beo_SubSP,     "be_SubSP",     op_pin_state_exc_pinned, irop_flag_none,                          oparity_unary,    sizeof(be_node_attr_t));
1300 	op_be_IncSP     = new_be_op(beo_IncSP,     "be_IncSP",     op_pin_state_exc_pinned, irop_flag_none,                          oparity_unary,    sizeof(be_incsp_attr_t));
1301 	op_be_Start     = new_be_op(beo_Start,     "be_Start",     op_pin_state_exc_pinned, irop_flag_none,                          oparity_zero,     sizeof(be_node_attr_t));
1302 	op_be_FrameAddr = new_be_op(beo_FrameAddr, "be_FrameAddr", op_pin_state_exc_pinned, irop_flag_none,                          oparity_unary,    sizeof(be_frame_attr_t));
1303 
1304 	op_be_Spill->ops.node_cmp_attr     = FrameAddr_cmp_attr;
1305 	op_be_Reload->ops.node_cmp_attr    = FrameAddr_cmp_attr;
1306 	op_be_Perm->ops.node_cmp_attr      = be_nodes_equal;
1307 	op_be_MemPerm->ops.node_cmp_attr   = be_nodes_equal;
1308 	op_be_Copy->ops.node_cmp_attr      = be_nodes_equal;
1309 	op_be_Keep->ops.node_cmp_attr      = be_nodes_equal;
1310 	op_be_CopyKeep->ops.node_cmp_attr  = be_nodes_equal;
1311 	op_be_Call->ops.node_cmp_attr      = Call_cmp_attr;
1312 	op_be_Return->ops.node_cmp_attr    = Return_cmp_attr;
1313 	op_be_AddSP->ops.node_cmp_attr     = be_nodes_equal;
1314 	op_be_SubSP->ops.node_cmp_attr     = be_nodes_equal;
1315 	op_be_IncSP->ops.node_cmp_attr     = IncSP_cmp_attr;
1316 	op_be_Start->ops.node_cmp_attr     = be_nodes_equal;
1317 	op_be_FrameAddr->ops.node_cmp_attr = FrameAddr_cmp_attr;
1318 
1319 	/* attach our dummy ops to middle-end nodes */
1320 	for (opc = iro_First; opc <= iro_Last; ++opc) {
1321 		ir_op *op = ir_get_opcode(opc);
1322 		assert(op->ops.be_ops == NULL);
1323 		op->ops.be_ops = &dummy_be_irn_ops;
1324 	}
1325 
1326 	op_Phi->ops.be_ops = &phi_irn_ops;
1327 }
1328 
1329 void be_finish_op(void)
1330 {
1331 	free_ir_op(op_be_Spill);     op_be_Spill     = NULL;
1332 	free_ir_op(op_be_Reload);    op_be_Reload    = NULL;
1333 	free_ir_op(op_be_Perm);      op_be_Perm      = NULL;
1334 	free_ir_op(op_be_MemPerm);   op_be_MemPerm   = NULL;
1335 	free_ir_op(op_be_Copy);      op_be_Copy      = NULL;
1336 	free_ir_op(op_be_Keep);      op_be_Keep      = NULL;
1337 	free_ir_op(op_be_CopyKeep);  op_be_CopyKeep  = NULL;
1338 	free_ir_op(op_be_Call);      op_be_Call      = NULL;
1339 	free_ir_op(op_be_Return);    op_be_Return    = NULL;
1340 	free_ir_op(op_be_IncSP);     op_be_IncSP     = NULL;
1341 	free_ir_op(op_be_AddSP);     op_be_AddSP     = NULL;
1342 	free_ir_op(op_be_SubSP);     op_be_SubSP     = NULL;
1343 	free_ir_op(op_be_Start);     op_be_Start     = NULL;
1344 	free_ir_op(op_be_FrameAddr); op_be_FrameAddr = NULL;
1345 }
1346