/*
 * Copyright (C) 1995-2011 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief       Backend ABI implementation.
 * @author      Sebastian Hack, Michael Beck
 */
#include "config.h"

#include "obst.h"

#include "irgopt.h"

#include "irgraph_t.h"
#include "irnode_t.h"
#include "ircons_t.h"
#include "iredges_t.h"
#include "irgmod.h"
#include "irgwalk.h"
#include "irprintf_t.h"
#include "iropt_t.h"
#include "irtools.h"
#include "heights.h"
#include "pdeq.h"
#include "util.h"
#include "raw_bitset.h"
#include "error.h"
#include "pset_new.h"

#include "be.h"
#include "beabi.h"
#include "bearch.h"
#include "benode.h"
#include "belive_t.h"
#include "besched.h"
#include "beirg.h"
#include "bessaconstr.h"
#include "bemodule.h"
#include "betranshlp.h"
DEBUG_ONLY(static firm_dbg_module_t *dbg;)

typedef struct be_abi_call_arg_t {
	unsigned is_res   : 1;  /**< 1: the call argument is a return value. 0: it's a call parameter. */
	unsigned in_reg   : 1;  /**< 1: this argument is transmitted in registers. */
	unsigned on_stack : 1;  /**< 1: this argument is transmitted on the stack. */
	unsigned callee   : 1;  /**< 1: someone called us. 0: we call another function. */

	int                    pos;
	const arch_register_t *reg;
	ir_entity             *stack_ent;
	ir_mode               *load_mode;
	unsigned               alignment;    /**< stack alignment */
	unsigned               space_before; /**< allocate space before */
	unsigned               space_after;  /**< allocate space after */
} be_abi_call_arg_t;

struct be_abi_call_t {
	be_abi_call_flags_t          flags;  /**< Flags describing the ABI behavior on calls */
	int                          pop;    /**< number of bytes by which the callee shrinks the stack frame on return */
	const be_abi_callbacks_t    *cb;
	ir_type                     *between_type;
	set                         *params;
	const arch_register_class_t *cls_addr; /**< register class of the call address */
};

/**
 * The ABI information for the current graph.
 */
struct be_abi_irg_t {
	be_abi_call_t        *call;         /**< The ABI call information. */

	ir_node              *init_sp;      /**< The node representing the stack pointer
	                                         at the start of the function. */

	ir_node              *start;        /**< The be_Start params node. */
	pmap                 *regs;         /**< A map of all callee-save and ignore regs to
	                                         their Projs of the RegParams node. */
	int                  start_block_bias; /**< The stack bias at the end of the start block. */

	pmap                 *keep_map;     /**< mapping blocks to keep nodes. */

	ir_node              **calls;       /**< flexible array containing all be_Call nodes */
};

static ir_heights_t *ir_heights;

/** Flag: if set, try to omit the frame pointer in all routines. */
static int be_omit_fp = 1;

static ir_node *be_abi_reg_map_get(pmap *map, const arch_register_t *reg)
{
	return pmap_get(ir_node, map, reg);
}

static void be_abi_reg_map_set(pmap *map, const arch_register_t *reg,
                               ir_node *node)
{
	pmap_insert(map, reg, node);
}

/**
 * Check if the given register is callee-save, i.e. it will be saved by the callee.
 */
static bool arch_register_is_callee_save(
	const arch_env_t      *arch_env,
	const arch_register_t *reg)
{
	if (arch_env->impl->register_saved_by)
		return arch_env->impl->register_saved_by(reg, /*callee=*/1);
	return false;
}

/**
 * Check if the given register is caller-save, i.e. it must be saved by the caller.
 */
static bool arch_register_is_caller_save(
	const arch_env_t      *arch_env,
	const arch_register_t *reg)
{
	if (arch_env->impl->register_saved_by)
		return arch_env->impl->register_saved_by(reg, /*callee=*/0);
	return false;
}
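
/* Illustrative sketch (not part of the original file): both queries above
 * delegate to the backend's register_saved_by() callback. For a hypothetical
 * ia32-like target where eax/ecx/edx are caller-save and ebx/esi/edi/ebp are
 * callee-save, such a callback could look like:
 *
 *   static int hypothetical_register_saved_by(const arch_register_t *reg,
 *                                             int callee)
 *   {
 *       if (callee)
 *           return reg == &regs[REG_EBX] || reg == &regs[REG_ESI]
 *               || reg == &regs[REG_EDI] || reg == &regs[REG_EBP];
 *       return reg == &regs[REG_EAX] || reg == &regs[REG_ECX]
 *           || reg == &regs[REG_EDX];
 *   }
 *
 * Here regs[] and the REG_* indices are assumed names for the backend's
 * register array, not identifiers from this file.
 */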


/*
     _    ____ ___    ____      _ _ _                _
    / \  | __ )_ _|  / ___|__ _| | | |__   __ _  ___| | _____
   / _ \ |  _ \| |  | |   / _` | | | '_ \ / _` |/ __| |/ / __|
  / ___ \| |_) | |  | |__| (_| | | | |_) | (_| | (__|   <\__ \
 /_/   \_\____/___|  \____\__,_|_|_|_.__/ \__,_|\___|_|\_\___/

  These callbacks are used by the backend to set the parameters
  for a specific call type.
*/

/**
 * Set compare function: compares two ABI call object arguments.
 */
static int cmp_call_arg(const void *a, const void *b, size_t n)
{
	const be_abi_call_arg_t *p = (const be_abi_call_arg_t*)a;
	const be_abi_call_arg_t *q = (const be_abi_call_arg_t*)b;
	(void) n;
	return !(p->is_res == q->is_res && p->pos == q->pos && p->callee == q->callee);
}

/**
 * Get an ABI call object argument.
 *
 * @param call      the abi call
 * @param is_res    true for call results, false for call arguments
 * @param pos       position of the argument
 * @param callee    context type: whether we are callee or caller
 */
static be_abi_call_arg_t *get_call_arg(be_abi_call_t *call, int is_res, int pos, int callee)
{
	be_abi_call_arg_t arg;
	unsigned hash;

	memset(&arg, 0, sizeof(arg));
	arg.is_res = is_res;
	arg.pos    = pos;
	arg.callee = callee;

	hash = is_res * 128 + pos;

	return set_find(be_abi_call_arg_t, call->params, &arg, sizeof(arg), hash);
}

/**
 * Set an ABI call object argument.
 */
static void remember_call_arg(be_abi_call_arg_t *arg, be_abi_call_t *call, be_abi_context_t context)
{
	unsigned hash = arg->is_res * 128 + arg->pos;
	if (context & ABI_CONTEXT_CALLEE) {
		arg->callee = 1;
		(void)set_insert(be_abi_call_arg_t, call->params, arg, sizeof(*arg), hash);
	}
	if (context & ABI_CONTEXT_CALLER) {
		arg->callee = 0;
		(void)set_insert(be_abi_call_arg_t, call->params, arg, sizeof(*arg), hash);
	}
}
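
/* Editor's illustration (hedged): caller and callee may disagree on where an
 * argument lives, so remember_call_arg() can store two variants of the same
 * (is_res, pos) pair, distinguished by the callee bit that cmp_call_arg()
 * compares. Both variants share the hash is_res * 128 + pos; e.g. the first
 * result (is_res = 1, pos = 0) hashes to 128 in either view, and
 * get_call_arg() selects the right variant via its callee parameter.
 */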

/* Set the flags for a call. */
void be_abi_call_set_flags(be_abi_call_t *call, be_abi_call_flags_t flags, const be_abi_callbacks_t *cb)
{
	call->flags = flags;
	call->cb    = cb;
}

/* Sets the number of bytes by which the callee shrinks the stack frame on return. */
void be_abi_call_set_pop(be_abi_call_t *call, int pop)
{
	assert(pop >= 0);
	call->pop = pop;
}
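
/* Illustrative sketch (assumption, not from this file): a backend lowering a
 * calling convention where the callee cleans up its own stack arguments,
 * such as x86 stdcall, would report the cleaned-up byte count here. For a
 * function taking two 4-byte stack arguments:
 *
 *   be_abi_call_set_pop(call, 8);  // callee pops 8 bytes on return
 *
 * adjust_call() later subtracts this value from the IncSP that releases the
 * caller's outgoing argument space.
 */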

/* Set register class for call address */
void be_abi_call_set_call_address_reg_class(be_abi_call_t *call, const arch_register_class_t *cls)
{
	call->cls_addr = cls;
}


void be_abi_call_param_stack(be_abi_call_t *call, int arg_pos,
                             ir_mode *load_mode, unsigned alignment,
                             unsigned space_before, unsigned space_after,
                             be_abi_context_t context)
{
	be_abi_call_arg_t arg;
	memset(&arg, 0, sizeof(arg));
	assert(alignment > 0 && "Alignment must be greater than 0");
	arg.on_stack     = 1;
	arg.load_mode    = load_mode;
	arg.alignment    = alignment;
	arg.space_before = space_before;
	arg.space_after  = space_after;
	arg.is_res       = 0;
	arg.pos          = arg_pos;

	remember_call_arg(&arg, call, context);
}

void be_abi_call_param_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg, be_abi_context_t context)
{
	be_abi_call_arg_t arg;
	memset(&arg, 0, sizeof(arg));

	arg.in_reg = 1;
	arg.reg    = reg;
	arg.is_res = 0;
	arg.pos    = arg_pos;

	remember_call_arg(&arg, call, context);
}

void be_abi_call_res_reg(be_abi_call_t *call, int arg_pos, const arch_register_t *reg, be_abi_context_t context)
{
	be_abi_call_arg_t arg;
	memset(&arg, 0, sizeof(arg));

	arg.in_reg = 1;
	arg.reg    = reg;
	arg.is_res = 1;
	arg.pos    = arg_pos;

	remember_call_arg(&arg, call, context);
}

/* Get the flags of an ABI call object. */
be_abi_call_flags_t be_abi_call_get_flags(const be_abi_call_t *call)
{
	return call->flags;
}

/**
 * Constructor for a new ABI call object.
 *
 * @param cls_addr  register class of the call address
 *
 * @return the new ABI call object
 */
static be_abi_call_t *be_abi_call_new(const arch_register_class_t *cls_addr)
{
	be_abi_call_t *call = XMALLOCZ(be_abi_call_t);

	call->flags.val  = 0;
	call->params     = new_set(cmp_call_arg, 16);
	call->cb         = NULL;
	call->cls_addr   = cls_addr;

	call->flags.bits.try_omit_fp = be_omit_fp;

	return call;
}
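
/* Illustrative sketch (assumption, not from this file): the target's
 * arch_env_get_call_abi() callback fills a fresh call object using the
 * setters above. For a hypothetical convention passing the first parameter
 * in a register, the second on the stack, and returning one value in a
 * register, it might do:
 *
 *   be_abi_call_param_reg(call, 0, &some_backend_reg, ABI_CONTEXT_CALLER);
 *   be_abi_call_param_stack(call, 1, mode_Iu, 4, 0, 0, ABI_CONTEXT_CALLER);
 *   be_abi_call_res_reg(call, 0, &some_result_reg, ABI_CONTEXT_CALLER);
 *
 * some_backend_reg and some_result_reg stand for backend register
 * descriptors and are assumed names.
 */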

/**
 * Destructor for an ABI call object.
 */
static void be_abi_call_free(be_abi_call_t *call)
{
	del_set(call->params);
	free(call);
}

/**
 * Initializes the frame layout from parts
 *
 * @param frame     the stack layout that will be initialized
 * @param args      the stack argument layout type
 * @param between   the between layout type
 * @param locals    the method frame type
 *
 * @return the initialized stack layout
 */
static be_stack_layout_t *stack_frame_init(be_stack_layout_t *frame, ir_type *args,
                                           ir_type *between, ir_type *locals)
{
	frame->arg_type       = args;
	frame->between_type   = between;
	frame->frame_type     = locals;
	frame->initial_offset = 0;
	frame->initial_bias   = 0;
	frame->order[1]       = between;

	/* typical decreasing stack: locals have the
	 * lowest addresses, arguments the highest */
	frame->order[0] = locals;
	frame->order[2] = args;
	return frame;
}
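
/* Editor's illustration of the resulting layout on a typical downward-growing
 * stack (higher addresses at the top):
 *
 *   +----------------------+  high addresses
 *   | stack arguments      |  order[2] = args
 *   +----------------------+
 *   | between type         |  order[1] = between
 *   +----------------------+
 *   | locals / spill slots |  order[0] = locals
 *   +----------------------+  low addresses
 *
 * The "between" contents are target-specific; return address and saved frame
 * pointer are typical examples, provided by the backend's get_between_type()
 * callback.
 */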

/*
   ____      _ _
  / ___|__ _| | |___
 | |   / _` | | / __|
 | |__| (_| | | \__ \
  \____\__,_|_|_|___/

  Adjustment of the calls inside a graph.

*/

/**
 * Transform a call node into a be_Call node.
 *
 * @param env The ABI environment for the current irg.
 * @param irn The call node.
 * @param curr_sp The stack pointer node to use.
 * @return The stack pointer after the call.
 */
static ir_node *adjust_call(be_abi_irg_t *env, ir_node *irn, ir_node *curr_sp)
{
	ir_graph *irg              = get_irn_irg(irn);
	const arch_env_t *arch_env = be_get_irg_arch_env(irg);
	ir_type *call_tp           = get_Call_type(irn);
	ir_node *call_ptr          = get_Call_ptr(irn);
	size_t   n_params          = get_method_n_params(call_tp);
	ir_node *curr_mem          = get_Call_mem(irn);
	ir_node *bl                = get_nodes_block(irn);
	int stack_size             = 0;
	const arch_register_t *sp  = arch_env->sp;
	be_abi_call_t *call        = be_abi_call_new(sp->reg_class);
	ir_mode *mach_mode         = sp->reg_class->mode;
	int n_res                  = get_method_n_ress(call_tp);

	ir_node *res_proj  = NULL;
	int n_reg_params   = 0;
	int n_stack_params = 0;
	int n_ins;

	const arch_register_t **states = NEW_ARR_F(const arch_register_t*, 0);
	const arch_register_t **destroyed_regs = NEW_ARR_F(const arch_register_t*, 0);
	ir_node                *low_call;
	ir_node               **in;
	ir_node               **res_projs;
	int                     n_reg_results = 0;
	int                    *reg_param_idxs;
	int                    *stack_param_idx;
	int                     i, n;
	int                     throws_exception;
	size_t                  s;
	size_t                  p;
	dbg_info               *dbgi;

	/* Let the isa fill out the abi description for that call node. */
	arch_env_get_call_abi(arch_env, call_tp, call);

	/* Insert code to put the stack arguments on the stack. */
	assert((size_t)get_Call_n_params(irn) == n_params);
	stack_param_idx = ALLOCAN(int, n_params);
	for (p = 0; p < n_params; ++p) {
		be_abi_call_arg_t *arg = get_call_arg(call, 0, p, 0);
		assert(arg);
		if (arg->on_stack) {
			int arg_size = get_type_size_bytes(get_method_param_type(call_tp, p));

			stack_size += round_up2(arg->space_before, arg->alignment);
			stack_size += round_up2(arg_size, arg->alignment);
			stack_size += round_up2(arg->space_after, arg->alignment);

			stack_param_idx[n_stack_params++] = p;
		}
	}

	/* Collect all arguments which are passed in registers. */
	reg_param_idxs = ALLOCAN(int, n_params);
	for (p = 0; p < n_params; ++p) {
		be_abi_call_arg_t *arg = get_call_arg(call, 0, p, 0);
		if (arg && arg->in_reg) {
			reg_param_idxs[n_reg_params++] = p;
		}
	}

	/*
	 * If the stack is decreasing and we do not want to store sequentially,
	 * or someone else allocated the call frame,
	 * we allocate as much space on the stack as all parameters need, by
	 * moving the stack pointer along the stack's direction.
	 *
	 * Note: we also have to do this for stack_size == 0, because we may have
	 * to adjust stack alignment for the call.
	 */
	curr_sp = be_new_IncSP(sp, bl, curr_sp, stack_size, 1);

	dbgi = get_irn_dbg_info(irn);
	/* If there are some parameters which shall be passed on the stack. */
	if (n_stack_params > 0) {
		int       curr_ofs = 0;
		ir_node **in       = ALLOCAN(ir_node*, n_stack_params+1);
		unsigned  n_in     = 0;

		curr_mem = get_Call_mem(irn);
		in[n_in++] = curr_mem;

		for (i = 0; i < n_stack_params; ++i) {
			int p                  = stack_param_idx[i];
			be_abi_call_arg_t *arg = get_call_arg(call, 0, p, 0);
			ir_node *param         = get_Call_param(irn, p);
			ir_node *addr          = curr_sp;
			ir_node *mem           = NULL;
			ir_type *param_type    = get_method_param_type(call_tp, p);
			int param_size         = get_type_size_bytes(param_type) + arg->space_after;

			/*
			 * If we wanted to build the arguments sequentially,
			 * the stack pointer for the next argument would have to be
			 * incremented, and the memory value propagated.
			 */
			curr_ofs += arg->space_before;
			curr_ofs =  round_up2(curr_ofs, arg->alignment);

			/* Make the expression to compute the argument's offset. */
			if (curr_ofs > 0) {
				ir_mode *constmode = mach_mode;
				if (mode_is_reference(mach_mode)) {
					constmode = mode_Is;
				}
				addr = new_r_Const_long(irg, constmode, curr_ofs);
				addr = new_r_Add(bl, curr_sp, addr, mach_mode);
			}

			/* Insert a store for primitive arguments. */
			if (is_atomic_type(param_type)) {
				ir_node *nomem     = get_irg_no_mem(irg);
				ir_node *mem_input = nomem;
				ir_node *store     = new_rd_Store(dbgi, bl, mem_input, addr, param, cons_none);
				mem   = new_r_Proj(store, mode_M, pn_Store_M);
			} else {
				/* Make a mem copy for compound arguments. */
				ir_node *copy;

				assert(mode_is_reference(get_irn_mode(param)));
				copy = new_rd_CopyB(dbgi, bl, curr_mem, addr, param, param_type);
				mem = new_r_Proj(copy, mode_M, pn_CopyB_M);
			}

			curr_ofs += param_size;

			in[n_in++] = mem;
		}

		/* We need the Sync only if we didn't build the stores sequentially. */
		if (n_stack_params >= 1) {
			curr_mem = new_r_Sync(bl, n_in, in);
		} else {
			curr_mem = get_Call_mem(irn);
		}
	}

	/* Put caller-save registers into the destroyed set and state registers
	 * into the states set. */
	for (i = 0, n = arch_env->n_register_classes; i < n; ++i) {
		unsigned j;
		const arch_register_class_t *cls = &arch_env->register_classes[i];
		for (j = 0; j < cls->n_regs; ++j) {
			const arch_register_t *reg = arch_register_for_index(cls, j);

			/* even if destroyed all is specified, neither SP nor FP are
			 * destroyed (else bad things will happen) */
			if (reg == arch_env->sp || reg == arch_env->bp)
				continue;

			if (reg->type & arch_register_type_state) {
				ARR_APP1(const arch_register_t*, destroyed_regs, reg);
				ARR_APP1(const arch_register_t*, states, reg);
				/* we're already in the destroyed set so no need for further
				 * checking */
				continue;
			}
			if (arch_register_is_caller_save(arch_env, reg)) {
				if (!(reg->type & arch_register_type_ignore)) {
					ARR_APP1(const arch_register_t*, destroyed_regs, reg);
				}
			}
		}
	}

	/* search the largest result proj number */
	res_projs = ALLOCANZ(ir_node*, n_res);

	foreach_out_edge(irn, edge) {
		ir_node *irn = get_edge_src_irn(edge);

		if (!is_Proj(irn) || get_Proj_proj(irn) != pn_Call_T_result)
			continue;

		foreach_out_edge(irn, res_edge) {
			int proj;
			ir_node *res = get_edge_src_irn(res_edge);

			assert(is_Proj(res));

			proj = get_Proj_proj(res);
			assert(proj < n_res);
			assert(res_projs[proj] == NULL);
			res_projs[proj] = res;
		}
		res_proj = irn;
		break;
	}

	/* TODO: this is not correct for cases where return values are passed
	 * on the stack, but no known ABI does this currently...
	 */
	n_reg_results = n_res;

	n_ins = 0;
	in    = ALLOCAN(ir_node*, n_reg_params + ARR_LEN(states));

	/* make the back end call node and set its register requirements. */
	for (i = 0; i < n_reg_params; ++i) {
		in[n_ins++] = get_Call_param(irn, reg_param_idxs[i]);
	}

	/* add state registers ins */
	for (s = 0; s < ARR_LEN(states); ++s) {
		const arch_register_t       *reg = states[s];
		const arch_register_class_t *cls = reg->reg_class;
		ir_node *regnode = new_r_Unknown(irg, cls->mode);
		in[n_ins++]      = regnode;
	}
	assert(n_ins == (int) (n_reg_params + ARR_LEN(states)));

	/* ins collected, build the call */
	throws_exception = ir_throws_exception(irn);
	if (env->call->flags.bits.call_has_imm && is_SymConst(call_ptr)) {
		/* direct call */
		low_call = be_new_Call(dbgi, irg, bl, curr_mem, sp->single_req, curr_sp,
		                       sp->single_req, curr_sp,
		                       n_reg_results + pn_be_Call_first_res + ARR_LEN(destroyed_regs),
		                       n_ins, in, get_Call_type(irn));
		be_Call_set_entity(low_call, get_SymConst_entity(call_ptr));
	} else {
		/* indirect call */
		low_call = be_new_Call(dbgi, irg, bl, curr_mem, sp->single_req, curr_sp,
		                       call->cls_addr->class_req, call_ptr,
		                       n_reg_results + pn_be_Call_first_res + ARR_LEN(destroyed_regs),
		                       n_ins, in, get_Call_type(irn));
	}
	ir_set_throws_exception(low_call, throws_exception);
	be_Call_set_pop(low_call, call->pop);

	/* put the call into the list of all calls for later processing */
	ARR_APP1(ir_node *, env->calls, low_call);

	/* create new stack pointer */
	curr_sp = new_r_Proj(low_call, get_irn_mode(curr_sp), pn_be_Call_sp);
	be_set_constr_single_reg_out(low_call, pn_be_Call_sp, sp,
			arch_register_req_type_ignore | arch_register_req_type_produces_sp);
	arch_set_irn_register(curr_sp, sp);

	/* now handle results */
	for (i = 0; i < n_res; ++i) {
		ir_node           *proj = res_projs[i];
		be_abi_call_arg_t *arg  = get_call_arg(call, 1, i, 0);

		/* returning values on the stack is not supported yet */
		assert(arg->in_reg);

		/*
			shift the proj number to the right, since we will drop the
			unspeakable Proj_T from the Call. Therefore, all real argument
			Proj numbers must be increased by pn_be_Call_first_res
		*/
		long pn = i + pn_be_Call_first_res;

		if (proj == NULL) {
			ir_type *res_type = get_method_res_type(call_tp, i);
			ir_mode *mode     = get_type_mode(res_type);
			proj              = new_r_Proj(low_call, mode, pn);
			res_projs[i]      = proj;
		} else {
			set_Proj_pred(proj, low_call);
			set_Proj_proj(proj, pn);
		}

		if (arg->in_reg) {
			/* remove register from destroyed regs */
			size_t j;
			size_t n = ARR_LEN(destroyed_regs);
			for (j = 0; j < n; ++j) {
				if (destroyed_regs[j] == arg->reg) {
					destroyed_regs[j] = destroyed_regs[n-1];
					ARR_SHRINKLEN(destroyed_regs, n-1);
					break;
				}
			}
		}
	}

	DBG((dbg, LEVEL_3, "\tcreated backend call %+F\n", low_call));

	/* Set the register classes and constraints of the Call parameters. */
	for (i = 0; i < n_reg_params; ++i) {
		int index = reg_param_idxs[i];
		be_abi_call_arg_t *arg = get_call_arg(call, 0, index, 0);
		assert(arg->reg != NULL);

		be_set_constr_single_reg_in(low_call, n_be_Call_first_arg + i,
		                            arg->reg, arch_register_req_type_none);
	}

	/* Set the register constraints of the results. */
	for (i = 0; i < n_res; ++i) {
		ir_node                 *proj = res_projs[i];
		const be_abi_call_arg_t *arg  = get_call_arg(call, 1, i, 0);
		int                      pn   = get_Proj_proj(proj);

		assert(arg->in_reg);
		be_set_constr_single_reg_out(low_call, pn, arg->reg,
		                             arch_register_req_type_none);
		arch_set_irn_register(proj, arg->reg);
	}
	exchange(irn, low_call);

	/* kill the ProjT node */
	if (res_proj != NULL) {
		kill_node(res_proj);
	}

	/* Make additional projs for the caller-save registers
	   and the Keep node which keeps them alive. */
	{
		ir_node               **in, *keep;
		int                   i;
		size_t                d;
		int                   n = 0;
		int                   curr_res_proj = pn_be_Call_first_res + n_reg_results;
		int                   n_ins;

		n_ins = ARR_LEN(destroyed_regs) + n_reg_results + 1;
		in    = ALLOCAN(ir_node *, n_ins);

		/* also keep the stack pointer */
		set_irn_link(curr_sp, (void*) sp);
		in[n++] = curr_sp;

		for (d = 0; d < ARR_LEN(destroyed_regs); ++d) {
			const arch_register_t *reg = destroyed_regs[d];
			ir_node *proj = new_r_Proj(low_call, reg->reg_class->mode, curr_res_proj);

			/* memorize the register in the link field; we need it afterwards
			 * to set the register class of the keep correctly. */
			be_set_constr_single_reg_out(low_call, curr_res_proj, reg,
			                             arch_register_req_type_none);
			arch_set_irn_register(proj, reg);

			set_irn_link(proj, (void*) reg);
			in[n++] = proj;
			++curr_res_proj;
		}

		for (i = 0; i < n_reg_results; ++i) {
			ir_node *proj = res_projs[i];
			const arch_register_t *reg = arch_get_irn_register(proj);
			set_irn_link(proj, (void*) reg);
			in[n++] = proj;
		}
		assert(n <= n_ins);

		/* create the Keep for the caller-save registers */
		keep = be_new_Keep(bl, n, in);
		for (i = 0; i < n; ++i) {
			const arch_register_t *reg = (const arch_register_t*)get_irn_link(in[i]);
			be_node_set_reg_class_in(keep, i, reg->reg_class);
		}
	}

	/* Clean up the stack. */
	assert(stack_size >= call->pop);
	stack_size -= call->pop;

	if (stack_size > 0) {
		ir_node *mem_proj = NULL;

		foreach_out_edge(low_call, edge) {
			ir_node *irn = get_edge_src_irn(edge);
			if (is_Proj(irn) && get_Proj_proj(irn) == pn_Call_M) {
				mem_proj = irn;
				break;
			}
		}

		if (! mem_proj) {
			mem_proj = new_r_Proj(low_call, mode_M, pn_be_Call_M);
			keep_alive(mem_proj);
		}
	}
	/* Clean up the stack frame or revert alignment fixes if we allocated it */
	curr_sp = be_new_IncSP(sp, bl, curr_sp, -stack_size, 0);

	be_abi_call_free(call);

	DEL_ARR_F(states);
	DEL_ARR_F(destroyed_regs);

	return curr_sp;
}

/**
 * Adjust the size of a node representing a stack alloc or free for the minimum stack alignment.
 *
 * @param stack_alignment  the minimum stack alignment
 * @param size             the node containing the non-aligned size
 * @param block            the block where new nodes are allocated on
 * @param dbg              debug info for new nodes
 *
 * @return a node representing the aligned size
 */
static ir_node *adjust_alloc_size(unsigned stack_alignment, ir_node *size,
                                  ir_node *block, dbg_info *dbg)
{
	if (stack_alignment > 1) {
		ir_mode   *mode;
		ir_tarval *tv;
		ir_node   *mask;
		ir_graph  *irg;

		assert(is_po2(stack_alignment));

		mode = get_irn_mode(size);
		tv   = new_tarval_from_long(stack_alignment-1, mode);
		irg  = get_Block_irg(block);
		mask = new_r_Const(irg, tv);
		size = new_rd_Add(dbg, block, size, mask, mode);

		tv   = new_tarval_from_long(-(long)stack_alignment, mode);
		mask = new_r_Const(irg, tv);
		size = new_rd_And(dbg, block, size, mask, mode);
	}
	return size;
}
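
/* Editor's worked example: the nodes built above compute
 * (size + alignment - 1) & -alignment, the usual power-of-two round-up.
 * For size = 13 and stack_alignment = 8:
 *   13 + 7 = 20, and 20 & ~7 = 16,
 * so a 13-byte allocation is padded to 16 bytes, keeping the stack pointer
 * 8-byte aligned.
 */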

/**
 * Adjust an alloca.
 * The alloca is transformed into a back end alloca node and connected to the stack nodes.
 */
static ir_node *adjust_alloc(be_abi_irg_t *env, ir_node *alloc, ir_node *curr_sp)
{
	ir_node          *block     = get_nodes_block(alloc);
	ir_graph         *irg       = get_Block_irg(block);
	const arch_env_t *arch_env  = be_get_irg_arch_env(irg);
	ir_node          *alloc_mem = NULL;
	ir_node          *alloc_res = NULL;
	ir_type          *type      = get_Alloc_type(alloc);
	dbg_info         *dbg;

	ir_node *new_alloc;
	ir_node *count;
	ir_node *size;
	ir_node *ins[2];
	unsigned stack_alignment;

	/* all non-stack Alloc nodes should already be lowered before the backend */
	assert(get_Alloc_where(alloc) == stack_alloc);

	foreach_out_edge(alloc, edge) {
		ir_node *irn = get_edge_src_irn(edge);

		assert(is_Proj(irn));
		switch (get_Proj_proj(irn)) {
		case pn_Alloc_M:
			alloc_mem = irn;
			break;
		case pn_Alloc_res:
			alloc_res = irn;
			break;
		default:
			break;
		}
	}

	/* Beware: currently Alloc nodes without a result can occur; only escape
	   analysis removes them, and that phase runs only for object-oriented
	   source. We kill such an Alloc here. */
	if (alloc_res == NULL && alloc_mem) {
		exchange(alloc_mem, get_Alloc_mem(alloc));
		return curr_sp;
	}

	dbg   = get_irn_dbg_info(alloc);
	count = get_Alloc_count(alloc);

	/* we might need to multiply the count with the element size */
	if (!is_unknown_type(type) && get_type_size_bytes(type) != 1) {
		ir_mode   *mode  = get_irn_mode(count);
		ir_tarval *tv    = new_tarval_from_long(get_type_size_bytes(type),
		                                        mode);
		ir_node   *cnst = new_rd_Const(dbg, irg, tv);
		size            = new_rd_Mul(dbg, block, count, cnst, mode);
	} else {
		size = count;
	}

	/* The stack pointer will be modified in an unknown manner.
	   We cannot omit the frame pointer then. */
	env->call->flags.bits.try_omit_fp = 0;

	stack_alignment = 1 << arch_env->stack_alignment;
	size            = adjust_alloc_size(stack_alignment, size, block, dbg);
	new_alloc       = be_new_AddSP(arch_env->sp, block, curr_sp, size);
	set_irn_dbg_info(new_alloc, dbg);

	if (alloc_mem != NULL) {
		ir_node *addsp_mem;
		ir_node *sync;

		addsp_mem = new_r_Proj(new_alloc, mode_M, pn_be_AddSP_M);

		/* We need to sync the output mem of the AddSP with the input mem
		   edge into the alloc node. */
		ins[0] = get_Alloc_mem(alloc);
		ins[1] = addsp_mem;
		sync = new_r_Sync(block, 2, ins);

		exchange(alloc_mem, sync);
	}

	exchange(alloc, new_alloc);

	/* fix projnum of alloca res */
	set_Proj_proj(alloc_res, pn_be_AddSP_res);

	curr_sp = new_r_Proj(new_alloc, get_irn_mode(curr_sp), pn_be_AddSP_sp);

	return curr_sp;
}

/**
 * Adjust a Free.
 * The Free is transformed into a back end free node and connected to the stack nodes.
 */
static ir_node *adjust_free(be_abi_irg_t *env, ir_node *free, ir_node *curr_sp)
{
	ir_node          *block    = get_nodes_block(free);
	ir_graph         *irg      = get_irn_irg(free);
	ir_type          *type     = get_Free_type(free);
	const arch_env_t *arch_env = be_get_irg_arch_env(irg);
	ir_mode          *sp_mode  = arch_env->sp->reg_class->mode;
	dbg_info         *dbg      = get_irn_dbg_info(free);
	ir_node  *subsp, *mem, *res, *size, *sync;
	ir_node *in[2];
	unsigned stack_alignment;

	/* all non-stack-alloc Free nodes should already be lowered before the
	 * backend phase */
	assert(get_Free_where(free) == stack_alloc);

	/* we might need to multiply the size with the element size */
	if (!is_unknown_type(type) && get_type_size_bytes(type) != 1) {
		ir_tarval *tv   = new_tarval_from_long(get_type_size_bytes(type), mode_Iu);
		ir_node   *cnst = new_rd_Const(dbg, irg, tv);
		ir_node   *mul  = new_rd_Mul(dbg, block, get_Free_count(free),
		                             cnst, mode_Iu);
		size = mul;
	} else {
		size = get_Free_count(free);
	}

	stack_alignment = 1 << arch_env->stack_alignment;
	size            = adjust_alloc_size(stack_alignment, size, block, dbg);

	/* The stack pointer will be modified in an unknown manner.
	   We cannot omit the frame pointer then. */
	env->call->flags.bits.try_omit_fp = 0;
	subsp = be_new_SubSP(arch_env->sp, block, curr_sp, size);
	set_irn_dbg_info(subsp, dbg);

	mem = new_r_Proj(subsp, mode_M, pn_be_SubSP_M);
	res = new_r_Proj(subsp, sp_mode, pn_be_SubSP_sp);

	/* we need to sync the memory */
	in[0] = get_Free_mem(free);
	in[1] = mem;
	sync = new_r_Sync(block, 2, in);

	/* and make the SubSP dependent on the former memory */
	add_irn_dep(subsp, get_Free_mem(free));

	/* kill the free */
	exchange(free, sync);
	curr_sp = res;

	return curr_sp;
}

/**
 * Check if a node is somehow data dependent on another one.
 * Both nodes must be in the same basic block.
 * @param n1 The first node.
 * @param n2 The second node.
 * @return 1, if n1 is data dependent (transitively) on n2, 0 if not.
 */
static int dependent_on(ir_node *n1, ir_node *n2)
{
	assert(get_nodes_block(n1) == get_nodes_block(n2));

	return heights_reachable_in_block(ir_heights, n1, n2);
}

static int cmp_call_dependency(const void *c1, const void *c2)
{
	ir_node *n1 = *(ir_node **) c1;
	ir_node *n2 = *(ir_node **) c2;
	unsigned h1, h2;

	/*
		Classical qsort() comparison function behavior:
		0  if both elements are equal
		1  if second is "smaller" than first
		-1 if first is "smaller" than second
	*/
	if (dependent_on(n1, n2))
		return -1;

	if (dependent_on(n2, n1))
		return 1;

	/* The nodes have no depth order, but we need a total order because qsort()
	 * is not stable.
	 *
	 * Additionally, we need to respect transitive dependencies. Consider a
	 * Call a depending on Call b and an independent Call c.
	 * We MUST NOT order c > a and b > c. */
	h1 = get_irn_height(ir_heights, n1);
	h2 = get_irn_height(ir_heights, n2);
	if (h1 < h2) return -1;
	if (h1 > h2) return  1;
	/* Same height, so use an arbitrary (but stable) order */
	return get_irn_idx(n1) - get_irn_idx(n2);
}
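
/* Editor's illustration: if Call a consumes a result of Call b (so
 * dependent_on(a, b) holds), cmp_call_dependency(&a, &b) returns -1 and a
 * sorts before b. process_ops_in_block() below walks the sorted array from
 * back to front, so b is lowered first and a's lowering sees the stack
 * pointer b produced, matching the data dependency.
 */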

/**
 * Walker: links all Call/Alloc/Free nodes to the Block they are contained in.
 */
static void link_ops_in_block_walker(ir_node *irn, void *data)
{
	be_abi_irg_t *env  = (be_abi_irg_t*)data;
	unsigned      code = get_irn_opcode(irn);

	if (code == iro_Call ||
	   (code == iro_Alloc && get_Alloc_where(irn) == stack_alloc) ||
	   (code == iro_Free && get_Free_where(irn) == stack_alloc)) {
		ir_node *bl       = get_nodes_block(irn);
		void *save        = get_irn_link(bl);

		set_irn_link(irn, save);
		set_irn_link(bl, irn);
	}

	if (code == iro_Builtin && get_Builtin_kind(irn) == ir_bk_return_address) {
		ir_node       *param = get_Builtin_param(irn, 0);
		ir_tarval     *tv    = get_Const_tarval(param);
		unsigned long  value = get_tarval_long(tv);
		/* use ebp, so the climbframe algo works... */
		if (value > 0) {
			env->call->flags.bits.try_omit_fp = 0;
		}
	}
}

/**
 * Block-walker:
 * Process all Call/Alloc/Free nodes inside a basic block.
 * Note that the link field of the block must contain a linked list of all
 * nodes inside the block. We first order this list according to data
 * dependency and then connect the nodes together.
 */
static void process_ops_in_block(ir_node *bl, void *data)
{
	be_abi_irg_t   *env     = (be_abi_irg_t*)data;
	ir_node        *curr_sp = env->init_sp;
	ir_node        *irn;
	ir_node       **nodes;
	int             n;
	int             n_nodes;

	n_nodes = 0;
	for (irn = (ir_node*)get_irn_link(bl); irn != NULL;
	     irn = (ir_node*)get_irn_link(irn)) {
		++n_nodes;
	}

	nodes = ALLOCAN(ir_node*, n_nodes);
	for (irn = (ir_node*)get_irn_link(bl), n = 0; irn != NULL;
	     irn = (ir_node*)get_irn_link(irn), ++n) {
		nodes[n] = irn;
	}

	/* If there were call nodes in the block. */
	if (n > 0) {
		ir_node *keep;
		int i;

		/* order the call nodes according to data dependency */
		qsort(nodes, n_nodes, sizeof(nodes[0]), cmp_call_dependency);

		for (i = n_nodes - 1; i >= 0; --i) {
			ir_node *irn = nodes[i];

			DBG((dbg, LEVEL_3, "\tprocessing call %+F\n", irn));
			switch (get_irn_opcode(irn)) {
			case iro_Call:
				if (! be_omit_fp) {
					/* The stack pointer will be modified due to a call. */
					env->call->flags.bits.try_omit_fp = 0;
				}
				curr_sp = adjust_call(env, irn, curr_sp);
				break;
			case iro_Alloc:
				if (get_Alloc_where(irn) == stack_alloc)
					curr_sp = adjust_alloc(env, irn, curr_sp);
				break;
			case iro_Free:
				if (get_Free_where(irn) == stack_alloc)
					curr_sp = adjust_free(env, irn, curr_sp);
				break;
			default:
				panic("invalid call");
			}
		}

		/* Keep the last stack state in the block by tying it to a Keep node;
		 * the proj from calls is already kept */
		if (curr_sp != env->init_sp &&
		    !(is_Proj(curr_sp) && be_is_Call(get_Proj_pred(curr_sp)))) {
			nodes[0] = curr_sp;
			keep     = be_new_Keep(bl, 1, nodes);
			pmap_insert(env->keep_map, bl, keep);
		}
	}

	set_irn_link(bl, curr_sp);
}

/**
 * Adjust all call nodes in the graph to the ABI conventions.
 */
static void process_calls(ir_graph *irg)
{
	be_abi_irg_t *abi = be_get_irg_abi(irg);

	irg_walk_graph(irg, firm_clear_link, link_ops_in_block_walker, abi);

	ir_heights = heights_new(irg);
	irg_block_walk_graph(irg, NULL, process_ops_in_block, abi);
	heights_free(ir_heights);
}

/**
 * Computes the stack argument layout type.
 * Changes a possibly allocated value param type by moving
 * entities to the stack layout type.
 *
 * @param call          the current call ABI
 * @param method_type   the method type
 *
 * @return the stack argument layout type
 */
static ir_type *compute_arg_type(ir_graph *irg, be_abi_call_t *call,
                                 ir_type *method_type)
{
	struct obstack *obst = be_get_be_obst(irg);
	ir_type   *frame_type      = get_irg_frame_type(irg);
	size_t     n_params        = get_method_n_params(method_type);
	size_t     n_frame_members = get_compound_n_members(frame_type);
	ir_entity *va_start_entity = NULL;
	size_t   f;
	int      ofs  = 0;

	ir_type *res;
	size_t i;
	ir_entity **map = OALLOCNZ(obst, ir_entity*, n_params);
	res = new_type_struct(new_id_from_chars("arg_type", 8));

	/* collect existing entities for value_param_types */
	for (f = n_frame_members; f > 0; ) {
		ir_entity *entity = get_compound_member(frame_type, --f);
		size_t     num;

		set_entity_link(entity, NULL);
		if (!is_parameter_entity(entity))
			continue;
		num = get_entity_parameter_number(entity);
		if (num == IR_VA_START_PARAMETER_NUMBER) {
			/* move entity to new arg_type */
			set_entity_owner(entity, res);
			va_start_entity = entity;
			continue;
		}
		assert(num < n_params);
		if (map[num] != NULL)
			panic("multiple entities for parameter %u in %+F found", num, irg);

		if (num != n_params && !get_call_arg(call, 0, num, 1)->on_stack) {
			/* don't move this entity */
			continue;
		}

		map[num] = entity;
		/* move entity to new arg_type */
		set_entity_owner(entity, res);
	}

	for (i = 0; i < n_params; ++i) {
		be_abi_call_arg_t *arg        = get_call_arg(call, 0, i, 1);
		ir_type           *param_type = get_method_param_type(method_type, i);
		ir_entity         *entity;

		if (!arg->on_stack) {
			continue;
		}
		entity = map[i];
		if (entity == NULL) {
			/* create a new entity */
			entity = new_parameter_entity(res, i, param_type);
		}
		ofs += arg->space_before;
		ofs = round_up2(ofs, arg->alignment);
		set_entity_offset(entity, ofs);
		ofs += arg->space_after;
		ofs += get_type_size_bytes(param_type);
		arg->stack_ent = entity;
	}
	if (va_start_entity != NULL) {
		set_entity_offset(va_start_entity, ofs);
	}
	set_type_size_bytes(res, ofs);
	set_type_state(res, layout_fixed);

	return res;
}
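
/* Editor's worked example for the offset loop above, assuming two stack
 * parameters with no space_before/space_after: parameter 0 with size 4 and
 * alignment 4, parameter 1 with size 8 and alignment 8. Parameter 0 lands at
 * offset 0 and advances ofs to 4; rounding 4 up to alignment 8 places
 * parameter 1 at offset 8 and advances ofs to 16, which becomes the size of
 * the arg_type.
 */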

typedef struct {
	const arch_register_t *reg;
	ir_node *irn;
} reg_node_map_t;

static int cmp_regs(const void *a, const void *b)
{
	const reg_node_map_t *p = (const reg_node_map_t*)a;
	const reg_node_map_t *q = (const reg_node_map_t*)b;

	if (p->reg->reg_class == q->reg->reg_class)
		return p->reg->index - q->reg->index;
	else
		return p->reg->reg_class < q->reg->reg_class ? -1 : +1;
}

static void reg_map_to_arr(reg_node_map_t *res, pmap *reg_map)
{
	pmap_entry *ent;
	size_t n = pmap_count(reg_map);
	size_t i = 0;

	foreach_pmap(reg_map, ent) {
		res[i].reg = (const arch_register_t*)ent->key;
		res[i].irn = (ir_node*)ent->value;
		i++;
	}

	qsort(res, n, sizeof(res[0]), cmp_regs);
}

/**
 * Creates a be_Return for a Return node.
 *
 * @param env   the abi environment
 * @param irn   the Return node or NULL if there was none
 * @param bl    the block where the be_Return should be placed
 * @param mem   the current memory
 * @param n_res number of return results
 */
static ir_node *create_be_return(be_abi_irg_t *env, ir_node *irn, ir_node *bl,
		ir_node *mem, int n_res)
{
	be_abi_call_t    *call     = env->call;
	ir_graph         *irg      = get_Block_irg(bl);
	const arch_env_t *arch_env = be_get_irg_arch_env(irg);
	dbg_info *dbgi;
	pmap *reg_map  = pmap_create();
	ir_node *keep  = pmap_get(ir_node, env->keep_map, bl);
	size_t in_max;
	ir_node *ret;
	int i, n;
	unsigned pop;
	ir_node **in;
	ir_node *stack;
	const arch_register_t **regs;
	pmap_entry *ent;

	/*
		Get the valid stack node in this block.
		If we had a call in that block there is a Keep constructed by
		process_calls() which points to the last stack modification in that
		block. We'll use it then. Else we use the stack from the start block
		and let the SSA construction fix the usage.
	*/
	stack = be_abi_reg_map_get(env->regs, arch_env->sp);
	if (keep) {
		stack = get_irn_n(keep, 0);
		kill_node(keep);
		remove_End_keepalive(get_irg_end(irg), keep);
	}

	/* Insert results for Return into the register map. */
	for (i = 0; i < n_res; ++i) {
		ir_node *res           = get_Return_res(irn, i);
		be_abi_call_arg_t *arg = get_call_arg(call, 1, i, 1);
		assert(arg->in_reg && "return value must be passed in register");
		pmap_insert(reg_map, (void *) arg->reg, res);
	}

	/* Add uses of the callee-save registers. */
	foreach_pmap(env->regs, ent) {
		const arch_register_t *reg = (const arch_register_t*)ent->key;
		if ((reg->type & arch_register_type_ignore) || arch_register_is_callee_save(arch_env, reg))
			pmap_insert(reg_map, ent->key, ent->value);
	}

	be_abi_reg_map_set(reg_map, arch_env->sp, stack);

	/*
		Maximum size of the in array for Return nodes is
		return args + callee save/ignore registers + memory + stack pointer
	*/
	in_max = pmap_count(reg_map) + n_res + 2;

	in   = ALLOCAN(ir_node*,               in_max);
	regs = ALLOCAN(arch_register_t const*, in_max);

	in[0]   = mem;
	in[1]   = be_abi_reg_map_get(reg_map, arch_env->sp);
	regs[0] = NULL;
	regs[1] = arch_env->sp;
	n       = 2;

	/* clear SP entry, since it has already been grown. */
	pmap_insert(reg_map, (void *) arch_env->sp, NULL);
	for (i = 0; i < n_res; ++i) {
		be_abi_call_arg_t *arg = get_call_arg(call, 1, i, 1);

		in[n]     = be_abi_reg_map_get(reg_map, arg->reg);
		regs[n++] = arg->reg;

		/* Clear the map entry to mark the register as processed. */
		be_abi_reg_map_set(reg_map, arg->reg, NULL);
	}

	/* grow the rest of the stuff. */
	foreach_pmap(reg_map, ent) {
		if (ent->value) {
			in[n]     = (ir_node*)ent->value;
			regs[n++] = (const arch_register_t*)ent->key;
		}
	}

	/* The in array for the new back end return is now ready. */
	if (irn != NULL) {
		dbgi = get_irn_dbg_info(irn);
	} else {
		dbgi = NULL;
	}
	/* we have to pop the shadow parameter in case of struct returns */
	pop = call->pop;
	ret = be_new_Return(dbgi, irg, bl, n_res, pop, n, in);

	/* Set the register classes of the return's parameters accordingly. */
	for (i = 0; i < n; ++i) {
		if (regs[i] == NULL)
			continue;

		be_set_constr_single_reg_in(ret, i, regs[i], arch_register_req_type_none);
	}

	/* Free the space of the epilog's in array and the register <-> proj map. */
	pmap_destroy(reg_map);

	return ret;
}

typedef struct lower_frame_sels_env_t {
	ir_node      *frame;                     /**< the current frame */
	const arch_register_class_t *sp_class;   /**< register class of the stack pointer */
	const arch_register_class_t *link_class; /**< register class of the link pointer */
	ir_type      *frame_tp;                  /**< the frame type */
	int          static_link_pos;            /**< argument number of the hidden static link */
} lower_frame_sels_env_t;

/**
 * Walker: Replaces Sels of frame type and
 * value param type entities by FrameAddr nodes.
 * Links all used entities.
 */
static void lower_frame_sels_walker(ir_node *irn, void *data)
{
	lower_frame_sels_env_t *ctx = (lower_frame_sels_env_t*)data;

	if (is_Sel(irn)) {
		ir_node *ptr = get_Sel_ptr(irn);

		if (ptr == ctx->frame) {
			ir_entity    *ent = get_Sel_entity(irn);
			ir_node      *bl  = get_nodes_block(irn);
			ir_node      *nw;

			nw = be_new_FrameAddr(ctx->sp_class, bl, ctx->frame, ent);
			exchange(irn, nw);
		}
	}
}

/**
 * The start block has no jump, instead it has an initial exec Proj.
 * The backend wants to handle all blocks the same way, so we replace
 * the out cfg edge with a real jump.
 */
static void fix_start_block(ir_graph *irg)
{
	ir_node *initial_X   = get_irg_initial_exec(irg);
	ir_node *start_block = get_irg_start_block(irg);
	ir_node *jmp         = new_r_Jmp(start_block);

	assert(is_Proj(initial_X));
	exchange(initial_X, jmp);
	set_irg_initial_exec(irg, new_r_Bad(irg, mode_X));

	/* merge start block with successor if possible */
	{
		foreach_out_edge(jmp, edge) {
			ir_node *succ = get_edge_src_irn(edge);
			if (!is_Block(succ))
				continue;

			if (get_irn_arity(succ) == 1) {
				exchange(succ, start_block);
			}
			break;
		}
	}
}
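
/* Editor's illustration: successor blocks initially use the Proj X of the
 * Start node as their control-flow predecessor. The code above replaces that
 * Proj with a regular Jmp placed in the start block, and if the successor
 * block then has exactly one predecessor it is merged into the start block
 * altogether, so every remaining block ends in an ordinary jump.
 */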
1384 
1385 /**
1386  * Modify the irg itself and the frame type.
1387  */
modify_irg(ir_graph * irg)1388 static void modify_irg(ir_graph *irg)
1389 {
1390 	be_abi_irg_t          *env          = be_get_irg_abi(irg);
1391 	be_abi_call_t         *call         = env->call;
1392 	const arch_env_t      *arch_env     = be_get_irg_arch_env(irg);
1393 	const arch_register_t *sp           = arch_env->sp;
1394 	ir_type               *method_type  = get_entity_type(get_irg_entity(irg));
1395 	be_irg_t              *birg         = be_birg_from_irg(irg);
1396 	struct obstack        *obst         = be_get_be_obst(irg);
1397 	be_stack_layout_t     *stack_layout = be_get_irg_stack_layout(irg);
1398 	ir_node *end;
1399 	ir_node *old_mem;
1400 	ir_node *new_mem_proj;
1401 	ir_node *mem;
1402 
1403 	int n_params;
1404 	int i, n;
1405 	unsigned j;
1406 	unsigned frame_size;
1407 
1408 	reg_node_map_t *rm;
1409 	const arch_register_t *fp_reg;
1410 	ir_node *frame_pointer;
1411 	ir_node *start_bl;
1412 	ir_node **args;
1413 	ir_node *arg_tuple;
1414 	ir_type *arg_type, *bet_type;
1415 	lower_frame_sels_env_t ctx;
1416 
1417 	DBG((dbg, LEVEL_1, "introducing abi on %+F\n", irg));
1418 
1419 	old_mem = get_irg_initial_mem(irg);
1420 
1421 	irp_reserve_resources(irp, IRP_RESOURCE_ENTITY_LINK);
1422 
1423 	arg_type = compute_arg_type(irg, call, method_type);
1424 
1425 	/* Convert the Sel nodes in the irg to frame addr nodes: */
1426 	ctx.frame            = get_irg_frame(irg);
1427 	ctx.sp_class         = arch_env->sp->reg_class;
1428 	ctx.link_class       = arch_env->link_class;
1429 	ctx.frame_tp         = get_irg_frame_type(irg);
1430 
1431 	/* layout the stackframe now */
1432 	if (get_type_state(ctx.frame_tp) == layout_undefined) {
1433 		default_layout_compound_type(ctx.frame_tp);
1434 	}
1435 
1436 	/* align stackframe to 4 byte */
1437 	frame_size = get_type_size_bytes(ctx.frame_tp);
1438 	if (frame_size % 4 != 0) {
1439 		set_type_size_bytes(ctx.frame_tp, frame_size + 4 - (frame_size % 4));
1440 	}
1441 
1442 	env->regs  = pmap_create();
1443 
1444 	n_params = get_method_n_params(method_type);
1445 	args     = OALLOCNZ(obst, ir_node*, n_params);
1446 
1447 	be_add_parameter_entity_stores(irg);
1448 
1449 	irg_walk_graph(irg, lower_frame_sels_walker, NULL, &ctx);
1450 
1451 	irp_free_resources(irp, IRP_RESOURCE_ENTITY_LINK);
1452 
1453 	/* Fill the argument vector */
1454 	arg_tuple = get_irg_args(irg);
1455 	foreach_out_edge(arg_tuple, edge) {
1456 		ir_node *irn = get_edge_src_irn(edge);
1457 		if (! is_Anchor(irn)) {
1458 			int nr       = get_Proj_proj(irn);
1459 			args[nr]     = irn;
1460 			DBG((dbg, LEVEL_2, "\treading arg: %d -> %+F\n", nr, irn));
1461 		}
1462 	}
1463 
1464 	stack_layout->sp_relative = call->flags.bits.try_omit_fp;
1465 	bet_type = call->cb->get_between_type(irg);
1466 	stack_frame_init(stack_layout, arg_type, bet_type,
1467 	                 get_irg_frame_type(irg));
1468 
1469 	/* Count the register params and add them to the number of Projs for the RegParams node */
1470 	for (i = 0; i < n_params; ++i) {
1471 		be_abi_call_arg_t *arg = get_call_arg(call, 0, i, 1);
1472 		if (arg->in_reg && args[i]) {
1473 			assert(arg->reg != sp && "cannot use stack pointer as parameter register");
1474 			assert(i == get_Proj_proj(args[i]));
1475 
1476 			/* For now, associate the register with the old Proj from Start representing that argument. */
1477 			pmap_insert(env->regs, (void *) arg->reg, args[i]);
1478 			DBG((dbg, LEVEL_2, "\targ #%d -> reg %s\n", i, arg->reg->name));
1479 		}
1480 	}
1481 
1482 	/* Collect all callee-save registers */
1483 	for (i = 0, n = arch_env->n_register_classes; i < n; ++i) {
1484 		const arch_register_class_t *cls = &arch_env->register_classes[i];
1485 		for (j = 0; j < cls->n_regs; ++j) {
1486 			const arch_register_t *reg = &cls->regs[j];
1487 			if ((reg->type & arch_register_type_state) || arch_register_is_callee_save(arch_env, reg)) {
1488 				pmap_insert(env->regs, (void *) reg, NULL);
1489 			}
1490 		}
1491 	}
1492 
1493 	fp_reg = call->flags.bits.try_omit_fp ? arch_env->sp : arch_env->bp;
1494 	rbitset_clear(birg->allocatable_regs, fp_reg->global_index);
1495 
1496 	/* handle start block here (place a jump in the block) */
1497 	fix_start_block(irg);
1498 
1499 	pmap_insert(env->regs, (void *) sp, NULL);
1500 	pmap_insert(env->regs, (void *) arch_env->bp, NULL);
1501 	start_bl   = get_irg_start_block(irg);
1502 	env->start = be_new_Start(NULL, start_bl, pmap_count(env->regs) + 1);
1503 	set_irg_start(irg, env->start);
1504 
1505 	/*
1506 	 * make proj nodes for the callee save registers.
1507 	 * memorize them, since Return nodes get those as inputs.
1508 	 *
1509 	 * Note, that if a register corresponds to an argument, the regs map
1510 	 * contains the old Proj from start for that argument.
1511 	 */
1512 	rm = ALLOCAN(reg_node_map_t, pmap_count(env->regs));
1513 	reg_map_to_arr(rm, env->regs);
1514 	for (i = 0, n = pmap_count(env->regs); i < n; ++i) {
1515 		const arch_register_t    *reg      = rm[i].reg;
1516 		ir_mode                  *mode     = reg->reg_class->mode;
1517 		long                      nr       = i;
1518 		arch_register_req_type_t  add_type = arch_register_req_type_none;
1519 		ir_node                  *proj;
1520 
1521 		if (reg == sp)
1522 			add_type |= arch_register_req_type_produces_sp;
1523 		if (!rbitset_is_set(birg->allocatable_regs, reg->global_index)) {
1524 			add_type |= arch_register_req_type_ignore;
1525 		}
1526 
1527 		assert(nr >= 0);
1528 		proj = new_r_Proj(env->start, mode, nr + 1);
1529 		pmap_insert(env->regs, (void *) reg, proj);
1530 		be_set_constr_single_reg_out(env->start, nr + 1, reg, add_type);
1531 		arch_set_irn_register(proj, reg);
1532 
1533 		DBG((dbg, LEVEL_2, "\tregister save proj #%d -> reg %s\n", nr, reg->name));
1534 	}
1535 
1536 	/* create a new initial memory proj */
1537 	assert(is_Proj(old_mem));
1538 	arch_set_irn_register_req_out(env->start, 0, arch_no_register_req);
1539 	new_mem_proj = new_r_Proj(env->start, mode_M, 0);
1540 	mem = new_mem_proj;
1541 	set_irg_initial_mem(irg, mem);
1542 
1543 	env->init_sp = be_abi_reg_map_get(env->regs, sp);
1544 
1545 	/* set new frame_pointer */
1546 	frame_pointer = be_abi_reg_map_get(env->regs, fp_reg);
1547 	set_irg_frame(irg, frame_pointer);
1548 
1549 	/* rewire old mem users to new mem */
1550 	exchange(old_mem, mem);
1551 
1552 	/* keep the mem (for functions with an endless loop = no return) */
1553 	keep_alive(mem);
1554 
1555 	set_irg_initial_mem(irg, mem);
1556 
	/* Now, introduce stack param nodes for all parameters passed on the stack */
	for (i = 0; i < n_params; ++i) {
		ir_node *arg_proj = args[i];
		ir_node *repl     = NULL;

		if (arg_proj != NULL) {
			be_abi_call_arg_t *arg;
			ir_type *param_type;
			int     nr = get_Proj_proj(arg_proj);
			ir_mode *mode;

			nr         = MIN(nr, n_params);
			arg        = get_call_arg(call, 0, nr, 1);
			param_type = get_method_param_type(method_type, nr);

			if (arg->in_reg) {
				repl = pmap_get(ir_node, env->regs, arg->reg);
			} else if (arg->on_stack) {
				ir_node *addr = be_new_FrameAddr(sp->reg_class, start_bl, frame_pointer, arg->stack_ent);

				/* For atomic parameters which are actually used, we create a Load node. */
				if (is_atomic_type(param_type) && get_irn_n_edges(arg_proj) > 0) {
					ir_mode *param_mode = get_type_mode(param_type);
					ir_mode *load_mode  = arg->load_mode;
					ir_node *nomem      = get_irg_no_mem(irg);

					ir_node *load = new_r_Load(start_bl, nomem, addr, load_mode, cons_floats);
					repl = new_r_Proj(load, load_mode, pn_Load_res);

					if (param_mode != load_mode) {
						repl = new_r_Conv(start_bl, repl, param_mode);
					}
				} else {
					/* The stack parameter is not primitive (it is a struct or
					 * array), so we create a node representing the parameter's
					 * address on the stack instead. */
					repl = addr;
				}
			}

			assert(repl != NULL);

			/* Beware: the mode of a register parameter is always the mode of
			 * its register class, which may differ from the parameter's mode.
			 * Insert a Conv in that case. */
			mode = get_irn_mode(arg_proj);
			if (mode != get_irn_mode(repl)) {
				repl = new_r_Conv(get_nodes_block(repl), repl, mode);
			}
			exchange(arg_proj, repl);
		}
	}

	/* the args tuple is not needed anymore; only the anchor should still use it */
	assert(get_irn_n_edges(arg_tuple) == 1);
	kill_node(arg_tuple);
	set_irg_args(irg, new_r_Bad(irg, mode_T));

	/* All Return nodes hang on the End node, so look for them there. */
	end = get_irg_end_block(irg);
	for (i = 0, n = get_Block_n_cfgpreds(end); i < n; ++i) {
		ir_node *irn = get_Block_cfgpred(end, i);

		if (is_Return(irn)) {
			ir_node *blk = get_nodes_block(irn);
			ir_node *mem = get_Return_mem(irn);
			ir_node *ret = create_be_return(env, irn, blk, mem, get_Return_n_ress(irn));
			exchange(irn, ret);
		}
	}

	/* If the graph contains endless loops, n might be 0 here. Do NOT create
	   a be_Return then: the code is dead and will never be executed. */
}

/** Fix the state inputs of calls that still hang on unknowns */
static void fix_call_state_inputs(ir_graph *irg)
{
	be_abi_irg_t     *env      = be_get_irg_abi(irg);
	const arch_env_t *arch_env = be_get_irg_arch_env(irg);
	int i, n, n_states;
	arch_register_t **stateregs = NEW_ARR_F(arch_register_t*, 0);

	/* Collect the state registers of all register classes */
	n = arch_env->n_register_classes;
	for (i = 0; i < n; ++i) {
		unsigned j;
		const arch_register_class_t *cls = &arch_env->register_classes[i];
		for (j = 0; j < cls->n_regs; ++j) {
			const arch_register_t *reg = arch_register_for_index(cls, j);
			if (reg->type & arch_register_type_state) {
				ARR_APP1(arch_register_t*, stateregs, (arch_register_t *)reg);
			}
		}
	}

	n = ARR_LEN(env->calls);
	n_states = ARR_LEN(stateregs);
	for (i = 0; i < n; ++i) {
		int s, arity;
		ir_node *call = env->calls[i];

		arity = get_irn_arity(call);

		/* the state reg inputs are the last n_states inputs of the calls */
		for (s = 0; s < n_states; ++s) {
			int inp = arity - n_states + s;
			const arch_register_t *reg = stateregs[s];
			ir_node *regnode = be_abi_reg_map_get(env->regs, reg);

			set_irn_n(call, inp, regnode);
		}
	}

	DEL_ARR_F(stateregs);
}

/**
 * Creates a trampoline entity (a private call stub) for the given method.
 * It is used as the call target when the method itself cannot be addressed
 * relatively in position independent code.
 */
static ir_entity *create_trampoline(be_main_env_t *be, ir_entity *method)
{
	ir_type   *type   = get_entity_type(method);
	ident     *old_id = get_entity_ld_ident(method);
	ident     *id     = id_mangle3("", old_id, "$stub");
	ir_type   *parent = be->pic_trampolines_type;
	ir_entity *ent    = new_entity(parent, old_id, type);
	set_entity_ld_ident(ent, id);
	set_entity_visibility(ent, ir_visibility_private);

	return ent;
}

/**
 * Returns the trampoline entity for the given method, creating it on
 * first use.
 */
static ir_entity *get_trampoline(be_main_env_t *env, ir_entity *method)
{
	ir_entity *result = pmap_get(ir_entity, env->ent_trampoline_map, method);
	if (result == NULL) {
		result = create_trampoline(env, method);
		pmap_insert(env->ent_trampoline_map, method, result);
	}

	return result;
}

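/**
 * Creates an entity in the PIC symbols segment that holds a pointer to the
 * given entity (a non-lazy-pointer slot, resolved by the dynamic linker).
 */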
static ir_entity *create_pic_symbol(be_main_env_t *be, ir_entity *entity)
{
	ident     *old_id = get_entity_ld_ident(entity);
	ident     *id     = id_mangle3("", old_id, "$non_lazy_ptr");
	ir_type   *e_type = get_entity_type(entity);
	ir_type   *type   = new_type_pointer(e_type);
	ir_type   *parent = be->pic_symbols_type;
	ir_entity *ent    = new_entity(parent, old_id, type);
	set_entity_ld_ident(ent, id);
	set_entity_visibility(ent, ir_visibility_private);

	return ent;
}

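/**
 * Returns the PIC symbol entity for the given entity, creating it on
 * first use.
 */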
static ir_entity *get_pic_symbol(be_main_env_t *env, ir_entity *entity)
{
	ir_entity *result = pmap_get(ir_entity, env->ent_pic_symbol_map, entity);
	if (result == NULL) {
		result = create_pic_symbol(env, entity);
		pmap_insert(env->ent_pic_symbol_map, entity, result);
	}

	return result;
}



/**
 * Returns non-zero if a given entity can be accessed using a relative address.
 */
static int can_address_relative(ir_entity *entity)
{
	return entity_has_definition(entity) && !(get_entity_linkage(entity) & IR_LINKAGE_MERGE);
}

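/**
 * Returns the node representing the PIC base of the graph (e.g. the GOT
 * pointer) or NULL if the backend does not provide one.
 */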
static ir_node *get_pic_base(ir_graph *irg)
{
	const arch_env_t *arch_env = be_get_irg_arch_env(irg);
	if (arch_env->impl->get_pic_base == NULL)
		return NULL;
	return arch_env->impl->get_pic_base(irg);
}

/**
 * Walker: patches SymConsts to work in position independent code.
 *
 * A rough sketch of the rewriting (the exact nodes depend on the backend):
 *  - call targets that cannot be addressed relatively are routed through
 *    a trampoline:          SymConst(f)   -> SymConst(f$stub)
 *  - locally defined data:  SymConst(ent) -> Add(pic_base, SymConst(ent))
 *  - external data gets an extra indirection:
 *        SymConst(ent) -> Load(Add(pic_base, SymConst(ent$non_lazy_ptr)))
 */
static void fix_pic_symconsts(ir_node *node, void *data)
{
	ir_graph         *irg = get_irn_irg(node);
	be_main_env_t    *be  = be_get_irg_main_env(irg);
	ir_node          *pic_base;
	ir_node          *add;
	ir_node          *block;
	ir_mode          *mode;
	ir_node          *load;
	ir_node          *load_res;
	int               arity, i;
	(void) data;

	arity = get_irn_arity(node);
	for (i = 0; i < arity; ++i) {
		dbg_info  *dbgi;
		ir_node   *pred = get_irn_n(node, i);
		ir_entity *entity;
		ir_entity *pic_symbol;
		ir_node   *pic_symconst;

		if (!is_SymConst(pred))
			continue;

		entity = get_SymConst_entity(pred);
		block  = get_nodes_block(pred);

		/* calls can jump to relative addresses, so we can directly jump to
		   the (relatively) known call address or the trampoline */
		if (i == 1 && is_Call(node)) {
			ir_entity *trampoline;
			ir_node   *trampoline_const;

			if (can_address_relative(entity))
				continue;

			dbgi             = get_irn_dbg_info(pred);
			trampoline       = get_trampoline(be, entity);
			trampoline_const = new_rd_SymConst_addr_ent(dbgi, irg, mode_P_code,
			                                            trampoline);
			set_irn_n(node, i, trampoline_const);
			continue;
		}

		/* everything else is accessed relative to the PIC base (e.g. EIP on x86) */
		mode     = get_irn_mode(pred);
		pic_base = get_pic_base(irg);

		/* all ok now for locally constructed stuff */
		if (can_address_relative(entity)) {
			add = new_r_Add(block, pic_base, pred, mode);

			/* make sure the walker doesn't visit this add again */
			mark_irn_visited(add);
			set_irn_n(node, i, add);
			continue;
		}

		/* get entry from pic symbol segment */
		dbgi         = get_irn_dbg_info(pred);
		pic_symbol   = get_pic_symbol(be, entity);
		pic_symconst = new_rd_SymConst_addr_ent(dbgi, irg, mode_P_code,
		                                        pic_symbol);
		add = new_r_Add(block, pic_base, pic_symconst, mode);
		mark_irn_visited(add);

		/* we need an extra indirection for global data outside our current
		   module. The loads are always safe and can therefore float
		   and need no memory input */
		load     = new_r_Load(block, get_irg_no_mem(irg), add, mode, cons_floats);
		load_res = new_r_Proj(load, mode, pn_Load_res);

		set_irn_n(node, i, load_res);
	}
}

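/**
 * Introduces the ABI into the given graph: fixes SymConsts for PIC (if
 * enabled), lowers all Call nodes, sets up the function's stack and
 * register handling, and records the set of allocatable registers.
 */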
void be_abi_introduce(ir_graph *irg)
{
	be_abi_irg_t     *env         = XMALLOCZ(be_abi_irg_t);
	ir_node          *old_frame   = get_irg_frame(irg);
	const arch_env_t *arch_env    = be_get_irg_arch_env(irg);
	ir_entity        *entity      = get_irg_entity(irg);
	ir_type          *method_type = get_entity_type(entity);
	be_irg_t         *birg        = be_birg_from_irg(irg);
	struct obstack   *obst        = &birg->obst;
	ir_node          *dummy       = new_r_Dummy(irg,
	                                            arch_env->sp->reg_class->mode);
	unsigned          r;

	/* determine allocatable registers */
	assert(birg->allocatable_regs == NULL);
	birg->allocatable_regs = rbitset_obstack_alloc(obst, arch_env->n_registers);
	for (r = 0; r < arch_env->n_registers; ++r) {
		const arch_register_t *reg = &arch_env->registers[r];
		if (!(reg->type & arch_register_type_ignore)) {
			rbitset_set(birg->allocatable_regs, r);
		}
	}

	/* Break here if the backend provides a custom ABI API.
	 * Note: ideally we would not have to set up any be_abi_irg_t state at
	 * all, but more cleanup is needed before that works.
	 */
	be_set_irg_abi(irg, env);

	be_omit_fp        = be_options.omit_fp;

	env->keep_map     = pmap_create();
	env->call         = be_abi_call_new(arch_env->sp->reg_class);
	arch_env_get_call_abi(arch_env, method_type, env->call);

	env->init_sp = dummy;
	env->calls   = NEW_ARR_F(ir_node*, 0);

	assure_edges(irg);

	if (be_options.pic) {
		irg_walk_graph(irg, fix_pic_symconsts, NULL, env);
	}

	/* Lower all call nodes in the IRG. */
	process_calls(irg);

	/* Process the IRG */
	modify_irg(irg);

	/* fix call inputs for state registers */
	fix_call_state_inputs(irg);

	/* We don't need the keep map anymore. */
	pmap_destroy(env->keep_map);
	env->keep_map = NULL;

	/* calls array is not needed anymore */
	DEL_ARR_F(env->calls);
	env->calls = NULL;

	/* reroute the stack origin of the calls to the true stack origin. */
	exchange(dummy, env->init_sp);
	exchange(old_frame, get_irg_frame(irg));

	pmap_destroy(env->regs);
	env->regs = NULL;
}

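/** Releases the ABI state attached to the given graph. */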
void be_abi_free(ir_graph *irg)
{
	be_abi_irg_t *env = be_get_irg_abi(irg);

	if (env->call != NULL)
		be_abi_call_free(env->call);
	assert(env->regs == NULL);
	free(env);

	be_set_irg_abi(irg, NULL);
}

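/**
 * Fills @p bs with the registers of class @p cls that the register
 * allocator may use in @p irg. @p bs must provide exactly cls->n_regs bits.
 */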
void be_put_allocatable_regs(const ir_graph *irg,
                             const arch_register_class_t *cls, bitset_t *bs)
{
	be_irg_t *birg             = be_birg_from_irg(irg);
	unsigned *allocatable_regs = birg->allocatable_regs;
	unsigned  i;

	assert(bitset_size(bs) == cls->n_regs);
	bitset_clear_all(bs);
	for (i = 0; i < cls->n_regs; ++i) {
		const arch_register_t *reg = &cls->regs[i];
		if (rbitset_is_set(allocatable_regs, reg->global_index))
			bitset_set(bs, i);
	}
}

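/** Returns the number of registers of class @p cls that are allocatable in @p irg. */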
unsigned be_get_n_allocatable_regs(const ir_graph *irg,
                                   const arch_register_class_t *cls)
{
	bitset_t *bs = bitset_alloca(cls->n_regs);
	be_put_allocatable_regs(irg, cls, bs);
	return bitset_popcount(bs);
}

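/**
 * Raw-bitset variant of be_put_allocatable_regs(): sets a bit in
 * @p raw_bitset for every allocatable register of class @p cls.
 */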
void be_set_allocatable_regs(const ir_graph *irg,
                             const arch_register_class_t *cls,
                             unsigned *raw_bitset)
{
	be_irg_t *birg             = be_birg_from_irg(irg);
	unsigned *allocatable_regs = birg->allocatable_regs;
	unsigned  i;

	rbitset_clear_all(raw_bitset, cls->n_regs);
	for (i = 0; i < cls->n_regs; ++i) {
		const arch_register_t *reg = &cls->regs[i];
		if (rbitset_is_set(allocatable_regs, reg->global_index))
			rbitset_set(raw_bitset, i);
	}
}

BE_REGISTER_MODULE_CONSTRUCTOR(be_init_abi)
void be_init_abi(void)
{
	FIRM_DBG_REGISTER(dbg, "firm.be.abi");
}