/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief       Helper functions for handling ABI constraints in the code
 *              selection phase.
 * @author      Matthias Braun
 */
#include "config.h"

#include "beabihelper.h"
#include "bearch.h"
#include "benode.h"
#include "besched.h"
#include "ircons.h"
#include "iredges.h"
#include "irgwalk.h"
#include "irnodemap.h"
#include "irtools.h"
#include "heights.h"

/**
 * An entry in the register state map.
 */
typedef struct reg_flag_t {
	const arch_register_t *reg;     /**< register at an input position.
	                                     may be NULL in case of memory input */
	arch_register_req_type_t flags; /**< requirement flags for this register. */
} reg_flag_t;

/**
 * A register state mapping keeps track of the assignment of symbolic values
 * (=firm nodes) to registers. This is useful when constructing straight-line
 * code like the function prolog or epilog in some architectures.
 */
typedef struct register_state_mapping_t {
	ir_node   **value_map;     /**< mapping of state indices to values */
	size_t    **reg_index_map; /**< mapping of regclass,regnum to an index
	                                into the value_map */
	reg_flag_t *regs;          /**< registers (and memory values) that form a
	                                state */
} register_state_mapping_t;
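
/*
 * Illustration (a sketch with made-up indices): after rsm_add_reg() has been
 * called for a register with class index 0 and register index 3, the three
 * maps relate as follows:
 *
 *   rsm->reg_index_map[0][3] == 5       (state index of that register)
 *   rsm->regs[5].reg         == the register itself
 *   rsm->value_map[5]        == the ir_node currently assigned to it
 *
 * State index 0 is always reserved for the memory value, whose
 * regs[0].reg entry is NULL.
 */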

/**
 * The environment for all helper functions.
 */
struct beabi_helper_env_t {
	ir_graph                 *irg;         /**< the graph we operate on */
	register_state_mapping_t  prolog;      /**< the register state map for the prolog */
	register_state_mapping_t  epilog;      /**< the register state map for the epilog */
};
69 
70 /**
71  * Create a new empty register state map for the given
72  * architecture.
73  *
74  * @param rsm       the register state map to be initialized
75  * @param arch_env  the architecture environment
76  *
77  * After this call, the register map is initialized to empty.
78  */
prepare_rsm(register_state_mapping_t * rsm,const arch_env_t * arch_env)79 static void prepare_rsm(register_state_mapping_t *rsm,
80                         const arch_env_t *arch_env)
81 {
82 	unsigned   n_reg_classes = arch_env->n_register_classes;
83 	unsigned   c;
84 	reg_flag_t memory = { NULL, arch_register_req_type_none };
85 
86 	rsm->regs = NEW_ARR_F(reg_flag_t, 0);
87 	/* memory input at 0 */
88 	ARR_APP1(reg_flag_t, rsm->regs, memory);
89 
90 	rsm->value_map     = NULL;
91 	rsm->reg_index_map = XMALLOCN(size_t*, n_reg_classes);
92 	for (c = 0; c < n_reg_classes; ++c) {
93 		const arch_register_class_t *cls    = &arch_env->register_classes[c];
94 		unsigned                     n_regs = arch_register_class_n_regs(cls);
95 		unsigned                     r;
96 
97 		rsm->reg_index_map[c] = XMALLOCN(size_t, n_regs);
98 		for (r = 0; r < n_regs; ++r) {
99 			rsm->reg_index_map[c][r] = (size_t)-1;
100 		}
101 	}
102 }

/**
 * Destroy a register state map for the given
 * architecture and free all its resources.
 *
 * @param rsm       the register state map to be destroyed
 * @param arch_env  the architecture environment
 *
 * After this call, the register map must not be used anymore.
 */
static void free_rsm(register_state_mapping_t *rsm, const arch_env_t *arch_env)
{
	unsigned n_reg_classes = arch_env->n_register_classes;
	unsigned c;

	for (c = 0; c < n_reg_classes; ++c) {
		free(rsm->reg_index_map[c]);
	}

	free(rsm->reg_index_map);
	if (rsm->value_map != NULL)
		DEL_ARR_F(rsm->value_map);
	DEL_ARR_F(rsm->regs);

	rsm->regs          = NULL;
	rsm->reg_index_map = NULL;
	rsm->value_map     = NULL;
}

/**
 * Remove all registers from a register state map.
 *
 * @param rsm       the register state map to be cleared
 * @param arch_env  the architecture environment
 */
static void rsm_clear_regs(register_state_mapping_t *rsm,
                           const arch_env_t *arch_env)
{
	unsigned   n_reg_classes = arch_env->n_register_classes;
	unsigned   c;
	reg_flag_t memory = { NULL, arch_register_req_type_none };

	for (c = 0; c < n_reg_classes; ++c) {
		const arch_register_class_t *cls    = &arch_env->register_classes[c];
		unsigned                     n_regs = arch_register_class_n_regs(cls);
		unsigned                     r;

		for (r = 0; r < n_regs; ++r) {
			rsm->reg_index_map[c][r] = (size_t)-1;
		}
	}
	ARR_RESIZE(reg_flag_t, rsm->regs, 0);
	ARR_APP1(reg_flag_t, rsm->regs, memory);

	if (rsm->value_map != NULL) {
		DEL_ARR_F(rsm->value_map);
		rsm->value_map = NULL;
	}
}

/**
 * Add a register and its constraint flags to a register state map
 * and return its index inside the map.
 */
static size_t rsm_add_reg(register_state_mapping_t *rsm,
                          const arch_register_t *reg,
                          arch_register_req_type_t flags)
{
	size_t     input_idx = ARR_LEN(rsm->regs);
	int        cls_idx   = reg->reg_class->index;
	int        reg_idx   = reg->index;
	reg_flag_t regflag   = { reg, flags };

	/* we must not have used get_value yet */
	assert(rsm->reg_index_map[cls_idx][reg_idx] == (size_t)-1);
	rsm->reg_index_map[cls_idx][reg_idx] = input_idx;
	ARR_APP1(reg_flag_t, rsm->regs, regflag);

	if (rsm->value_map != NULL) {
		ARR_APP1(ir_node*, rsm->value_map, NULL);
		assert(ARR_LEN(rsm->value_map) == ARR_LEN(rsm->regs));
	}
	return input_idx;
}

/**
 * Retrieve the ir_node stored at the given index in the register state map.
 */
static ir_node *rsm_get_value(register_state_mapping_t *rsm, size_t index)
{
	assert(index < ARR_LEN(rsm->value_map));
	return rsm->value_map[index];
}

/**
 * Retrieve the ir_node occupying the given register in the register state map.
 */
static ir_node *rsm_get_reg_value(register_state_mapping_t *rsm,
                                  const arch_register_t *reg)
{
	int    cls_idx   = reg->reg_class->index;
	int    reg_idx   = reg->index;
	size_t input_idx = rsm->reg_index_map[cls_idx][reg_idx];

	return rsm_get_value(rsm, input_idx);
}

/**
 * Enter an ir_node at the given index in the register state map.
 */
static void rsm_set_value(register_state_mapping_t *rsm, size_t index,
                          ir_node *value)
{
	assert(index < ARR_LEN(rsm->value_map));
	rsm->value_map[index] = value;
}

/**
 * Enter an ir_node at the given register in the register state map.
 */
static void rsm_set_reg_value(register_state_mapping_t *rsm,
                              const arch_register_t *reg, ir_node *value)
{
	int    cls_idx   = reg->reg_class->index;
	int    reg_idx   = reg->index;
	size_t input_idx = rsm->reg_index_map[cls_idx][reg_idx];
	rsm_set_value(rsm, input_idx, value);
}


beabi_helper_env_t *be_abihelper_prepare(ir_graph *irg)
{
	const arch_env_t   *arch_env = be_get_irg_arch_env(irg);
	beabi_helper_env_t *env      = XMALLOCZ(beabi_helper_env_t);

	env->irg = irg;
	prepare_rsm(&env->prolog, arch_env);
	prepare_rsm(&env->epilog, arch_env);

	return env;
}

void be_abihelper_finish(beabi_helper_env_t *env)
{
	const arch_env_t *arch_env = be_get_irg_arch_env(env->irg);

	free_rsm(&env->prolog, arch_env);
	if (env->epilog.reg_index_map != NULL) {
		free_rsm(&env->epilog, arch_env);
	}
	xfree(env);
}

void be_prolog_add_reg(beabi_helper_env_t *env, const arch_register_t *reg,
                       arch_register_req_type_t flags)
{
	rsm_add_reg(&env->prolog, reg, flags);
}

ir_node *be_prolog_create_start(beabi_helper_env_t *env, dbg_info *dbgi,
                                ir_node *block)
{
	int      n_start_outs = ARR_LEN(env->prolog.regs);
	ir_node *start        = be_new_Start(dbgi, block, n_start_outs);
	int      o;

	assert(env->prolog.value_map == NULL);
	env->prolog.value_map = NEW_ARR_F(ir_node*, n_start_outs);

	for (o = 0; o < n_start_outs; ++o) {
		const reg_flag_t      *regflag = &env->prolog.regs[o];
		const arch_register_t *reg     = regflag->reg;
		ir_node               *proj;
		if (reg == NULL) {
			arch_set_irn_register_req_out(start, o, arch_no_register_req);
			proj = new_r_Proj(start, mode_M, o);
		} else {
			be_set_constr_single_reg_out(start, o, regflag->reg,
			                             regflag->flags);
			arch_set_irn_register_out(start, o, regflag->reg);
			proj = new_r_Proj(start, reg->reg_class->mode, o);
		}
		env->prolog.value_map[o] = proj;
	}

	return start;
}

ir_node *be_prolog_get_reg_value(beabi_helper_env_t *env,
                                 const arch_register_t *reg)
{
	return rsm_get_reg_value(&env->prolog, reg);
}

ir_node *be_prolog_get_memory(beabi_helper_env_t *env)
{
	return rsm_get_value(&env->prolog, 0);
}

void be_prolog_set_reg_value(beabi_helper_env_t *env,
                             const arch_register_t *reg, ir_node *value)
{
	rsm_set_reg_value(&env->prolog, reg, value);
}

void be_prolog_set_memory(beabi_helper_env_t *env, ir_node *value)
{
	rsm_set_value(&env->prolog, 0, value);
}
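
/*
 * Usage sketch (hypothetical backend code; the concrete registers and flags
 * are assumptions, not prescribed by this API):
 *
 *   beabi_helper_env_t *abihelper = be_abihelper_prepare(irg);
 *   be_prolog_add_reg(abihelper, sp_reg, arch_register_req_type_ignore);
 *   be_prolog_add_reg(abihelper, param_reg, arch_register_req_type_none);
 *   start = be_prolog_create_start(abihelper, dbgi, block);
 *   sp    = be_prolog_get_reg_value(abihelper, sp_reg);
 *   mem   = be_prolog_get_memory(abihelper);
 *
 * All registers must be added before be_prolog_create_start() is called,
 * since that call creates the value map and assigns one Proj per state entry.
 */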


void be_epilog_begin(beabi_helper_env_t *env)
{
	const arch_env_t *arch_env = be_get_irg_arch_env(env->irg);
	rsm_clear_regs(&env->epilog, arch_env);
	env->epilog.value_map    = NEW_ARR_F(ir_node*, 1);
	env->epilog.value_map[0] = NULL;
}

void be_epilog_add_reg(beabi_helper_env_t *env, const arch_register_t *reg,
                       arch_register_req_type_t flags, ir_node *value)
{
	size_t index = rsm_add_reg(&env->epilog, reg, flags);
	rsm_set_value(&env->epilog, index, value);
}

void be_epilog_set_reg_value(beabi_helper_env_t *env,
                             const arch_register_t *reg, ir_node *value)
{
	rsm_set_reg_value(&env->epilog, reg, value);
}

void be_epilog_set_memory(beabi_helper_env_t *env, ir_node *value)
{
	rsm_set_value(&env->epilog, 0, value);
}

ir_node *be_epilog_get_reg_value(beabi_helper_env_t *env,
                                 const arch_register_t *reg)
{
	return rsm_get_reg_value(&env->epilog, reg);
}

ir_node *be_epilog_get_memory(beabi_helper_env_t *env)
{
	return rsm_get_value(&env->epilog, 0);
}

ir_node *be_epilog_create_return(beabi_helper_env_t *env, dbg_info *dbgi,
                                 ir_node *block)
{
	size_t    n_return_in = ARR_LEN(env->epilog.regs);
	ir_node **in          = env->epilog.value_map;
	int       n_res       = 1; /* TODO */
	unsigned  pop         = 0; /* TODO */
	size_t    i;
	ir_node  *ret;

	assert(ARR_LEN(env->epilog.value_map) == n_return_in);

	ret = be_new_Return(dbgi, get_irn_irg(block), block, n_res, pop,
	                    n_return_in, in);
	for (i = 0; i < n_return_in; ++i) {
		const reg_flag_t      *regflag = &env->epilog.regs[i];
		const arch_register_t *reg     = regflag->reg;
		if (reg != NULL) {
			be_set_constr_single_reg_in(ret, i, reg,
			                            arch_register_req_type_none);
		}
	}

	rsm_clear_regs(&env->epilog, be_get_irg_arch_env(env->irg));

	return ret;
}
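
/*
 * Usage sketch (hypothetical backend code, mirroring the prolog side):
 *
 *   be_epilog_begin(abihelper);
 *   be_epilog_set_memory(abihelper, mem);
 *   be_epilog_add_reg(abihelper, sp_reg, arch_register_req_type_ignore, sp);
 *   ret = be_epilog_create_return(abihelper, dbgi, block);
 *
 * In contrast to the prolog, each value is supplied together with its
 * register, and be_epilog_create_return() clears the state again so the
 * helper can be reused for the next return point.
 */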

/**
 * Tests whether a node has a real user and is not just kept by the End or
 * Anchor node.
 */
static bool has_real_user(const ir_node *node)
{
	foreach_out_edge(node, edge) {
		ir_node *user = get_edge_src_irn(edge);
		if (!is_End(user) && !is_Anchor(user))
			return true;
	}
	return false;
}

static ir_node *add_to_keep(ir_node *last_keep,
                            const arch_register_class_t *cls, ir_node *node)
{
	if (last_keep != NULL) {
		be_Keep_add_node(last_keep, cls, node);
	} else {
		ir_node *in[1] = { node };
		ir_node *block = get_nodes_block(node);
		ir_node *schedpoint;
		last_keep = be_new_Keep(block, 1, in);

		schedpoint = skip_Proj(node);
		if (sched_is_scheduled(schedpoint)) {
			sched_add_after(schedpoint, last_keep);
		}
	}
	return last_keep;
}

void be_add_missing_keeps_node(ir_node *node)
{
	int       n_outs, i;
	unsigned *found_projs;
	ir_mode  *mode = get_irn_mode(node);
	ir_node  *last_keep;
	ir_node **existing_projs;

	if (mode != mode_T) {
		if (!has_real_user(node)) {
			const arch_register_req_t   *req = arch_get_irn_register_req(node);
			const arch_register_class_t *cls = req->cls;
			if (cls == NULL
					|| (cls->flags & arch_register_class_flag_manual_ra)) {
				return;
			}

			add_to_keep(NULL, cls, node);
		}
		return;
	}

	n_outs = arch_get_irn_n_outs(node);
	if (n_outs <= 0)
		return;

	rbitset_alloca(found_projs, n_outs);
	existing_projs = ALLOCANZ(ir_node*, n_outs);
	foreach_out_edge(node, edge) {
		ir_node *succ = get_edge_src_irn(edge);
		ir_mode *mode = get_irn_mode(succ);
		int      pn;

		/* The node could be kept */
		if (is_End(succ) || is_Anchor(succ))
			continue;
		if (mode == mode_M || mode == mode_X)
			continue;
		pn = get_Proj_proj(succ);
		/* check the bound before using pn as an array index */
		assert(pn < n_outs);
		existing_projs[pn] = succ;
		if (!has_real_user(succ))
			continue;

		rbitset_set(found_projs, pn);
	}

	/* are keeps missing? */
	last_keep = NULL;
	for (i = 0; i < n_outs; ++i) {
		ir_node                     *value;
		const arch_register_req_t   *req;
		const arch_register_class_t *cls;

		if (rbitset_is_set(found_projs, i)) {
			continue;
		}

		req = arch_get_irn_register_req_out(node, i);
		cls = req->cls;
		if (cls == NULL || (cls->flags & arch_register_class_flag_manual_ra)) {
			continue;
		}

		value = existing_projs[i];
		if (value == NULL)
			value = new_r_Proj(node, arch_register_class_mode(cls), i);
		last_keep = add_to_keep(last_keep, cls, value);
	}
}

static void add_missing_keep_walker(ir_node *node, void *data)
{
	(void)data;
	be_add_missing_keeps_node(node);
}

void be_add_missing_keeps(ir_graph *irg)
{
	irg_walk_graph(irg, add_missing_keep_walker, NULL, NULL);
}
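
/*
 * Usage sketch: a backend would typically run this pass once after
 * instruction selection, e.g.
 *
 *   be_add_missing_keeps(irg);
 *
 * so that values without real users are still kept alive (via be_Keep
 * nodes) and thus remain visible to scheduling and register allocation.
 */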

/**
 * Link the node into its block list as a new head.
 */
static void collect_node(ir_node *node)
{
	ir_node *block = get_nodes_block(node);
	ir_node *old   = (ir_node*)get_irn_link(block);

	set_irn_link(node, old);
	set_irn_link(block, node);
}

/**
 * Post-walker: link all nodes that probably access the stack into lists of
 * their block.
 */
static void link_ops_in_block_walker(ir_node *node, void *data)
{
	(void) data;

	switch (get_irn_opcode(node)) {
	case iro_Return:
	case iro_Call:
		collect_node(node);
		break;
	case iro_Alloc:
		/* all non-stack alloc nodes should be lowered before the backend */
		assert(get_Alloc_where(node) == stack_alloc);
		collect_node(node);
		break;
	case iro_Free:
		assert(get_Free_where(node) == stack_alloc);
		collect_node(node);
		break;
	case iro_Builtin:
		if (get_Builtin_kind(node) == ir_bk_return_address) {
			ir_node   *param = get_Builtin_param(node, 0);
			ir_tarval *tv    = get_Const_tarval(param); /* must be Const */
			long       value = get_tarval_long(tv);
			if (value > 0) {
				/* not the return address of the current function:
				 * we need the stack pointer for the frame climbing */
				collect_node(node);
			}
		}
		break;
	default:
		break;
	}
}

static ir_heights_t *heights;

/**
 * Check if a node is somehow data dependent on another one.
 * Both nodes must be in the same basic block.
 * @param n1 The first node.
 * @param n2 The second node.
 * @return 1, if n1 is data dependent (transitively) on n2, 0 if not.
 */
static int dependent_on(const ir_node *n1, const ir_node *n2)
{
	assert(get_nodes_block(n1) == get_nodes_block(n2));

	return heights_reachable_in_block(heights, n1, n2);
}

/**
 * Classical qsort() comparison function behavior:
 *
 *  0 if both elements are equal, i.e. neither node depends on the other
 * +1 if first depends on second (first is greater)
 * -1 if second depends on first (second is greater)
 */
static int cmp_call_dependency(const void *c1, const void *c2)
{
	const ir_node *n1 = *(const ir_node **) c1;
	const ir_node *n2 = *(const ir_node **) c2;
	unsigned h1, h2;

	if (dependent_on(n1, n2))
		return 1;

	if (dependent_on(n2, n1))
		return -1;

	/* The nodes have no depth order, but we need a total order because qsort()
	 * is not stable.
	 *
	 * Additionally, we need to respect transitive dependencies. Consider a
	 * Call a depending on Call b and an independent Call c.
	 * We MUST NOT order c > a and b > c. */
	h1 = get_irn_height(heights, n1);
	h2 = get_irn_height(heights, n2);
	if (h1 < h2) return  1;
	if (h1 > h2) return -1;
	/* Same height, so use a random (but stable) order */
	return get_irn_idx(n2) - get_irn_idx(n1);
}

/**
 * Block-walker: sorts dependencies and remembers them in a phase.
 */
static void process_ops_in_block(ir_node *block, void *data)
{
	ir_nodemap *map = (ir_nodemap*)data;
	unsigned    n;
	unsigned    n_nodes;
	ir_node    *node;
	ir_node   **nodes;

	n_nodes = 0;
	for (node = (ir_node*)get_irn_link(block); node != NULL;
	     node = (ir_node*)get_irn_link(node)) {
		++n_nodes;
	}

	if (n_nodes == 0)
		return;

	nodes = XMALLOCN(ir_node*, n_nodes);
	n = 0;
	for (node = (ir_node*)get_irn_link(block); node != NULL;
	     node = (ir_node*)get_irn_link(node)) {
		nodes[n++] = node;
	}
	assert(n == n_nodes);

	/* order nodes according to their data dependencies */
	qsort(nodes, n_nodes, sizeof(nodes[0]), cmp_call_dependency);

	/* remember the calculated dependency into a phase */
	for (n = n_nodes-1; n > 0; --n) {
		ir_node *node = nodes[n];
		ir_node *pred = nodes[n-1];

		ir_nodemap_insert(map, node, pred);
	}
	xfree(nodes);
}


struct be_stackorder_t {
	ir_nodemap stack_order; /**< a phase to handle stack dependencies. */
};

be_stackorder_t *be_collect_stacknodes(ir_graph *irg)
{
	be_stackorder_t *env = XMALLOCZ(be_stackorder_t);

	ir_reserve_resources(irg, IR_RESOURCE_IRN_LINK);

	/* collect all potential stack accessing nodes */
	irg_walk_graph(irg, firm_clear_link, link_ops_in_block_walker, NULL);

	ir_nodemap_init(&env->stack_order, irg);

	/* use heights to create a total order for those nodes: this order is stored
	 * in the created phase */
	heights = heights_new(irg);
	irg_block_walk_graph(irg, NULL, process_ops_in_block, &env->stack_order);
	heights_free(heights);

	ir_free_resources(irg, IR_RESOURCE_IRN_LINK);

	return env;
}

ir_node *be_get_stack_pred(const be_stackorder_t *env, const ir_node *node)
{
	return ir_nodemap_get(ir_node, &env->stack_order, node);
}

void be_free_stackorder(be_stackorder_t *env)
{
	ir_nodemap_destroy(&env->stack_order);
	free(env);
}
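
/*
 * Usage sketch (hypothetical): a backend pass that has to keep
 * stack-modifying nodes in a fixed order could query the result like this:
 *
 *   be_stackorder_t *stackorder = be_collect_stacknodes(irg);
 *   ir_node *before = be_get_stack_pred(stackorder, node);
 *   if (before != NULL) {
 *       ... make sure 'node' is emitted after 'before' ...
 *   }
 *   be_free_stackorder(stackorder);
 */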

static void create_stores_for_type(ir_graph *irg, ir_type *type)
{
	size_t   n           = get_compound_n_members(type);
	ir_node *frame       = get_irg_frame(irg);
	ir_node *initial_mem = get_irg_initial_mem(irg);
	ir_node *mem         = initial_mem;
	ir_node *first_store = NULL;
	ir_node *start_block = get_irg_start_block(irg);
	ir_node *args        = get_irg_args(irg);
	size_t   i;

	/* all parameter entities left in the frame type require stores.
	 * (The ones passed on the stack have been moved to the arg type) */
	for (i = 0; i < n; ++i) {
		ir_entity *entity = get_compound_member(type, i);
		ir_node   *addr;
		size_t     arg;
		if (!is_parameter_entity(entity))
			continue;

		arg = get_entity_parameter_number(entity);
		if (arg == IR_VA_START_PARAMETER_NUMBER)
			continue;

		addr = new_r_Sel(start_block, mem, frame, 0, NULL, entity);
		if (entity->attr.parameter.doubleword_low_mode != NULL) {
			ir_mode *mode      = entity->attr.parameter.doubleword_low_mode;
			ir_node *val0      = new_r_Proj(args, mode, arg);
			ir_node *val1      = new_r_Proj(args, mode, arg+1);
			ir_node *store0    = new_r_Store(start_block, mem, addr, val0,
			                                 cons_none);
			ir_node *mem0      = new_r_Proj(store0, mode_M, pn_Store_M);
			size_t   offset    = get_mode_size_bits(mode)/8;
			ir_mode *addr_mode = get_irn_mode(addr);
			ir_node *cnst      = new_r_Const_long(irg, addr_mode, offset);
			ir_node *next_addr = new_r_Add(start_block, addr, cnst, addr_mode);
			ir_node *store1    = new_r_Store(start_block, mem0, next_addr, val1,
			                                 cons_none);
			mem = new_r_Proj(store1, mode_M, pn_Store_M);
			if (first_store == NULL)
				first_store = store0;
		} else {
			ir_type *tp    = get_entity_type(entity);
			ir_mode *mode  = is_compound_type(tp) ? mode_P : get_type_mode(tp);
			ir_node *val   = new_r_Proj(args, mode, arg);
			ir_node *store = new_r_Store(start_block, mem, addr, val, cons_none);
			mem = new_r_Proj(store, mode_M, pn_Store_M);
			if (first_store == NULL)
				first_store = store;
		}
	}

	if (mem != initial_mem)
		edges_reroute_except(initial_mem, mem, first_store);
}
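
/*
 * For a doubleword parameter the loop above builds roughly this IR in the
 * start block (a sketch assuming a 32-bit low mode, hence the Const 4):
 *
 *   addr   = Sel(frame, entity)
 *   store0 = Store(mem, addr, Proj(args, arg))            low half
 *   addr2  = Add(addr, Const(4))
 *   store1 = Store(store0.M, addr2, Proj(args, arg+1))    high half
 *
 * i.e. the two halves end up at consecutive offsets within the frame entity.
 */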

void be_add_parameter_entity_stores(ir_graph *irg)
{
	ir_type           *frame_type   = get_irg_frame_type(irg);
	be_stack_layout_t *layout       = be_get_irg_stack_layout(irg);
	ir_type           *between_type = layout->between_type;

	create_stores_for_type(irg, frame_type);
	if (between_type != NULL) {
		create_stores_for_type(irg, between_type);
	}
}