1 /*
 * Copyright (C) 1995-2010 University of Karlsruhe.  All rights reserved.
3  *
4  * This file is part of libFirm.
5  *
6  * This file may be distributed and/or modified under the terms of the
7  * GNU General Public License version 2 as published by the Free Software
8  * Foundation and appearing in the file LICENSE.GPL included in the
9  * packaging of this file.
10  *
11  * Licensees holding valid libFirm Professional Edition licenses may use
12  * this file in accordance with the libFirm Commercial License.
13  * Agreement provided with the Software.
14  *
15  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17  * PURPOSE.
18  */
19 
20 /**
21  * @file
22  * @brief   The codegenerator (transform FIRM into arm FIRM)
23  * @author  Matthias Braun, Oliver Richter, Tobias Gneist, Michael Beck
24  */
25 #include "config.h"
26 
27 #include "irnode_t.h"
28 #include "irgraph_t.h"
29 #include "irmode_t.h"
30 #include "irgmod.h"
31 #include "iredges.h"
32 #include "ircons.h"
33 #include "irprintf.h"
34 #include "dbginfo.h"
35 #include "iropt_t.h"
36 #include "debug.h"
37 #include "error.h"
38 #include "util.h"
39 
40 #include "benode.h"
41 #include "beirg.h"
42 #include "beutil.h"
43 #include "betranshlp.h"
44 #include "beabihelper.h"
45 #include "beabi.h"
46 
47 #include "bearch_arm_t.h"
48 #include "arm_nodes_attr.h"
49 #include "arm_transform.h"
50 #include "arm_optimize.h"
51 #include "arm_new_nodes.h"
52 #include "arm_map_regs.h"
53 #include "arm_cconv.h"
54 
55 #include "gen_arm_regalloc_if.h"
56 
57 #include <limits.h>
58 
DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

/* Per-graph transformation environment; presumably initialized by the
 * transform driver before any gen_* function runs — the setup code is
 * outside this part of the file. */
static const arch_register_t *sp_reg = &arm_registers[REG_SP];
static ir_mode               *mode_gp;  /* mode used for gp register values */
static ir_mode               *mode_fp;  /* mode used for fp register values */
static beabi_helper_env_t    *abihelper;
static be_stackorder_t       *stackorder;
static calling_convention_t  *cconv = NULL;
static arm_isa_t             *isa;

/* NOTE(review): presumably maps nodes to their stack slot nodes; the
 * users of this map are outside this chunk — verify against them. */
static pmap                  *node_to_stack;

/* registers a callee must preserve across a call */
static const arch_register_t *const callee_saves[] = {
	&arm_registers[REG_R4],
	&arm_registers[REG_R5],
	&arm_registers[REG_R6],
	&arm_registers[REG_R7],
	&arm_registers[REG_R8],
	&arm_registers[REG_R9],
	&arm_registers[REG_R10],
	&arm_registers[REG_R11],
	&arm_registers[REG_LR],
};

/* registers a call may clobber; the caller must save them if live */
static const arch_register_t *const caller_saves[] = {
	&arm_registers[REG_R0],
	&arm_registers[REG_R1],
	&arm_registers[REG_R2],
	&arm_registers[REG_R3],
	&arm_registers[REG_LR],

	&arm_registers[REG_F0],
	&arm_registers[REG_F1],
	&arm_registers[REG_F2],
	&arm_registers[REG_F3],
	&arm_registers[REG_F4],
	&arm_registers[REG_F5],
	&arm_registers[REG_F6],
	&arm_registers[REG_F7],
};
99 
/**
 * Check whether values of the given mode live in general purpose
 * registers (true for integer and reference modes).
 */
static bool mode_needs_gp_reg(ir_mode *mode)
{
	if (mode_is_int(mode))
		return true;
	return mode_is_reference(mode);
}
104 
105 /**
106  * create firm graph for a constant
107  */
create_const_graph_value(dbg_info * dbgi,ir_node * block,unsigned int value)108 static ir_node *create_const_graph_value(dbg_info *dbgi, ir_node *block,
109                                          unsigned int value)
110 {
111 	ir_node *result;
112 	arm_vals v, vn;
113 	int cnt;
114 
115 	/* We only have 8 bit immediates. So we possibly have to combine several
116 	 * operations to construct the desired value.
117 	 *
118 	 * we can either create the value by adding bits to 0 or by removing bits
119 	 * from an register with all bits set. Try which alternative needs fewer
120 	 * operations */
121 	arm_gen_vals_from_word(value, &v);
122 	arm_gen_vals_from_word(~value, &vn);
123 
124 	if (vn.ops < v.ops) {
125 		/* remove bits */
126 		result = new_bd_arm_Mvn_imm(dbgi, block, vn.values[0], vn.rors[0]);
127 
128 		for (cnt = 1; cnt < vn.ops; ++cnt) {
129 			result = new_bd_arm_Bic_imm(dbgi, block, result,
130 			                            vn.values[cnt], vn.rors[cnt]);
131 		}
132 	} else {
133 		/* add bits */
134 		result = new_bd_arm_Mov_imm(dbgi, block, v.values[0], v.rors[0]);
135 
136 		for (cnt = 1; cnt < v.ops; ++cnt) {
137 			result = new_bd_arm_Or_imm(dbgi, block, result,
138 			                           v.values[cnt], v.rors[cnt]);
139 		}
140 	}
141 	return result;
142 }
143 
144 /**
145  * Create a DAG constructing a given Const.
146  *
147  * @param irn  a Firm const
148  */
create_const_graph(ir_node * irn,ir_node * block)149 static ir_node *create_const_graph(ir_node *irn, ir_node *block)
150 {
151 	ir_tarval *tv   = get_Const_tarval(irn);
152 	ir_mode   *mode = get_tarval_mode(tv);
153 	unsigned   value;
154 
155 	if (mode_is_reference(mode)) {
156 		/* ARM is 32bit, so we can safely convert a reference tarval into Iu */
157 		assert(get_mode_size_bits(mode) == get_mode_size_bits(mode_Iu));
158 		tv = tarval_convert_to(tv, mode_Iu);
159 	}
160 	value = get_tarval_long(tv);
161 	return create_const_graph_value(get_irn_dbg_info(irn), block, value);
162 }
163 
164 /**
165  * Create an And that will zero out upper bits.
166  *
167  * @param dbgi     debug info
168  * @param block    the basic block
169  * @param op       the original node
170  * param src_bits  number of lower bits that will remain
171  */
gen_zero_extension(dbg_info * dbgi,ir_node * block,ir_node * op,int src_bits)172 static ir_node *gen_zero_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
173                                    int src_bits)
174 {
175 	if (src_bits == 8) {
176 		return new_bd_arm_And_imm(dbgi, block, op, 0xFF, 0);
177 	} else if (src_bits == 16) {
178 		ir_node *lshift = new_bd_arm_Mov_reg_shift_imm(dbgi, block, op, ARM_SHF_LSL_IMM, 16);
179 		ir_node *rshift = new_bd_arm_Mov_reg_shift_imm(dbgi, block, lshift, ARM_SHF_LSR_IMM, 16);
180 		return rshift;
181 	} else {
182 		panic("zero extension only supported for 8 and 16 bits");
183 	}
184 }
185 
186 /**
187  * Generate code for a sign extension.
188  */
gen_sign_extension(dbg_info * dbgi,ir_node * block,ir_node * op,int src_bits)189 static ir_node *gen_sign_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
190                                    int src_bits)
191 {
192 	int shift_width = 32 - src_bits;
193 	ir_node *lshift_node = new_bd_arm_Mov_reg_shift_imm(dbgi, block, op, ARM_SHF_LSL_IMM, shift_width);
194 	ir_node *rshift_node = new_bd_arm_Mov_reg_shift_imm(dbgi, block, lshift_node, ARM_SHF_ASR_IMM, shift_width);
195 	return rshift_node;
196 }
197 
/**
 * Extend a value of mode orig_mode to a full 32bit register value,
 * sign- or zero-extending depending on the mode's signedness.
 */
static ir_node *gen_extension(dbg_info *dbgi, ir_node *block, ir_node *op,
                              ir_mode *orig_mode)
{
	int bits = get_mode_size_bits(orig_mode);

	if (bits == 32)
		return op;
	return mode_is_signed(orig_mode)
	     ? gen_sign_extension(dbgi, block, op, bits)
	     : gen_zero_extension(dbgi, block, op, bits);
}
211 
212 /**
213  * returns true if it is assured, that the upper bits of a node are "clean"
214  * which means for a 16 or 8 bit value, that the upper bits in the register
215  * are 0 for unsigned and a copy of the last significant bit for signed
216  * numbers.
217  */
upper_bits_clean(ir_node * transformed_node,ir_mode * mode)218 static bool upper_bits_clean(ir_node *transformed_node, ir_mode *mode)
219 {
220 	(void) transformed_node;
221 	(void) mode;
222 	/* TODO */
223 	return false;
224 }
225 
/**
 * Transforms a Conv node.
 *
 * @return The created arm Conv node
 */
static ir_node *gen_Conv(ir_node *node)
{
	ir_node  *block    = be_transform_node(get_nodes_block(node));
	ir_node  *op       = get_Conv_op(node);
	ir_node  *new_op   = be_transform_node(op);
	ir_mode  *src_mode = get_irn_mode(op);
	ir_mode  *dst_mode = get_irn_mode(node);
	dbg_info *dbg      = get_irn_dbg_info(node);

	if (src_mode == dst_mode)
		return new_op;

	if (mode_is_float(src_mode) || mode_is_float(dst_mode)) {
		/* only FPA float conversions are implemented so far */
		if (USE_FPA(isa)) {
			if (mode_is_float(src_mode)) {
				if (mode_is_float(dst_mode)) {
					/* from float to float */
					return new_bd_arm_Mvf(dbg, block, new_op, dst_mode);
				} else {
					/* from float to int */
					panic("TODO");
				}
			} else {
				/* from int to float */
				if (!mode_is_signed(src_mode)) {
					panic("TODO");
				} else {
					return new_bd_arm_FltX(dbg, block, new_op, dst_mode);
				}
			}
		} else if (USE_VFP(isa)) {
			panic("VFP not supported yet");
		} else {
			panic("Softfloat not supported yet");
		}
	} else { /* complete in gp registers */
		int src_bits = get_mode_size_bits(src_mode);
		int dst_bits = get_mode_size_bits(dst_mode);
		int min_bits;
		ir_mode *min_mode;

		if (src_bits == dst_bits) {
			/* kill unnecessary conv */
			return new_op;
		}

		/* only the smaller of the two modes matters: narrowing and
		 * widening both reduce to extending from min_bits */
		if (src_bits < dst_bits) {
			min_bits = src_bits;
			min_mode = src_mode;
		} else {
			min_bits = dst_bits;
			min_mode = dst_mode;
		}

		/* extension is a no-op if the upper bits are already clean */
		if (upper_bits_clean(new_op, min_mode)) {
			return new_op;
		}

		if (mode_is_signed(min_mode)) {
			return gen_sign_extension(dbg, block, new_op, min_bits);
		} else {
			return gen_zero_extension(dbg, block, new_op, min_bits);
		}
	}
}
296 
/** An encodable arm data-processing immediate: an 8bit value together with
 * a rotation.  NOTE(review): rot appears to be the (even) right-rotate
 * amount applied to imm_8 — confirm against the node constructors. */
typedef struct {
	unsigned char  imm_8;  /**< the 8bit payload */
	unsigned char  rot;    /**< rotation applied to imm_8 */
} arm_immediate_t;
301 
/** Try to express the given Const node as an arm data-processing immediate.
 * Returns true and fills in res on success, false otherwise. */
static bool try_encode_as_immediate(const ir_node *node, arm_immediate_t *res)
{
	unsigned val, low_pos, high_pos;

	if (!is_Const(node))
		return false;

	val = get_tarval_long(get_Const_tarval(node));

	if (val == 0) {
		res->imm_8 = 0;
		res->rot   = 0;
		return true;
	}
	if (val <= 0xff) {
		/* fits directly into the 8bit field, no rotation needed */
		res->imm_8 = val;
		res->rot   = 0;
		return true;
	}
	/* arm allows an 8bit immediate value rotated by a multiple of 2
	   (= 0, 2, 4, 6, ...).
	   So we determine the smallest even position with a bit set
	   and the highest even position with no bit set anymore.
	   If the difference between these 2 is <= 8, then we can encode the value
	   as immediate.
	 */
	low_pos  = ntz(val) & ~1u;
	high_pos = (32-nlz(val)+1) & ~1u;

	if (high_pos - low_pos <= 8) {
		res->imm_8 = val >> low_pos;
		res->rot   = 32 - low_pos;
		return true;
	}

	/* the used bit range may also wrap around the word boundary */
	if (high_pos > 24) {
		res->rot = 34 - high_pos;
		val      = val >> (32-res->rot) | val << (res->rot);
		if (val <= 0xff) {
			res->imm_8 = val;
			return true;
		}
	}

	return false;
}
348 
is_downconv(const ir_node * node)349 static bool is_downconv(const ir_node *node)
350 {
351 	ir_mode *src_mode;
352 	ir_mode *dest_mode;
353 
354 	if (!is_Conv(node))
355 		return false;
356 
357 	/* we only want to skip the conv when we're the only user
358 	 * (not optimal but for now...)
359 	 */
360 	if (get_irn_n_edges(node) > 1)
361 		return false;
362 
363 	src_mode  = get_irn_mode(get_Conv_op(node));
364 	dest_mode = get_irn_mode(node);
365 	return
366 		mode_needs_gp_reg(src_mode)  &&
367 		mode_needs_gp_reg(dest_mode) &&
368 		get_mode_size_bits(dest_mode) <= get_mode_size_bits(src_mode);
369 }
370 
/** Skip over any chain of down-Convs and return the underlying value. */
static ir_node *arm_skip_downconv(ir_node *node)
{
	for (;;) {
		if (!is_downconv(node))
			return node;
		node = get_Conv_op(node);
	}
}
377 
/** Flags steering the operand matching in gen_int_binop(). */
typedef enum {
	MATCH_NONE         = 0,       /**< no special matching */
	MATCH_COMMUTATIVE  = 1 << 0,  /**< commutative node */
	MATCH_REVERSE      = 1 << 1,  /**< support reverse opcode */
	MATCH_SIZE_NEUTRAL = 1 << 2,  /**< operation is size neutral;
	                                   down-Convs on the inputs may be skipped */
	MATCH_SKIP_NOT     = 1 << 3,  /**< skip Not on ONE input */
} match_flags_t;
ENUM_BITSET(match_flags_t)
386 
/**
 * possible binop constructors.
 */
typedef struct arm_binop_factory_t {
	/** normal reg op reg operation. */
	ir_node *(*new_binop_reg)(dbg_info *dbgi, ir_node *block, ir_node *op1, ir_node *op2);
	/** normal reg op imm operation. */
	ir_node *(*new_binop_imm)(dbg_info *dbgi, ir_node *block, ir_node *op1, unsigned char imm8, unsigned char imm_rot);
	/** barrel shifter reg op (reg shift reg) operation. */
	ir_node *(*new_binop_reg_shift_reg)(dbg_info *dbgi, ir_node *block, ir_node *left, ir_node *right, ir_node *shift, arm_shift_modifier_t shift_modifier);
	/** barrel shifter reg op (reg shift imm) operation. */
	ir_node *(*new_binop_reg_shift_imm)(dbg_info *dbgi, ir_node *block, ir_node *left, ir_node *right, arm_shift_modifier_t shift_modifier, unsigned shift_immediate);
} arm_binop_factory_t;
400 
/**
 * Transform an integer binop, trying increasingly cheaper encodings:
 * encodable immediate operand, folding a Mov (shifter operand) into the
 * instruction, and finally the plain reg-reg form.
 *
 * With MATCH_REVERSE the factory points to a 2-element array:
 * factory[0] is the normal opcode, factory[1] the reversed one.
 */
static ir_node *gen_int_binop(ir_node *node, match_flags_t flags,
		const arm_binop_factory_t *factory)
{
	ir_node  *block   = be_transform_node(get_nodes_block(node));
	ir_node  *op1     = get_binop_left(node);
	ir_node  *new_op1;
	ir_node  *op2     = get_binop_right(node);
	ir_node  *new_op2;
	dbg_info *dbgi    = get_irn_dbg_info(node);
	arm_immediate_t imm;

	if (flags & MATCH_SKIP_NOT) {
		/* exactly one input must carry the Not (e.g. And+Not -> Bic) */
		if (is_Not(op1))
			op1 = get_Not_op(op1);
		else if (is_Not(op2))
			op2 = get_Not_op(op2);
		else
			panic("cannot execute MATCH_SKIP_NOT");
	}
	if (flags & MATCH_SIZE_NEUTRAL) {
		op1 = arm_skip_downconv(op1);
		op2 = arm_skip_downconv(op2);
	} else {
		assert(get_mode_size_bits(get_irn_mode(node)) == 32);
	}

	/* right operand encodable as immediate? */
	if (try_encode_as_immediate(op2, &imm)) {
		new_op1 = be_transform_node(op1);
		return factory->new_binop_imm(dbgi, block, new_op1, imm.imm_8, imm.rot);
	}
	new_op2 = be_transform_node(op2);
    if ((flags & (MATCH_COMMUTATIVE|MATCH_REVERSE)) && try_encode_as_immediate(op1, &imm)) {
		/* left operand is an immediate: use the reversed opcode if there
		 * is one, otherwise rely on commutativity */
		if (flags & MATCH_REVERSE)
			return factory[1].new_binop_imm(dbgi, block, new_op2, imm.imm_8, imm.rot);
		else
			return factory[0].new_binop_imm(dbgi, block, new_op2, imm.imm_8, imm.rot);
	}
	new_op1 = be_transform_node(op1);

	/* check if we can fold in a Mov */
	if (is_arm_Mov(new_op2)) {
		const arm_shifter_operand_t *attr = get_arm_shifter_operand_attr_const(new_op2);

		switch (attr->shift_modifier) {
		case ARM_SHF_IMM:
		case ARM_SHF_ASR_IMM:
		case ARM_SHF_LSL_IMM:
		case ARM_SHF_LSR_IMM:
		case ARM_SHF_ROR_IMM:
			if (factory->new_binop_reg_shift_imm) {
				ir_node *mov_op = get_irn_n(new_op2, 0);
				return factory->new_binop_reg_shift_imm(dbgi, block, new_op1, mov_op,
					attr->shift_modifier, attr->shift_immediate);
			}
			break;

		case ARM_SHF_ASR_REG:
		case ARM_SHF_LSL_REG:
		case ARM_SHF_LSR_REG:
		case ARM_SHF_ROR_REG:
			if (factory->new_binop_reg_shift_reg) {
				ir_node *mov_op  = get_irn_n(new_op2, 0);
				ir_node *mov_sft = get_irn_n(new_op2, 1);
				return factory->new_binop_reg_shift_reg(dbgi, block, new_op1, mov_op, mov_sft,
					attr->shift_modifier);
			}
			break;
		case ARM_SHF_REG:
		case ARM_SHF_RRX:
			break;
		case ARM_SHF_INVALID:
			panic("invalid shift");
		}
	}
	/* same Mov folding for the left operand, using the reversed opcode
	 * where required */
	if ((flags & (MATCH_COMMUTATIVE|MATCH_REVERSE)) && is_arm_Mov(new_op1)) {
		const arm_shifter_operand_t *attr = get_arm_shifter_operand_attr_const(new_op1);
		int idx = flags & MATCH_REVERSE ? 1 : 0;

		switch (attr->shift_modifier) {
		ir_node *mov_op, *mov_sft;

		case ARM_SHF_IMM:
		case ARM_SHF_ASR_IMM:
		case ARM_SHF_LSL_IMM:
		case ARM_SHF_LSR_IMM:
		case ARM_SHF_ROR_IMM:
			if (factory[idx].new_binop_reg_shift_imm) {
				mov_op = get_irn_n(new_op1, 0);
				return factory[idx].new_binop_reg_shift_imm(dbgi, block, new_op2, mov_op,
					attr->shift_modifier, attr->shift_immediate);
			}
			break;

		case ARM_SHF_ASR_REG:
		case ARM_SHF_LSL_REG:
		case ARM_SHF_LSR_REG:
		case ARM_SHF_ROR_REG:
			if (factory[idx].new_binop_reg_shift_reg) {
				mov_op  = get_irn_n(new_op1, 0);
				mov_sft = get_irn_n(new_op1, 1);
				return factory[idx].new_binop_reg_shift_reg(dbgi, block, new_op2, mov_op, mov_sft,
					attr->shift_modifier);
			}
			break;

		case ARM_SHF_REG:
		case ARM_SHF_RRX:
			break;
		case ARM_SHF_INVALID:
			panic("invalid shift");
		}
	}
	/* fallback: plain register-register form */
	return factory->new_binop_reg(dbgi, block, new_op1, new_op2);
}
515 
/**
 * Creates an ARM Add.
 *
 * @return the created arm Add node
 */
static ir_node *gen_Add(ir_node *node)
{
	static const arm_binop_factory_t add_factory = {
		new_bd_arm_Add_reg,
		new_bd_arm_Add_imm,
		new_bd_arm_Add_reg_shift_reg,
		new_bd_arm_Add_reg_shift_imm
	};

	ir_mode *mode = get_irn_mode(node);

	if (mode_is_float(mode)) {
		ir_node  *block   = be_transform_node(get_nodes_block(node));
		ir_node  *op1     = get_Add_left(node);
		ir_node  *op2     = get_Add_right(node);
		dbg_info *dbgi    = get_irn_dbg_info(node);
		ir_node  *new_op1 = be_transform_node(op1);
		ir_node  *new_op2 = be_transform_node(op2);
		if (USE_FPA(isa)) {
			return new_bd_arm_Adf(dbgi, block, new_op1, new_op2, mode);
		} else if (USE_VFP(isa)) {
			panic("VFP not supported yet");
		} else {
			panic("Softfloat not supported yet");
		}
	} else {
		/* disabled Mul+Add -> Mla fusion; kept for reference */
#if 0
		/* check for MLA */
		if (is_arm_Mul(new_op1) && get_irn_n_edges(op1) == 1) {
			new_op3 = new_op2;
			new_op2 = get_irn_n(new_op1, 1);
			new_op1 = get_irn_n(new_op1, 0);

			return new_bd_arm_Mla(dbgi, block, new_op1, new_op2, new_op3);
		}
		if (is_arm_Mul(new_op2) && get_irn_n_edges(op2) == 1) {
			new_op3 = new_op1;
			new_op1 = get_irn_n(new_op2, 0);
			new_op2 = get_irn_n(new_op2, 1);

			return new_bd_arm_Mla(dbgi, block, new_op1, new_op2, new_op3);
		}
#endif

		return gen_int_binop(node, MATCH_COMMUTATIVE | MATCH_SIZE_NEUTRAL, &add_factory);
	}
}
568 
569 /**
570  * Creates an ARM Mul.
571  *
572  * @return the created arm Mul node
573  */
gen_Mul(ir_node * node)574 static ir_node *gen_Mul(ir_node *node)
575 {
576 	ir_node  *block   = be_transform_node(get_nodes_block(node));
577 	ir_node  *op1     = get_Mul_left(node);
578 	ir_node  *new_op1 = be_transform_node(op1);
579 	ir_node  *op2     = get_Mul_right(node);
580 	ir_node  *new_op2 = be_transform_node(op2);
581 	ir_mode  *mode    = get_irn_mode(node);
582 	dbg_info *dbg     = get_irn_dbg_info(node);
583 
584 	if (mode_is_float(mode)) {
585 		if (USE_FPA(isa)) {
586 			return new_bd_arm_Muf(dbg, block, new_op1, new_op2, mode);
587 		} else if (USE_VFP(isa)) {
588 			panic("VFP not supported yet");
589 		} else {
590 			panic("Softfloat not supported yet");
591 		}
592 	}
593 	assert(mode_is_data(mode));
594 	return new_bd_arm_Mul(dbg, block, new_op1, new_op2);
595 }
596 
/**
 * Transform a float Div.  Integer division has been lowered to a builtin
 * call before this phase runs.
 */
static ir_node *gen_Div(ir_node *node)
{
	ir_node  *block   = be_transform_node(get_nodes_block(node));
	ir_node  *new_op1 = be_transform_node(get_Div_left(node));
	ir_node  *new_op2 = be_transform_node(get_Div_right(node));
	ir_mode  *mode    = get_Div_resmode(node);
	dbg_info *dbg     = get_irn_dbg_info(node);

	/* integer division should be replaced by builtin call */
	assert(mode_is_float(mode));

	if (USE_FPA(isa))
		return new_bd_arm_Dvf(dbg, block, new_op1, new_op2, mode);
	if (USE_VFP(isa))
		panic("VFP not supported yet");
	panic("Softfloat not supported yet");
}
618 
/**
 * Transform an And node.  "a And Not(b)" is turned into a Bic so the
 * inversion is folded into the instruction.
 */
static ir_node *gen_And(ir_node *node)
{
	static const arm_binop_factory_t and_factory = {
		new_bd_arm_And_reg,
		new_bd_arm_And_imm,
		new_bd_arm_And_reg_shift_reg,
		new_bd_arm_And_reg_shift_imm
	};
	static const arm_binop_factory_t bic_factory = {
		new_bd_arm_Bic_reg,
		new_bd_arm_Bic_imm,
		new_bd_arm_Bic_reg_shift_reg,
		new_bd_arm_Bic_reg_shift_imm
	};
	const arm_binop_factory_t *factory = &and_factory;
	match_flags_t              flags   = MATCH_COMMUTATIVE | MATCH_SIZE_NEUTRAL;

	/* check for and not */
	if (is_Not(get_And_left(node)) || is_Not(get_And_right(node))) {
		factory = &bic_factory;
		flags  |= MATCH_SKIP_NOT;
	}
	return gen_int_binop(node, flags, factory);
}
645 
/** Transform an Or node into an arm Or. */
static ir_node *gen_Or(ir_node *node)
{
	static const arm_binop_factory_t or_ops = {
		new_bd_arm_Or_reg,
		new_bd_arm_Or_imm,
		new_bd_arm_Or_reg_shift_reg,
		new_bd_arm_Or_reg_shift_imm
	};
	const match_flags_t flags = MATCH_COMMUTATIVE | MATCH_SIZE_NEUTRAL;

	return gen_int_binop(node, flags, &or_ops);
}
657 
/** Transform an Eor (xor) node into an arm Eor. */
static ir_node *gen_Eor(ir_node *node)
{
	static const arm_binop_factory_t eor_ops = {
		new_bd_arm_Eor_reg,
		new_bd_arm_Eor_imm,
		new_bd_arm_Eor_reg_shift_reg,
		new_bd_arm_Eor_reg_shift_imm
	};
	const match_flags_t flags = MATCH_COMMUTATIVE | MATCH_SIZE_NEUTRAL;

	return gen_int_binop(node, flags, &eor_ops);
}
669 
/**
 * Transform a Sub node.  Integer Sub uses a factory pair so an immediate
 * left operand can be handled with the reversed opcode Rsb.
 */
static ir_node *gen_Sub(ir_node *node)
{
	static const arm_binop_factory_t sub_rsb_factory[2] = {
		{
			new_bd_arm_Sub_reg,
			new_bd_arm_Sub_imm,
			new_bd_arm_Sub_reg_shift_reg,
			new_bd_arm_Sub_reg_shift_imm
		},
		{
			new_bd_arm_Rsb_reg,
			new_bd_arm_Rsb_imm,
			new_bd_arm_Rsb_reg_shift_reg,
			new_bd_arm_Rsb_reg_shift_imm
		}
	};

	ir_mode *mode = get_irn_mode(node);

	if (!mode_is_float(mode))
		return gen_int_binop(node, MATCH_SIZE_NEUTRAL | MATCH_REVERSE,
		                     sub_rsb_factory);

	{
		ir_node  *block   = be_transform_node(get_nodes_block(node));
		ir_node  *new_op1 = be_transform_node(get_Sub_left(node));
		ir_node  *new_op2 = be_transform_node(get_Sub_right(node));
		dbg_info *dbgi    = get_irn_dbg_info(node);

		if (USE_FPA(isa))
			return new_bd_arm_Suf(dbgi, block, new_op1, new_op2, mode);
		if (USE_VFP(isa))
			panic("VFP not supported yet");
		panic("Softfloat not supported yet");
	}
}
707 
708 /**
709  * Checks if a given value can be used as an immediate for the given
710  * ARM shift mode.
711  */
can_use_shift_constant(unsigned int val,arm_shift_modifier_t modifier)712 static bool can_use_shift_constant(unsigned int val,
713                                    arm_shift_modifier_t modifier)
714 {
715 	if (val <= 31)
716 		return true;
717 	if (val == 32 && modifier != ARM_SHF_LSL_REG && modifier != ARM_SHF_ROR_REG)
718 		return true;
719 	return false;
720 }
721 
722 /**
723  * generate an ARM shift instruction.
724  *
725  * @param node            the node
726  * @param flags           matching flags
727  * @param shift_modifier  initial encoding of the desired shift operation
728  */
make_shift(ir_node * node,match_flags_t flags,arm_shift_modifier_t shift_modifier)729 static ir_node *make_shift(ir_node *node, match_flags_t flags,
730 		arm_shift_modifier_t shift_modifier)
731 {
732 	ir_node  *block = be_transform_node(get_nodes_block(node));
733 	ir_node  *op1   = get_binop_left(node);
734 	ir_node  *op2   = get_binop_right(node);
735 	dbg_info *dbgi  = get_irn_dbg_info(node);
736 	ir_mode  *mode  = get_irn_mode(node);
737 	ir_node  *new_op1;
738 	ir_node  *new_op2;
739 
740 	if (get_mode_modulo_shift(mode) != 32)
741 		panic("modulo shift!=32 not supported");
742 
743 	if (flags & MATCH_SIZE_NEUTRAL) {
744 		op1 = arm_skip_downconv(op1);
745 		op2 = arm_skip_downconv(op2);
746 	}
747 
748 	new_op1 = be_transform_node(op1);
749 	if (is_Const(op2)) {
750 		ir_tarval   *tv  = get_Const_tarval(op2);
751 		unsigned int val = get_tarval_long(tv);
752 		assert(tarval_is_long(tv));
753 		if (can_use_shift_constant(val, shift_modifier)) {
754 			switch (shift_modifier) {
755 			case ARM_SHF_LSL_REG: shift_modifier = ARM_SHF_LSL_IMM; break;
756 			case ARM_SHF_LSR_REG: shift_modifier = ARM_SHF_LSR_IMM; break;
757 			case ARM_SHF_ASR_REG: shift_modifier = ARM_SHF_ASR_IMM; break;
758 			case ARM_SHF_ROR_REG: shift_modifier = ARM_SHF_ROR_IMM; break;
759 			default: panic("unexpected shift modifier");
760 			}
761 			return new_bd_arm_Mov_reg_shift_imm(dbgi, block, new_op1,
762 			                                    shift_modifier, val);
763 		}
764 	}
765 
766 	new_op2 = be_transform_node(op2);
767 	return new_bd_arm_Mov_reg_shift_reg(dbgi, block, new_op1, new_op2,
768 	                                    shift_modifier);
769 }
770 
/** Transform a Shl into a Mov with an LSL shifter operand.  Left shifts do
 * not read the upper operand bits, so down-Convs may be skipped. */
static ir_node *gen_Shl(ir_node *node)
{
	return make_shift(node, MATCH_SIZE_NEUTRAL, ARM_SHF_LSL_REG);
}
775 
/** Transform a Shr into a Mov with an LSR shifter operand.  Right shifts
 * depend on the upper bits, hence MATCH_NONE (no down-Conv skipping). */
static ir_node *gen_Shr(ir_node *node)
{
	return make_shift(node, MATCH_NONE, ARM_SHF_LSR_REG);
}
780 
/** Transform a Shrs (arithmetic shift right) into a Mov with an ASR
 * shifter operand; upper bits matter, hence MATCH_NONE. */
static ir_node *gen_Shrs(ir_node *node)
{
	return make_shift(node, MATCH_NONE, ARM_SHF_ASR_REG);
}
785 
/** Create a rotate-right of op1 by the register amount op2. */
static ir_node *gen_Ror(ir_node *node, ir_node *op1, ir_node *op2)
{
	dbg_info *dbgi    = get_irn_dbg_info(node);
	ir_node  *block   = be_transform_node(get_nodes_block(node));
	ir_node  *new_op1 = be_transform_node(op1);
	ir_node  *new_op2 = be_transform_node(op2);

	return new_bd_arm_Mov_reg_shift_reg(dbgi, block, new_op1, new_op2,
	                                    ARM_SHF_ROR_REG);
}
796 
/** Create a rotate-left of op1 by the register amount op2. */
static ir_node *gen_Rol(ir_node *node, ir_node *op1, ir_node *op2)
{
	dbg_info *dbgi    = get_irn_dbg_info(node);
	ir_node  *block   = be_transform_node(get_nodes_block(node));
	ir_node  *new_op1 = be_transform_node(op1);
	ir_node  *amount  = be_transform_node(op2);

	/* Note: there is no Rol on arm, we have to use Ror(op1, 32 - op2) */
	amount = new_bd_arm_Rsb_imm(dbgi, block, amount, 32, 0);
	return new_bd_arm_Mov_reg_shift_reg(dbgi, block, new_op1, amount,
	                                    ARM_SHF_ROR_REG);
}
809 
/**
 * Transform a Rotl.  Firm only has rotate-left; arm only has rotate-right,
 * so this tries to recognize amounts of the shape "32 - e" (written as
 * "-e + 32" or "32 - e") and emit a Ror by e instead.  A constant amount
 * becomes an immediate Ror; everything else falls back to gen_Rol().
 */
static ir_node *gen_Rotl(ir_node *node)
{
	ir_node *rotate = NULL;
	ir_node *op1    = get_Rotl_left(node);
	ir_node *op2    = get_Rotl_right(node);

	/* Firm has only RotL, so we are looking for a right (op2)
	   operand "-e+mode_size_bits" (it's an already modified "mode_size_bits-e",
	   that means we can create a RotR. */

	if (is_Add(op2)) {
		/* pattern: Rotl(x, Add(Minus(e), 32)) -> Ror(x, e) */
		ir_node *right = get_Add_right(op2);
		if (is_Const(right)) {
			ir_tarval *tv   = get_Const_tarval(right);
			ir_mode   *mode = get_irn_mode(node);
			long       bits = get_mode_size_bits(mode);
			ir_node   *left = get_Add_left(op2);

			if (is_Minus(left) &&
			    tarval_is_long(tv)          &&
			    get_tarval_long(tv) == bits &&
			    bits                == 32)
				rotate = gen_Ror(node, op1, get_Minus_op(left));
		}
	} else if (is_Sub(op2)) {
		/* pattern: Rotl(x, Sub(32, e)) -> Ror(x, e) */
		ir_node *left = get_Sub_left(op2);
		if (is_Const(left)) {
			ir_tarval *tv   = get_Const_tarval(left);
			ir_mode   *mode = get_irn_mode(node);
			long       bits = get_mode_size_bits(mode);
			ir_node   *right = get_Sub_right(op2);

			if (tarval_is_long(tv)          &&
			    get_tarval_long(tv) == bits &&
			    bits                == 32)
				rotate = gen_Ror(node, op1, right);
		}
	} else if (is_Const(op2)) {
		/* constant rotate amount: emit an immediate Ror by (32 - c) mod 32 */
		ir_tarval *tv   = get_Const_tarval(op2);
		ir_mode   *mode = get_irn_mode(node);
		long       bits = get_mode_size_bits(mode);

		if (tarval_is_long(tv) && bits == 32) {
			ir_node  *block   = be_transform_node(get_nodes_block(node));
			ir_node  *new_op1 = be_transform_node(op1);
			dbg_info *dbgi    = get_irn_dbg_info(node);

			bits = (bits - get_tarval_long(tv)) & 31;
			rotate = new_bd_arm_Mov_reg_shift_imm(dbgi, block, new_op1, ARM_SHF_ROR_IMM, bits);
		}
	}

	if (rotate == NULL) {
		rotate = gen_Rol(node, op1, op2);
	}

	return rotate;
}
868 
/**
 * Transform a Not into an Mvn, folding a shifter-operand Mov of the
 * operand into the Mvn where possible.
 */
static ir_node *gen_Not(ir_node *node)
{
	ir_node  *block   = be_transform_node(get_nodes_block(node));
	ir_node  *op      = get_Not_op(node);
	ir_node  *new_op  = be_transform_node(op);
	dbg_info *dbgi    = get_irn_dbg_info(node);

	/* check if we can fold in a Mov */
	if (is_arm_Mov(new_op)) {
		const arm_shifter_operand_t *attr = get_arm_shifter_operand_attr_const(new_op);

		switch (attr->shift_modifier) {
		ir_node *mov_op, *mov_sft;

		case ARM_SHF_IMM:
		case ARM_SHF_ASR_IMM:
		case ARM_SHF_LSL_IMM:
		case ARM_SHF_LSR_IMM:
		case ARM_SHF_ROR_IMM:
			/* Mvn with immediate-shifted register operand */
			mov_op = get_irn_n(new_op, 0);
			return new_bd_arm_Mvn_reg_shift_imm(dbgi, block, mov_op,
				attr->shift_modifier, attr->shift_immediate);

		case ARM_SHF_ASR_REG:
		case ARM_SHF_LSL_REG:
		case ARM_SHF_LSR_REG:
		case ARM_SHF_ROR_REG:
			/* Mvn with register-shifted register operand */
			mov_op  = get_irn_n(new_op, 0);
			mov_sft = get_irn_n(new_op, 1);
			return new_bd_arm_Mvn_reg_shift_reg(dbgi, block, mov_op, mov_sft,
				attr->shift_modifier);

		case ARM_SHF_REG:
		case ARM_SHF_RRX:
			break;
		case ARM_SHF_INVALID:
			panic("invalid shift");
		}
	}

	return new_bd_arm_Mvn_reg(dbgi, block, new_op);
}
911 
/**
 * Transform a Minus node.
 *
 * Integer minus is implemented as Rsb with immediate 0 (0 - op).
 * NOTE(review): for FPA an Mvf ("move float") is emitted — presumably the
 * negation is handled elsewhere or Mnf was intended; confirm against the
 * FPA emitter.
 */
static ir_node *gen_Minus(ir_node *node)
{
	ir_node  *block   = be_transform_node(get_nodes_block(node));
	ir_node  *op      = get_Minus_op(node);
	ir_node  *new_op  = be_transform_node(op);
	dbg_info *dbgi    = get_irn_dbg_info(node);
	ir_mode  *mode    = get_irn_mode(node);

	if (mode_is_float(mode)) {
		if (USE_FPA(isa)) {
			/* fix: pass the transformed operand (new_op); the original
			 * pre-transform node must not end up in the new graph */
			return new_bd_arm_Mvf(dbgi, block, new_op, mode);
		} else if (USE_VFP(isa)) {
			panic("VFP not supported yet");
		} else {
			panic("Softfloat not supported yet");
		}
	}
	assert(mode_is_data(mode));
	return new_bd_arm_Rsb_imm(dbgi, block, new_op, 0, 0);
}
932 
/**
 * Transform a Load into an arm Ldr (gp modes) or Ldf (FPA float modes).
 * If the loaded value has no user, a Keep is added so the Load survives.
 */
static ir_node *gen_Load(ir_node *node)
{
	ir_node  *block   = be_transform_node(get_nodes_block(node));
	ir_node  *new_ptr = be_transform_node(get_Load_ptr(node));
	ir_node  *new_mem = be_transform_node(get_Load_mem(node));
	ir_mode  *mode    = get_Load_mode(node);
	dbg_info *dbgi    = get_irn_dbg_info(node);
	ir_node  *new_load;

	if (get_Load_unaligned(node) == align_non_aligned)
		panic("unaligned Loads not supported yet");

	if (!mode_is_float(mode)) {
		assert(mode_is_data(mode) && "unsupported mode for Load");
		new_load = new_bd_arm_Ldr(dbgi, block, new_ptr, new_mem, mode,
		                          NULL, 0, 0, false);
	} else if (USE_FPA(isa)) {
		new_load = new_bd_arm_Ldf(dbgi, block, new_ptr, new_mem, mode,
		                          NULL, 0, 0, false);
	} else if (USE_VFP(isa)) {
		panic("VFP not supported yet");
	} else {
		panic("Softfloat not supported yet");
	}
	set_irn_pinned(new_load, get_irn_pinned(node));

	/* check for special case: the loaded value might not be used */
	if (be_get_Proj_for_pn(node, pn_Load_res) == NULL) {
		/* add a result proj and a Keep to produce a pseudo use */
		ir_node *proj = new_r_Proj(new_load, mode_Iu, pn_arm_Ldr_res);
		be_new_Keep(block, 1, &proj);
	}

	return new_load;
}
972 
/**
 * Transform a Store node into an arm Str (integer/pointer) or Stf (FPA
 * float) store. The pinned state of the original Store is preserved.
 *
 * @param node  the Firm Store node
 * @return the new arm store node
 */
static ir_node *gen_Store(ir_node *node)
{
	ir_node  *block   = be_transform_node(get_nodes_block(node));
	ir_node  *new_ptr = be_transform_node(get_Store_ptr(node));
	ir_node  *new_mem = be_transform_node(get_Store_mem(node));
	ir_node  *value   = get_Store_value(node);
	ir_node  *new_val = be_transform_node(value);
	ir_mode  *mode    = get_irn_mode(value);
	dbg_info *dbgi    = get_irn_dbg_info(node);
	ir_node  *store;

	if (get_Store_unaligned(node) == align_non_aligned)
		panic("unaligned Stores not supported yet");

	if (!mode_is_float(mode)) {
		assert(mode_is_data(mode) && "unsupported mode for Store");
		store = new_bd_arm_Str(dbgi, block, new_ptr, new_val, new_mem, mode,
		                       NULL, 0, 0, false);
	} else if (USE_FPA(isa)) {
		store = new_bd_arm_Stf(dbgi, block, new_ptr, new_val, new_mem, mode,
		                       NULL, 0, 0, false);
	} else if (USE_VFP(isa)) {
		panic("VFP not supported yet");
	} else {
		panic("Softfloat not supported yet");
	}

	set_irn_pinned(store, get_irn_pinned(node));
	return store;
}
1006 
/** Transform an unconditional Jmp into an arm Jmp. */
static ir_node *gen_Jmp(ir_node *node)
{
	dbg_info *dbgi      = get_irn_dbg_info(node);
	ir_node  *new_block = be_transform_node(get_nodes_block(node));

	return new_bd_arm_Jmp(dbgi, new_block);
}
1015 
/** Transform a Switch node into an arm SwitchJmp (jump table). */
static ir_node *gen_Switch(ir_node *node)
{
	ir_graph              *irg      = get_irn_irg(node);
	ir_node               *block    = be_transform_node(get_nodes_block(node));
	ir_node               *selector = get_Switch_selector(node);
	dbg_info              *dbgi     = get_irn_dbg_info(node);
	ir_node               *new_sel  = be_transform_node(selector);
	unsigned               n_outs   = get_Switch_n_outs(node);
	/* the backend node keeps its own copy of the jump table */
	const ir_switch_table *table
		= ir_switch_table_duplicate(irg, get_Switch_table(node));

	/* switch with smaller modes not implemented yet */
	assert(get_mode_size_bits(get_irn_mode(selector)) == 32);

	return new_bd_arm_SwitchJmp(dbgi, block, new_sel, n_outs, table);
}
1034 
/**
 * Transform a Cmp node: float compares become Cmfe, integer compares become
 * Cmp_reg with the operands extended to full register width first.
 *
 * @param node  the Firm Cmp node
 * @return an arm flags-producing node (consumed by gen_Cond)
 */
static ir_node *gen_Cmp(ir_node *node)
{
	ir_node  *block    = be_transform_node(get_nodes_block(node));
	ir_node  *op1      = get_Cmp_left(node);
	ir_node  *op2      = get_Cmp_right(node);
	ir_mode  *cmp_mode = get_irn_mode(op1);
	dbg_info *dbgi     = get_irn_dbg_info(node);
	ir_node  *new_op1;
	ir_node  *new_op2;
	bool      is_unsigned;

	if (mode_is_float(cmp_mode)) {
		/* TODO: this is broken... */
		new_op1 = be_transform_node(op1);
		new_op2 = be_transform_node(op2);

		return new_bd_arm_Cmfe(dbgi, block, new_op1, new_op2, false);
	}

	assert(get_irn_mode(op2) == cmp_mode);
	is_unsigned = !mode_is_signed(cmp_mode);

	/* integer compare, TODO: use shifter_op in all its combinations */
	new_op1 = be_transform_node(op1);
	new_op1 = gen_extension(dbgi, block, new_op1, cmp_mode);
	new_op2 = be_transform_node(op2);
	new_op2 = gen_extension(dbgi, block, new_op2, cmp_mode);
	return new_bd_arm_Cmp_reg(dbgi, block, new_op1, new_op2, false,
	                          is_unsigned);
}
1065 
/**
 * Transform a Cond node into a conditional branch (arm B) that tests the
 * flags produced by the transformed Cmp selector.
 */
static ir_node *gen_Cond(ir_node *node)
{
	ir_node     *selector = get_Cond_selector(node);
	ir_node     *block;
	ir_node     *flags;
	dbg_info    *dbgi;
	ir_relation  relation;

	/* only Cmp selectors are expected here */
	assert(is_Cmp(selector));

	dbgi     = get_irn_dbg_info(node);
	block    = be_transform_node(get_nodes_block(node));
	flags    = be_transform_node(selector);
	relation = get_Cmp_relation(selector);

	return new_bd_arm_B(dbgi, block, flags, relation);
}
1083 
/** Row index into the fpa immediate table: 32bit vs. 64bit float mode. */
enum fpa_imm_mode {
	FPA_IMM_FLOAT    = 0,
	FPA_IMM_DOUBLE   = 1,
	FPA_IMM_MAX = FPA_IMM_DOUBLE
};

/* tarvals representable as fpa immediates, per float size (see disabled
 * is_fpa_immediate() below for the intended use) */
static ir_tarval *fpa_imm[FPA_IMM_MAX + 1][fpa_max];
1091 
#if 0
/**
 * Check, if a floating point tarval is an fpa immediate, i.e.
 * one of 0, 1, 2, 3, 4, 5, 10, or 0.5.
 */
/* NOTE(review): disabled code — if ever re-enabled, `i` is left
 * uninitialized when the mode size is neither 32 nor 64 bits (no default
 * in the switch), which would be undefined behavior. */
static int is_fpa_immediate(tarval *tv)
{
	ir_mode *mode = get_tarval_mode(tv);
	int i, j, res = 1;

	switch (get_mode_size_bits(mode)) {
	case 32:
		i = FPA_IMM_FLOAT;
		break;
	case 64:
		i = FPA_IMM_DOUBLE;
		break;
	}

	if (tarval_is_negative(tv)) {
		tv = tarval_neg(tv);
		res = -1;
	}

	for (j = 0; j < fpa_max; ++j) {
		if (tv == fpa_imm[i][j])
			return res * j;
	}
	return fpa_max;
}
#endif
1123 
/**
 * Transform a Const node: float constants become fConst (FPA only),
 * integer constants are materialized via create_const_graph.
 */
static ir_node *gen_Const(ir_node *node)
{
	ir_node  *block = be_transform_node(get_nodes_block(node));
	ir_mode  *mode  = get_irn_mode(node);
	dbg_info *dbgi  = get_irn_dbg_info(node);

	if (mode_is_float(mode)) {
		if (USE_FPA(isa)) {
			ir_tarval *tv = get_Const_tarval(node);
			return new_bd_arm_fConst(dbgi, block, tv);
		}
		if (USE_VFP(isa))
			panic("VFP not supported yet");
		panic("Softfloat not supported yet");
	}
	return create_const_graph(node, block);
}
1143 
/** Transform a SymConst (address of an entity) into an arm SymConst. */
static ir_node *gen_SymConst(ir_node *node)
{
	dbg_info  *dbgi   = get_irn_dbg_info(node);
	ir_node   *block  = be_transform_node(get_nodes_block(node));
	ir_entity *entity = get_SymConst_entity(node);

	return new_bd_arm_SymConst(dbgi, block, entity, 0);
}
1154 
/**
 * Combine two 32bit gp values into one double: store both words to
 * consecutive frame slots (offsets 0 and 4) and reload them as one mode_D
 * value through an fpa Ldf.
 *
 * @param dbgi   debug info for the created nodes
 * @param block  block the new nodes are placed in
 * @param node0  low word (stored at offset 0)
 * @param node1  high word (stored at offset 4)
 * @return a Proj of the reloaded float value
 */
static ir_node *ints_to_double(dbg_info *dbgi, ir_node *block, ir_node *node0,
                               ir_node *node1)
{
	/* the good way to do this would be to use the stm (store multiple)
	 * instructions, since our input is nearly always 2 consecutive 32bit
	 * registers... */
	ir_graph *irg   = current_ir_graph;
	ir_node  *stack = get_irg_frame(irg);
	ir_node  *nomem = get_irg_no_mem(irg);
	ir_node  *str0  = new_bd_arm_Str(dbgi, block, stack, node0, nomem, mode_gp,
	                                 NULL, 0, 0, true);
	ir_node  *str1  = new_bd_arm_Str(dbgi, block, stack, node1, nomem, mode_gp,
	                                 NULL, 0, 4, true);
	/* the Ldf must see both stores: merge their memory outs with a Sync */
	ir_node  *in[2] = { str0, str1 };
	ir_node  *sync  = new_r_Sync(block, 2, in);
	ir_node  *ldf;
	set_irn_pinned(str0, op_pin_state_floats);
	set_irn_pinned(str1, op_pin_state_floats);

	ldf = new_bd_arm_Ldf(dbgi, block, stack, sync, mode_D, NULL, 0, 0, true);
	set_irn_pinned(ldf, op_pin_state_floats);

	return new_r_Proj(ldf, mode_fp, pn_arm_Ldf_res);
}
1179 
/**
 * Reinterpret a 32bit gp value as a float by bouncing it through the
 * stack frame: Str as mode_gp, reload with Ldf as mode_F.
 */
static ir_node *int_to_float(dbg_info *dbgi, ir_node *block, ir_node *node)
{
	ir_graph *irg   = current_ir_graph;
	ir_node  *frame = get_irg_frame(irg);
	ir_node  *nomem = get_irg_no_mem(irg);
	ir_node  *store;
	ir_node  *load;

	store = new_bd_arm_Str(dbgi, block, frame, node, nomem, mode_gp,
	                       NULL, 0, 0, true);
	set_irn_pinned(store, op_pin_state_floats);

	load = new_bd_arm_Ldf(dbgi, block, frame, store, mode_F,
	                      NULL, 0, 0, true);
	set_irn_pinned(load, op_pin_state_floats);

	return new_r_Proj(load, mode_fp, pn_arm_Ldf_res);
}
1195 
/**
 * Reinterpret a 32bit float value as a gp value by bouncing it through
 * the stack frame: Stf as mode_F, reload with Ldr as mode_gp.
 */
static ir_node *float_to_int(dbg_info *dbgi, ir_node *block, ir_node *node)
{
	ir_graph *irg   = current_ir_graph;
	ir_node  *frame = get_irg_frame(irg);
	ir_node  *nomem = get_irg_no_mem(irg);
	ir_node  *store;
	ir_node  *load;

	store = new_bd_arm_Stf(dbgi, block, frame, node, nomem, mode_F,
	                       NULL, 0, 0, true);
	set_irn_pinned(store, op_pin_state_floats);

	load = new_bd_arm_Ldr(dbgi, block, frame, store, mode_gp,
	                      NULL, 0, 0, true);
	set_irn_pinned(load, op_pin_state_floats);

	return new_r_Proj(load, mode_gp, pn_arm_Ldr_res);
}
1211 
/**
 * Split a double into its two 32bit words by bouncing it through the
 * stack frame: Stf as mode_D, then two Ldr reloads at offsets 0 and 4.
 *
 * @param out_value0  receives the word at offset 0
 * @param out_value1  receives the word at offset 4
 */
static void double_to_ints(dbg_info *dbgi, ir_node *block, ir_node *node,
                           ir_node **out_value0, ir_node **out_value1)
{
	ir_graph *irg   = current_ir_graph;
	ir_node  *frame = get_irg_frame(irg);
	ir_node  *nomem = get_irg_no_mem(irg);
	ir_node  *store;
	ir_node  *load0;
	ir_node  *load1;

	store = new_bd_arm_Stf(dbgi, block, frame, node, nomem, mode_D,
	                       NULL, 0, 0, true);
	set_irn_pinned(store, op_pin_state_floats);

	load0 = new_bd_arm_Ldr(dbgi, block, frame, store, mode_gp,
	                       NULL, 0, 0, true);
	set_irn_pinned(load0, op_pin_state_floats);
	load1 = new_bd_arm_Ldr(dbgi, block, frame, store, mode_gp,
	                       NULL, 0, 4, true);
	set_irn_pinned(load1, op_pin_state_floats);

	*out_value0 = new_r_Proj(load0, mode_gp, pn_arm_Ldr_res);
	*out_value1 = new_r_Proj(load1, mode_gp, pn_arm_Ldr_res);
}
1231 
/**
 * Transform a CopyB (block copy) into an arm CopyB node. The address
 * inputs are wrapped in Copy nodes and three EmptyRegs are passed along —
 * presumably scratch registers for the copy loop (see node spec).
 */
static ir_node *gen_CopyB(ir_node *node)
{
	ir_node  *block    = be_transform_node(get_nodes_block(node));
	ir_node  *new_src  = be_transform_node(get_CopyB_src(node));
	ir_node  *new_dst  = be_transform_node(get_CopyB_dst(node));
	ir_node  *new_mem  = be_transform_node(get_CopyB_mem(node));
	dbg_info *dbgi     = get_irn_dbg_info(node);
	int       size     = get_type_size_bytes(get_CopyB_type(node));
	ir_node  *src_copy = be_new_Copy(block, new_src);
	ir_node  *dst_copy = be_new_Copy(block, new_dst);

	return new_bd_arm_CopyB(dbgi, block, dst_copy, src_copy,
	                        new_bd_arm_EmptyReg(dbgi, block),
	                        new_bd_arm_EmptyReg(dbgi, block),
	                        new_bd_arm_EmptyReg(dbgi, block),
	                        new_mem, size);
}
1255 
1256 /**
1257  * Transform builtin clz.
1258  */
gen_clz(ir_node * node)1259 static ir_node *gen_clz(ir_node *node)
1260 {
1261 	ir_node  *block  = be_transform_node(get_nodes_block(node));
1262 	dbg_info *dbg    = get_irn_dbg_info(node);
1263 	ir_node  *op     = get_irn_n(node, 1);
1264 	ir_node  *new_op = be_transform_node(op);
1265 
1266 	/* TODO armv5 instruction, otherwise create a call */
1267 	return new_bd_arm_Clz(dbg, block, new_op);
1268 }
1269 
1270 /**
1271  * Transform Builtin node.
1272  */
gen_Builtin(ir_node * node)1273 static ir_node *gen_Builtin(ir_node *node)
1274 {
1275 	ir_builtin_kind kind = get_Builtin_kind(node);
1276 
1277 	switch (kind) {
1278 	case ir_bk_trap:
1279 	case ir_bk_debugbreak:
1280 	case ir_bk_return_address:
1281 	case ir_bk_frame_address:
1282 	case ir_bk_prefetch:
1283 	case ir_bk_ffs:
1284 		break;
1285 	case ir_bk_clz:
1286 		return gen_clz(node);
1287 	case ir_bk_ctz:
1288 	case ir_bk_parity:
1289 	case ir_bk_popcount:
1290 	case ir_bk_bswap:
1291 	case ir_bk_outport:
1292 	case ir_bk_inport:
1293 	case ir_bk_inner_trampoline:
1294 		break;
1295 	}
1296 	panic("Builtin %s not implemented", get_builtin_kind_name(kind));
1297 }
1298 
1299 /**
1300  * Transform Proj(Builtin) node.
1301  */
gen_Proj_Builtin(ir_node * proj)1302 static ir_node *gen_Proj_Builtin(ir_node *proj)
1303 {
1304 	ir_node         *node     = get_Proj_pred(proj);
1305 	ir_node         *new_node = be_transform_node(node);
1306 	ir_builtin_kind kind      = get_Builtin_kind(node);
1307 
1308 	switch (kind) {
1309 	case ir_bk_return_address:
1310 	case ir_bk_frame_address:
1311 	case ir_bk_ffs:
1312 	case ir_bk_clz:
1313 	case ir_bk_ctz:
1314 	case ir_bk_parity:
1315 	case ir_bk_popcount:
1316 	case ir_bk_bswap:
1317 		assert(get_Proj_proj(proj) == pn_Builtin_max+1);
1318 		return new_node;
1319 	case ir_bk_trap:
1320 	case ir_bk_debugbreak:
1321 	case ir_bk_prefetch:
1322 	case ir_bk_outport:
1323 		assert(get_Proj_proj(proj) == pn_Builtin_M);
1324 		return new_node;
1325 	case ir_bk_inport:
1326 	case ir_bk_inner_trampoline:
1327 		break;
1328 	}
1329 	panic("Builtin %s not implemented", get_builtin_kind_name(kind));
1330 }
1331 
/**
 * Transform a Proj(Load): renumber the Firm proj to the matching output
 * of the already-transformed arm Ldr/Ldf node.
 */
static ir_node *gen_Proj_Load(ir_node *node)
{
	ir_node  *load     = get_Proj_pred(node);
	ir_node  *new_load = be_transform_node(load);
	dbg_info *dbgi     = get_irn_dbg_info(node);
	long      pn       = get_Proj_proj(node);

	switch (get_arm_irn_opcode(new_load)) {
	case iro_arm_Ldr:
		/* handle all gp loads equal: they have the same proj numbers. */
		if (pn == pn_Load_M)
			return new_rd_Proj(dbgi, new_load, mode_M, pn_arm_Ldr_M);
		if (pn == pn_Load_res)
			return new_rd_Proj(dbgi, new_load, mode_Iu, pn_arm_Ldr_res);
		break;
	case iro_arm_Ldf:
		if (pn == pn_Load_M)
			return new_rd_Proj(dbgi, new_load, mode_M, pn_arm_Ldf_M);
		if (pn == pn_Load_res) {
			ir_mode *load_mode = get_Load_mode(load);
			return new_rd_Proj(dbgi, new_load, load_mode, pn_arm_Ldf_res);
		}
		break;
	default:
		break;
	}
	panic("Unsupported Proj from Load");
}
1362 
/** Transform a Proj(CopyB): only the memory proj is supported. */
static ir_node *gen_Proj_CopyB(ir_node *node)
{
	ir_node  *pred     = get_Proj_pred(node);
	ir_node  *new_pred = be_transform_node(pred);
	dbg_info *dbgi     = get_irn_dbg_info(node);
	long      pn       = get_Proj_proj(node);

	if (pn == pn_CopyB_M && is_arm_CopyB(new_pred))
		return new_rd_Proj(dbgi, new_pred, mode_M, pn_arm_CopyB_M);

	panic("Unsupported Proj from CopyB");
}
1381 
/** Transform a Proj(Div): renumber onto the arm Dvf outputs. */
static ir_node *gen_Proj_Div(ir_node *node)
{
	ir_node  *pred     = get_Proj_pred(node);
	ir_node  *new_pred = be_transform_node(pred);
	dbg_info *dbgi     = get_irn_dbg_info(node);
	ir_mode  *mode     = get_irn_mode(node);
	long      pn       = get_Proj_proj(node);

	if (pn == pn_Div_M)
		return new_rd_Proj(dbgi, new_pred, mode_M, pn_arm_Dvf_M);
	if (pn == pn_Div_res)
		return new_rd_Proj(dbgi, new_pred, mode, pn_arm_Dvf_res);

	panic("Unsupported Proj from Div");
}
1400 
/** Transform the projs of the Start node onto the prolog values. */
static ir_node *gen_Proj_Start(ir_node *node)
{
	ir_node *block     = get_nodes_block(node);
	ir_node *new_block = be_transform_node(block);
	long     proj      = get_Proj_proj(node);

	switch ((pn_Start) proj) {
	case pn_Start_M:
		return be_prolog_get_memory(abihelper);

	case pn_Start_X_initial_exec:
		/* we exchange the ProjX with a jump */
		return new_bd_arm_Jmp(NULL, new_block);

	case pn_Start_P_frame_base:
		return be_prolog_get_reg_value(abihelper, sp_reg);

	case pn_Start_T_args:
		/* arguments are handled at their Proj(Proj(Start)) uses */
		return new_r_Bad(get_irn_irg(block), mode_T);
	}
	panic("unexpected start proj: %ld\n", proj);
}
1423 
/**
 * Transform a Proj(Proj(Start)): a method argument. Register arguments are
 * taken from the prolog values (doubles may be split across two registers,
 * or one register plus a stack slot); stack arguments are loaded from the
 * argument frame entity.
 */
static ir_node *gen_Proj_Proj_Start(ir_node *node)
{
	long       pn          = get_Proj_proj(node);
	ir_node   *block       = get_nodes_block(node);
	ir_node   *new_block   = be_transform_node(block);
	ir_entity *entity      = get_irg_entity(current_ir_graph);
	ir_type   *method_type = get_entity_type(entity);
	ir_type   *param_type  = get_method_param_type(method_type, pn);
	const reg_or_stackslot_t *param;

	/* Proj->Proj->Start must be a method argument */
	assert(get_Proj_proj(get_Proj_pred(node)) == pn_Start_T_args);

	param = &cconv->parameters[pn];

	if (param->reg0 != NULL) {
		/* argument transmitted in register */
		ir_mode *mode  = get_type_mode(param_type);
		ir_node *value = be_prolog_get_reg_value(abihelper, param->reg0);

		if (mode_is_float(mode)) {
			ir_node *value1 = NULL;

			if (param->reg1 != NULL) {
				/* second half of a double in another register */
				value1 = be_prolog_get_reg_value(abihelper, param->reg1);
			} else if (param->entity != NULL) {
				/* second half of a double spilled on the stack */
				ir_graph *irg = get_irn_irg(node);
				ir_node  *fp  = get_irg_frame(irg);
				ir_node  *mem = be_prolog_get_memory(abihelper);
				ir_node  *ldr = new_bd_arm_Ldr(NULL, new_block, fp, mem,
				                               mode_gp, param->entity,
				                               0, 0, true);
				value1 = new_r_Proj(ldr, mode_gp, pn_arm_Ldr_res);
			}

			/* convert integer value to float */
			if (value1 == NULL) {
				value = int_to_float(NULL, new_block, value);
			} else {
				value = ints_to_double(NULL, new_block, value, value1);
			}
		}
		return value;
	} else {
		/* argument transmitted on stack */
		ir_graph *irg  = get_irn_irg(node);
		ir_node  *fp   = get_irg_frame(irg);
		ir_node  *mem  = be_prolog_get_memory(abihelper);
		ir_mode  *mode = get_type_mode(param->type);
		ir_node  *load;
		ir_node  *value;

		if (mode_is_float(mode)) {
			load  = new_bd_arm_Ldf(NULL, new_block, fp, mem, mode,
			                       param->entity, 0, 0, true);
			value = new_r_Proj(load, mode_fp, pn_arm_Ldf_res);
		} else {
			load  = new_bd_arm_Ldr(NULL, new_block, fp, mem, mode,
			                       param->entity, 0, 0, true);
			value = new_r_Proj(load, mode_gp, pn_arm_Ldr_res);
		}
		set_irn_pinned(load, op_pin_state_floats);

		return value;
	}
}
1490 
1491 /**
1492  * Finds number of output value of a mode_T node which is constrained to
1493  * a single specific register.
1494  */
find_out_for_reg(ir_node * node,const arch_register_t * reg)1495 static int find_out_for_reg(ir_node *node, const arch_register_t *reg)
1496 {
1497 	int n_outs = arch_get_irn_n_outs(node);
1498 	int o;
1499 
1500 	for (o = 0; o < n_outs; ++o) {
1501 		const arch_register_req_t *req = arch_get_irn_register_req_out(node, o);
1502 		if (req == reg->single_req)
1503 			return o;
1504 	}
1505 	return -1;
1506 }
1507 
/**
 * Transform a Proj(Proj(Call)): a call result. Looks up which output of
 * the transformed call is constrained to the result register dictated by
 * the calling convention and projects that output.
 */
static ir_node *gen_Proj_Proj_Call(ir_node *node)
{
	long                  pn            = get_Proj_proj(node);
	ir_node              *call          = get_Proj_pred(get_Proj_pred(node));
	ir_node              *new_call      = be_transform_node(call);
	ir_type              *function_type = get_Call_type(call);
	calling_convention_t *cconv
		= arm_decide_calling_convention(NULL, function_type);
	const reg_or_stackslot_t *res = &cconv->results[pn];
	ir_mode              *mode;
	int                   regn;

	/* TODO 64bit modes */
	assert(res->reg0 != NULL && res->reg1 == NULL);
	regn = find_out_for_reg(new_call, res->reg0);
	if (regn < 0) {
		panic("Internal error in calling convention for return %+F", node);
	}
	/* use the register class mode, not the Firm result mode */
	mode = res->reg0->reg_class->mode;

	arm_free_calling_convention(cconv);

	return new_r_Proj(new_call, mode, regn);
}
1532 
/** Transform a Proj(Call): only the memory proj is handled here. */
static ir_node *gen_Proj_Call(ir_node *node)
{
	ir_node *new_call = be_transform_node(get_Proj_pred(node));
	long     pn       = get_Proj_proj(node);

	switch ((pn_Call) pn) {
	case pn_Call_M:
		/* memory is always output 0 of the transformed call */
		return new_r_Proj(new_call, mode_M, 0);
	case pn_Call_X_regular:
	case pn_Call_X_except:
	case pn_Call_T_result:
		break;
	}
	panic("Unexpected Call proj %ld\n", pn);
}
1549 
1550 /**
1551  * Transform a Proj node.
1552  */
gen_Proj(ir_node * node)1553 static ir_node *gen_Proj(ir_node *node)
1554 {
1555 	ir_node  *pred = get_Proj_pred(node);
1556 	long      proj = get_Proj_proj(node);
1557 
1558 	switch (get_irn_opcode(pred)) {
1559 	case iro_Store:
1560 		if (proj == pn_Store_M) {
1561 			return be_transform_node(pred);
1562 		} else {
1563 			panic("Unsupported Proj from Store");
1564 		}
1565 	case iro_Load:
1566 		return gen_Proj_Load(node);
1567 	case iro_Call:
1568 		return gen_Proj_Call(node);
1569 	case iro_CopyB:
1570 		return gen_Proj_CopyB(node);
1571 	case iro_Div:
1572 		return gen_Proj_Div(node);
1573 	case iro_Start:
1574 		return gen_Proj_Start(node);
1575 	case iro_Cond:
1576 	case iro_Switch:
1577 		/* nothing to do */
1578 		return be_duplicate_node(node);
1579 	case iro_Proj: {
1580 		ir_node *pred_pred = get_Proj_pred(pred);
1581 		if (is_Call(pred_pred)) {
1582 			return gen_Proj_Proj_Call(node);
1583 		} else if (is_Start(pred_pred)) {
1584 			return gen_Proj_Proj_Start(node);
1585 		}
1586 		/* FALLTHROUGH */
1587 	}
1588 	case iro_Builtin:
1589 		return gen_Proj_Builtin(node);
1590 	default:
1591 		panic("code selection didn't expect Proj after %+F\n", pred);
1592 	}
1593 }
1594 
1595 typedef ir_node *(*create_const_node_func)(dbg_info *db, ir_node *block);
1596 
/**
 * Lazily create a register-constant node in the start block and cache it
 * in *place; subsequent calls return the cached node.
 */
static inline ir_node *create_const(ir_graph *irg, ir_node **place,
                                    create_const_node_func func,
                                    const arch_register_t* reg)
{
	ir_node *res = *place;

	if (res == NULL) {
		ir_node *start_block = get_irg_start_block(irg);
		res = func(NULL, start_block);
		arch_set_irn_register(res, reg);
		*place = res;
	}
	return res;
}
1612 
/** Transform an Unknown node: materialize an arbitrary value (zero). */
static ir_node *gen_Unknown(ir_node *node)
{
	ir_node  *new_block = be_transform_node(get_nodes_block(node));
	dbg_info *dbgi      = get_irn_dbg_info(node);
	ir_mode  *mode      = get_irn_mode(node);

	/* just produce a 0 */
	if (mode_is_float(mode)) {
		ir_tarval *zero = get_mode_null(mode);
		return new_bd_arm_fConst(dbgi, new_block, zero);
	}
	if (mode_needs_gp_reg(mode))
		return create_const_graph_value(dbgi, new_block, 0);

	panic("Unexpected Unknown mode");
}
1631 
1632 /**
1633  * Produces the type which sits between the stack args and the locals on the
1634  * stack. It will contain the return address and space to store the old base
1635  * pointer.
1636  * @return The Firm type modeling the ABI between type.
1637  */
arm_get_between_type(void)1638 static ir_type *arm_get_between_type(void)
1639 {
1640 	static ir_type *between_type = NULL;
1641 
1642 	if (between_type == NULL) {
1643 		between_type = new_type_class(new_id_from_str("arm_between_type"));
1644 		set_type_size_bytes(between_type, 0);
1645 	}
1646 
1647 	return between_type;
1648 }
1649 
/**
 * Set up the be_stack_layout for the graph: builds the argument struct type
 * (one entity per stack-passed parameter, at its cconv offset) and wires
 * frame/between/arg types into the layout in that order.
 *
 * Requires the global cconv to be decided already.
 */
static void create_stacklayout(ir_graph *irg)
{
	ir_entity         *entity        = get_irg_entity(irg);
	ir_type           *function_type = get_entity_type(entity);
	be_stack_layout_t *layout        = be_get_irg_stack_layout(irg);
	ir_type           *arg_type;
	int                p;
	int                n_params;

	/* calling conventions must be decided by now */
	assert(cconv != NULL);

	/* construct argument type */
	arg_type = new_type_struct(id_mangle_u(get_entity_ident(entity), new_id_from_chars("arg_type", 8)));
	n_params = get_method_n_params(function_type);
	for (p = 0; p < n_params; ++p) {
		reg_or_stackslot_t *param = &cconv->parameters[p];
		char                buf[128];
		ident              *id;

		/* parameters passed purely in registers have no stack slot */
		if (param->type == NULL)
			continue;

		snprintf(buf, sizeof(buf), "param_%d", p);
		id            = new_id_from_str(buf);
		param->entity = new_entity(arg_type, id, param->type);
		set_entity_offset(param->entity, param->offset);
	}

	/* TODO: what about external functions? we don't know most of the stack
	 * layout for them. And probably don't need all of this... */
	memset(layout, 0, sizeof(*layout));

	layout->frame_type     = get_irg_frame_type(irg);
	layout->between_type   = arm_get_between_type();
	layout->arg_type       = arg_type;
	layout->initial_offset = 0;
	layout->initial_bias   = 0;
	layout->sp_relative    = true;

	assert(N_FRAME_TYPES == 3);
	layout->order[0] = layout->frame_type;
	layout->order[1] = layout->between_type;
	layout->order[2] = layout->arg_type;
}
1695 
1696 /**
1697  * transform the start node to the prolog code
1698  */
gen_Start(ir_node * node)1699 static ir_node *gen_Start(ir_node *node)
1700 {
1701 	ir_graph  *irg           = get_irn_irg(node);
1702 	ir_entity *entity        = get_irg_entity(irg);
1703 	ir_type   *function_type = get_entity_type(entity);
1704 	ir_node   *block         = get_nodes_block(node);
1705 	ir_node   *new_block     = be_transform_node(block);
1706 	dbg_info  *dbgi          = get_irn_dbg_info(node);
1707 	ir_node   *start;
1708 	size_t     i;
1709 
1710 	/* stackpointer is important at function prolog */
1711 	be_prolog_add_reg(abihelper, sp_reg,
1712 			arch_register_req_type_produces_sp | arch_register_req_type_ignore);
1713 	/* function parameters in registers */
1714 	for (i = 0; i < get_method_n_params(function_type); ++i) {
1715 		const reg_or_stackslot_t *param = &cconv->parameters[i];
1716 		if (param->reg0 != NULL)
1717 			be_prolog_add_reg(abihelper, param->reg0, arch_register_req_type_none);
1718 		if (param->reg1 != NULL)
1719 			be_prolog_add_reg(abihelper, param->reg1, arch_register_req_type_none);
1720 	}
1721 	/* announce that we need the values of the callee save regs */
1722 	for (i = 0; i != ARRAY_SIZE(callee_saves); ++i) {
1723 		be_prolog_add_reg(abihelper, callee_saves[i], arch_register_req_type_none);
1724 	}
1725 
1726 	start = be_prolog_create_start(abihelper, dbgi, new_block);
1727 	return start;
1728 }
1729 
/**
 * Find the current stack-pointer value for a stack-using node by walking
 * backwards through the stackorder list until a recorded stack value (or
 * the initial sp of the block) is found.
 */
static ir_node *get_stack_pointer_for(ir_node *node)
{
	for (;;) {
		/* get predecessor in stack_order list */
		ir_node *stack_pred = be_get_stack_pred(stackorder, node);
		ir_node *stack;

		if (stack_pred == NULL) {
			/* first stack user in the current block. We can simply use
			 * the initial sp_proj for it */
			return be_prolog_get_reg_value(abihelper, sp_reg);
		}

		be_transform_node(stack_pred);
		stack = pmap_get(ir_node, node_to_stack, stack_pred);
		if (stack != NULL)
			return stack;

		/* predecessor produced no stack value: keep walking backwards */
		node = stack_pred;
	}
}
1751 
1752 /**
1753  * transform a Return node into epilogue code + return statement
1754  */
gen_Return(ir_node * node)1755 static ir_node *gen_Return(ir_node *node)
1756 {
1757 	ir_node   *block          = get_nodes_block(node);
1758 	ir_node   *new_block      = be_transform_node(block);
1759 	dbg_info  *dbgi           = get_irn_dbg_info(node);
1760 	ir_node   *mem            = get_Return_mem(node);
1761 	ir_node   *new_mem        = be_transform_node(mem);
1762 	size_t     n_callee_saves = ARRAY_SIZE(callee_saves);
1763 	ir_node   *sp_proj        = get_stack_pointer_for(node);
1764 	size_t     n_res          = get_Return_n_ress(node);
1765 	ir_node   *bereturn;
1766 	size_t     i;
1767 
1768 	be_epilog_begin(abihelper);
1769 	be_epilog_set_memory(abihelper, new_mem);
1770 	/* connect stack pointer with initial stack pointer. fix_stack phase
1771 	   will later serialize all stack pointer adjusting nodes */
1772 	be_epilog_add_reg(abihelper, sp_reg,
1773 			arch_register_req_type_produces_sp | arch_register_req_type_ignore,
1774 			sp_proj);
1775 
1776 	/* result values */
1777 	for (i = 0; i < n_res; ++i) {
1778 		ir_node                  *res_value     = get_Return_res(node, i);
1779 		ir_node                  *new_res_value = be_transform_node(res_value);
1780 		const reg_or_stackslot_t *slot          = &cconv->results[i];
1781 		const arch_register_t    *reg           = slot->reg0;
1782 		assert(slot->reg1 == NULL);
1783 		be_epilog_add_reg(abihelper, reg, arch_register_req_type_none, new_res_value);
1784 	}
1785 
1786 	/* connect callee saves with their values at the function begin */
1787 	for (i = 0; i < n_callee_saves; ++i) {
1788 		const arch_register_t *reg   = callee_saves[i];
1789 		ir_node               *value = be_prolog_get_reg_value(abihelper, reg);
1790 		be_epilog_add_reg(abihelper, reg, arch_register_req_type_none, value);
1791 	}
1792 
1793 	/* epilog code: an incsp */
1794 	bereturn = be_epilog_create_return(abihelper, dbgi, new_block);
1795 	return bereturn;
1796 }
1797 
1798 
gen_Call(ir_node * node)1799 static ir_node *gen_Call(ir_node *node)
1800 {
1801 	ir_graph             *irg          = get_irn_irg(node);
1802 	ir_node              *callee       = get_Call_ptr(node);
1803 	ir_node              *block        = get_nodes_block(node);
1804 	ir_node              *new_block    = be_transform_node(block);
1805 	ir_node              *mem          = get_Call_mem(node);
1806 	ir_node              *new_mem      = be_transform_node(mem);
1807 	dbg_info             *dbgi         = get_irn_dbg_info(node);
1808 	ir_type              *type         = get_Call_type(node);
1809 	calling_convention_t *cconv        = arm_decide_calling_convention(NULL, type);
1810 	size_t                n_params     = get_Call_n_params(node);
1811 	size_t const          n_param_regs = cconv->n_reg_params;
1812 	/* max inputs: memory, callee, register arguments */
1813 	size_t const          max_inputs   = 2 + n_param_regs;
1814 	ir_node             **in           = ALLOCAN(ir_node*, max_inputs);
1815 	ir_node             **sync_ins     = ALLOCAN(ir_node*, max_inputs);
1816 	struct obstack       *obst         = be_get_be_obst(irg);
1817 	const arch_register_req_t **in_req
1818 		= OALLOCNZ(obst, const arch_register_req_t*, max_inputs);
1819 	size_t                in_arity       = 0;
1820 	size_t                sync_arity     = 0;
1821 	size_t const          n_caller_saves = ARRAY_SIZE(caller_saves);
1822 	ir_entity            *entity         = NULL;
1823 	ir_node              *incsp          = NULL;
1824 	int                   mem_pos;
1825 	ir_node              *res;
1826 	size_t                p;
1827 	size_t                o;
1828 	size_t                out_arity;
1829 
1830 	assert(n_params == get_method_n_params(type));
1831 
1832 	/* construct arguments */
1833 
1834 	/* memory input */
1835 	in_req[in_arity] = arch_no_register_req;
1836 	mem_pos          = in_arity;
1837 	++in_arity;
1838 	/* parameters */
1839 	for (p = 0; p < n_params; ++p) {
1840 		ir_node                  *value      = get_Call_param(node, p);
1841 		ir_node                  *new_value  = be_transform_node(value);
1842 		ir_node                  *new_value1 = NULL;
1843 		const reg_or_stackslot_t *param      = &cconv->parameters[p];
1844 		ir_type                  *param_type = get_method_param_type(type, p);
1845 		ir_mode                  *mode       = get_type_mode(param_type);
1846 		ir_node                  *str;
1847 
1848 		if (mode_is_float(mode) && param->reg0 != NULL) {
1849 			unsigned size_bits = get_mode_size_bits(mode);
1850 			if (size_bits == 64) {
1851 				double_to_ints(dbgi, new_block, new_value, &new_value,
1852 				               &new_value1);
1853 			} else {
1854 				assert(size_bits == 32);
1855 				new_value = float_to_int(dbgi, new_block, new_value);
1856 			}
1857 		}
1858 
1859 		/* put value into registers */
1860 		if (param->reg0 != NULL) {
1861 			in[in_arity]     = new_value;
1862 			in_req[in_arity] = param->reg0->single_req;
1863 			++in_arity;
1864 			if (new_value1 == NULL)
1865 				continue;
1866 		}
1867 		if (param->reg1 != NULL) {
1868 			assert(new_value1 != NULL);
1869 			in[in_arity]     = new_value1;
1870 			in_req[in_arity] = param->reg1->single_req;
1871 			++in_arity;
1872 			continue;
1873 		}
1874 
1875 		/* we need a store if we're here */
1876 		if (new_value1 != NULL) {
1877 			new_value = new_value1;
1878 			mode      = mode_gp;
1879 		}
1880 
1881 		/* create a parameter frame if necessary */
1882 		if (incsp == NULL) {
1883 			ir_node *new_frame = get_stack_pointer_for(node);
1884 			incsp = be_new_IncSP(sp_reg, new_block, new_frame,
1885 								 cconv->param_stack_size, 1);
1886 		}
1887 		if (mode_is_float(mode)) {
1888 			str = new_bd_arm_Stf(dbgi, new_block, incsp, new_value, new_mem,
1889 			                     mode, NULL, 0, param->offset, true);
1890 		} else {
1891 			str = new_bd_arm_Str(dbgi, new_block, incsp, new_value, new_mem,
1892 								 mode, NULL, 0, param->offset, true);
1893 		}
1894 		sync_ins[sync_arity++] = str;
1895 	}
1896 	assert(in_arity <= max_inputs);
1897 
1898 	/* construct memory input */
1899 	if (sync_arity == 0) {
1900 		in[mem_pos] = new_mem;
1901 	} else if (sync_arity == 1) {
1902 		in[mem_pos] = sync_ins[0];
1903 	} else {
1904 		in[mem_pos] = new_rd_Sync(NULL, new_block, sync_arity, sync_ins);
1905 	}
1906 
1907 	/* TODO: use a generic symconst matcher here */
1908 	if (is_SymConst(callee)) {
1909 		entity = get_SymConst_entity(callee);
1910 	} else {
1911 		/* TODO: finish load matcher here */
1912 #if 0
1913 		/* callee */
1914 		if (is_Proj(callee) && is_Load(get_Proj_pred(callee))) {
1915 			ir_node *load    = get_Proj_pred(callee);
1916 			ir_node *ptr     = get_Load_ptr(load);
1917 			ir_node *new_ptr = be_transform_node(ptr);
1918 			ir_node *mem     = get_Load_mem(load);
1919 			ir_node *new_mem = be_transform_node(mem);
1920 			ir_mode *mode    = get_Load_mode(node);
1921 
1922 		} else {
1923 #endif
1924 			in[in_arity]     = be_transform_node(callee);
1925 			in_req[in_arity] = arm_reg_classes[CLASS_arm_gp].class_req;
1926 			++in_arity;
1927 		//}
1928 	}
1929 
1930 	/* outputs:
1931 	 *  - memory
1932 	 *  - caller saves
1933 	 */
1934 	out_arity = 1 + n_caller_saves;
1935 
1936 	if (entity != NULL) {
1937 		/* TODO: use a generic symconst matcher here
1938 		 * so we can also handle entity+offset, etc. */
1939 		res = new_bd_arm_Bl(dbgi, new_block, in_arity, in, out_arity,entity, 0);
1940 	} else {
1941 		/* TODO:
1942 		 * - use a proper shifter_operand matcher
1943 		 * - we could also use LinkLdrPC
1944 		 */
1945 		res = new_bd_arm_LinkMovPC(dbgi, new_block, in_arity, in, out_arity,
1946 		                           ARM_SHF_REG, 0, 0);
1947 	}
1948 
1949 	if (incsp != NULL) {
1950 		/* IncSP to destroy the call stackframe */
1951 		incsp = be_new_IncSP(sp_reg, new_block, incsp, -cconv->param_stack_size,
1952 		                     0);
1953 		/* if we are the last IncSP producer in a block then we have to keep
1954 		 * the stack value.
1955 		 * Note: This here keeps all producers which is more than necessary */
1956 		add_irn_dep(incsp, res);
1957 		keep_alive(incsp);
1958 
1959 		pmap_insert(node_to_stack, node, incsp);
1960 	}
1961 
1962 	arch_set_irn_register_reqs_in(res, in_req);
1963 
1964 	/* create output register reqs */
1965 	arch_set_irn_register_req_out(res, 0, arch_no_register_req);
1966 	for (o = 0; o < n_caller_saves; ++o) {
1967 		const arch_register_t *reg = caller_saves[o];
1968 		arch_set_irn_register_req_out(res, o+1, reg->single_req);
1969 	}
1970 
1971 	/* copy pinned attribute */
1972 	set_irn_pinned(res, get_irn_pinned(node));
1973 
1974 	arm_free_calling_convention(cconv);
1975 	return res;
1976 }
1977 
1978 static ir_node *gen_Sel(ir_node *node)
1979 {
1980 	dbg_info  *dbgi      = get_irn_dbg_info(node);
1981 	ir_node   *block     = get_nodes_block(node);
1982 	ir_node   *new_block = be_transform_node(block);
1983 	ir_node   *ptr       = get_Sel_ptr(node);
1984 	ir_node   *new_ptr   = be_transform_node(ptr);
1985 	ir_entity *entity    = get_Sel_entity(node);
1986 
1987 	/* must be the frame pointer all other sels must have been lowered
1988 	 * already */
1989 	assert(is_Proj(ptr) && is_Start(get_Proj_pred(ptr)));
1990 
1991 	return new_bd_arm_FrameAddr(dbgi, new_block, new_ptr, entity, 0);
1992 }
1993 
1994 static ir_node *gen_Phi(ir_node *node)
1995 {
1996 	ir_mode                   *mode = get_irn_mode(node);
1997 	const arch_register_req_t *req;
1998 	if (mode_needs_gp_reg(mode)) {
1999 		/* we shouldn't have any 64bit stuff around anymore */
2000 		assert(get_mode_size_bits(mode) <= 32);
2001 		/* all integer operations are on 32bit registers now */
2002 		mode = mode_Iu;
2003 		req  = arm_reg_classes[CLASS_arm_gp].class_req;
2004 	} else {
2005 		req = arch_no_register_req;
2006 	}
2007 
2008 	return be_transform_phi(node, req);
2009 }
2010 
2011 /**
2012  * Enters all transform functions into the generic pointer
2013  */
2014 static void arm_register_transformers(void)
2015 {
2016 	be_start_transform_setup();
2017 
2018 	be_set_transform_function(op_Add,      gen_Add);
2019 	be_set_transform_function(op_And,      gen_And);
2020 	be_set_transform_function(op_Call,     gen_Call);
2021 	be_set_transform_function(op_Cmp,      gen_Cmp);
2022 	be_set_transform_function(op_Cond,     gen_Cond);
2023 	be_set_transform_function(op_Const,    gen_Const);
2024 	be_set_transform_function(op_Conv,     gen_Conv);
2025 	be_set_transform_function(op_CopyB,    gen_CopyB);
2026 	be_set_transform_function(op_Div,      gen_Div);
2027 	be_set_transform_function(op_Eor,      gen_Eor);
2028 	be_set_transform_function(op_Jmp,      gen_Jmp);
2029 	be_set_transform_function(op_Load,     gen_Load);
2030 	be_set_transform_function(op_Minus,    gen_Minus);
2031 	be_set_transform_function(op_Mul,      gen_Mul);
2032 	be_set_transform_function(op_Not,      gen_Not);
2033 	be_set_transform_function(op_Or,       gen_Or);
2034 	be_set_transform_function(op_Phi,      gen_Phi);
2035 	be_set_transform_function(op_Proj,     gen_Proj);
2036 	be_set_transform_function(op_Return,   gen_Return);
2037 	be_set_transform_function(op_Rotl,     gen_Rotl);
2038 	be_set_transform_function(op_Sel,      gen_Sel);
2039 	be_set_transform_function(op_Shl,      gen_Shl);
2040 	be_set_transform_function(op_Shr,      gen_Shr);
2041 	be_set_transform_function(op_Shrs,     gen_Shrs);
2042 	be_set_transform_function(op_Start,    gen_Start);
2043 	be_set_transform_function(op_Store,    gen_Store);
2044 	be_set_transform_function(op_Sub,      gen_Sub);
2045 	be_set_transform_function(op_Switch,   gen_Switch);
2046 	be_set_transform_function(op_SymConst, gen_SymConst);
2047 	be_set_transform_function(op_Unknown,  gen_Unknown);
2048 	be_set_transform_function(op_Builtin,  gen_Builtin);
2049 }
2050 
2051 /**
2052  * Initialize fpa Immediate support.
2053  */
2054 static void arm_init_fpa_immediate(void)
2055 {
2056 	/* 0, 1, 2, 3, 4, 5, 10, or 0.5. */
2057 	fpa_imm[FPA_IMM_FLOAT][fpa_null]  = get_mode_null(mode_F);
2058 	fpa_imm[FPA_IMM_FLOAT][fpa_one]   = get_mode_one(mode_F);
2059 	fpa_imm[FPA_IMM_FLOAT][fpa_two]   = new_tarval_from_str("2", 1, mode_F);
2060 	fpa_imm[FPA_IMM_FLOAT][fpa_three] = new_tarval_from_str("3", 1, mode_F);
2061 	fpa_imm[FPA_IMM_FLOAT][fpa_four]  = new_tarval_from_str("4", 1, mode_F);
2062 	fpa_imm[FPA_IMM_FLOAT][fpa_five]  = new_tarval_from_str("5", 1, mode_F);
2063 	fpa_imm[FPA_IMM_FLOAT][fpa_ten]   = new_tarval_from_str("10", 2, mode_F);
2064 	fpa_imm[FPA_IMM_FLOAT][fpa_half]  = new_tarval_from_str("0.5", 3, mode_F);
2065 
2066 	fpa_imm[FPA_IMM_DOUBLE][fpa_null]  = get_mode_null(mode_D);
2067 	fpa_imm[FPA_IMM_DOUBLE][fpa_one]   = get_mode_one(mode_D);
2068 	fpa_imm[FPA_IMM_DOUBLE][fpa_two]   = new_tarval_from_str("2", 1, mode_D);
2069 	fpa_imm[FPA_IMM_DOUBLE][fpa_three] = new_tarval_from_str("3", 1, mode_D);
2070 	fpa_imm[FPA_IMM_DOUBLE][fpa_four]  = new_tarval_from_str("4", 1, mode_D);
2071 	fpa_imm[FPA_IMM_DOUBLE][fpa_five]  = new_tarval_from_str("5", 1, mode_D);
2072 	fpa_imm[FPA_IMM_DOUBLE][fpa_ten]   = new_tarval_from_str("10", 2, mode_D);
2073 	fpa_imm[FPA_IMM_DOUBLE][fpa_half]  = new_tarval_from_str("0.5", 3, mode_D);
2074 }
2075 
2076 /**
2077  * Transform a Firm graph into an ARM graph.
2078  */
2079 void arm_transform_graph(ir_graph *irg)
2080 {
2081 	static int imm_initialized = 0;
2082 	ir_entity *entity          = get_irg_entity(irg);
2083 	const arch_env_t *arch_env = be_get_irg_arch_env(irg);
2084 	ir_type   *frame_type;
2085 
2086 	mode_gp = mode_Iu;
2087 	mode_fp = mode_F;
2088 
2089 	if (! imm_initialized) {
2090 		arm_init_fpa_immediate();
2091 		imm_initialized = 1;
2092 	}
2093 	arm_register_transformers();
2094 
2095 	isa = (arm_isa_t*) arch_env;
2096 
2097 	node_to_stack = pmap_create();
2098 
2099 	assert(abihelper == NULL);
2100 	abihelper = be_abihelper_prepare(irg);
2101 	stackorder = be_collect_stacknodes(irg);
2102 	assert(cconv == NULL);
2103 	cconv = arm_decide_calling_convention(irg, get_entity_type(entity));
2104 	create_stacklayout(irg);
2105 
2106 	be_transform_graph(irg, NULL);
2107 
2108 	be_abihelper_finish(abihelper);
2109 	abihelper = NULL;
2110 	be_free_stackorder(stackorder);
2111 	stackorder = NULL;
2112 
2113 	arm_free_calling_convention(cconv);
2114 	cconv = NULL;
2115 
2116 	frame_type = get_irg_frame_type(irg);
2117 	if (get_type_state(frame_type) == layout_undefined) {
2118 		default_layout_compound_type(frame_type);
2119 	}
2120 
2121 	pmap_destroy(node_to_stack);
2122 	node_to_stack = NULL;
2123 
2124 	be_add_missing_keeps(irg);
2125 }
2126 
/** Registers the debug module used by the arm transform phase. */
void arm_init_transform(void)
{
	FIRM_DBG_REGISTER(dbg, "firm.be.arm.transform");
}
2131