1 /*
2 * Copyright (C) 1995-2011 University of Karlsruhe. All right reserved.
3 *
4 * This file is part of libFirm.
5 *
6 * This file may be distributed and/or modified under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation and appearing in the file LICENSE.GPL included in the
9 * packaging of this file.
10 *
11 * Licensees holding valid libFirm Professional Edition licenses may use
12 * this file in accordance with the libFirm Commercial License.
13 * Agreement provided with the Software.
14 *
15 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
16 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 * PURPOSE.
18 */
19
20 /**
21 * @file
22 * @brief Processor architecture specification.
23 * @author Sebastian Hack
24 */
25 #ifndef FIRM_BE_BEARCH_H
26 #define FIRM_BE_BEARCH_H
27
#include <limits.h>
#include <stdbool.h>

#include "firm_types.h"
#include "bitset.h"
#include "obst.h"
#include "raw_bitset.h"
#include "irop_t.h"

#include "be_types.h"
#include "beinfo.h"
#include "be.h"
#include "beirg.h"
#include "error.h"
41
/**
 * This constant is returned by the get_sp_bias functions if the stack
 * is reset (usually because the frame pointer is copied to the stack
 * pointer).
 *
 * Note: INT_MIN requires <limits.h> (see the include block above).
 */
#define SP_BIAS_RESET INT_MIN
48
/** Flags describing properties of a whole register class. */
typedef enum arch_register_class_flags_t {
    arch_register_class_flag_none = 0,
    /** don't do automatic register allocation for this class */
    arch_register_class_flag_manual_ra = 1U << 0,
    /** the register models an abstract state (example: fpu rounding mode) */
    arch_register_class_flag_state = 1U << 1
} arch_register_class_flags_t;
ENUM_BITSET(arch_register_class_flags_t)
57
/** Flags describing properties of a single register. */
typedef enum arch_register_type_t {
    arch_register_type_none = 0,
    /** Do not consider this register when allocating. */
    arch_register_type_ignore = 1U << 0,
    /** The emitter can choose an arbitrary register. The register fulfills any
     * register constraints as long as the register class matches */
    arch_register_type_joker = 1U << 1,
    /** This is just a virtual register. Virtual registers fulfill any register
     * constraints as long as the register class matches. It is allowed to
     * have multiple definitions for the same virtual register at a point */
    arch_register_type_virtual = 1U << 2,
    /** The register represents a state that should be handled by bestate
     * code */
    arch_register_type_state = 1U << 3,
} arch_register_type_t;
ENUM_BITSET(arch_register_type_t)
74
/**
 * Different types of register allocation requirements.
 */
typedef enum arch_register_req_type_t {
    /** No register requirement. */
    arch_register_req_type_none = 0,
    /** All registers in the class are allowed. */
    arch_register_req_type_normal = 1U << 0,
    /** Only a real subset of the class is allowed. */
    arch_register_req_type_limited = 1U << 1,
    /** The register should be equal to another one at the node. */
    arch_register_req_type_should_be_same = 1U << 2,
    /** The register must be unequal from some other at the node. */
    arch_register_req_type_must_be_different = 1U << 3,
    /** The register number should be aligned (in case of multiregister
     * values) */
    arch_register_req_type_aligned = 1U << 4,
    /** ignore while allocating registers */
    arch_register_req_type_ignore = 1U << 5,
    /** the output produces a new value for the stack pointer
     * (this is not really a constraint but a marker to guide the stackpointer
     * rewiring logic) */
    arch_register_req_type_produces_sp = 1U << 6,
} arch_register_req_type_t;
ENUM_BITSET(arch_register_req_type_t)
99
/** Sentinel requirement returned when a node carries no explicit
 *  register requirement (see arch_get_irn_register_req_in/out). */
extern const arch_register_req_t *arch_no_register_req;

/**
 * Print information about a register requirement in human readable form
 * @param F    output stream/file
 * @param req  The requirements structure to format.
 * @param node The node the requirement belongs to.
 */
void arch_dump_register_req(FILE *F, const arch_register_req_t *req,
                            const ir_node *node);

/** Print all register requirements of @p node to @p F. */
void arch_dump_register_reqs(FILE *F, const ir_node *node);
/** Print register requirements and assigned registers of @p node to @p F. */
void arch_dump_reqs_and_registers(FILE *F, const ir_node *node);

/** Set the stack frame offset of @p irn's frame entity
 *  (see arch_irn_ops_t::set_frame_offset). */
void arch_set_frame_offset(ir_node *irn, int bias);

/** Return the stack frame entity @p irn depends on, or NULL
 *  (see arch_irn_ops_t::get_frame_entity). */
ir_entity *arch_get_frame_entity(const ir_node *irn);
/** Return the constant stack pointer delta of @p irn, 0 if none
 *  (see arch_irn_ops_t::get_sp_bias and SP_BIAS_RESET). */
int arch_get_sp_bias(ir_node *irn);

/** Return the estimated cycle count of @p irn
 *  (see arch_irn_ops_t::get_op_estimated_cost). */
int arch_get_op_estimated_cost(const ir_node *irn);
/** Construct an inverse operation yielding input @p i of @p irn
 *  (see arch_irn_ops_t::get_inverse). */
arch_inverse_t *arch_get_inverse(const ir_node *irn, int i,
                                 arch_inverse_t *inverse,
                                 struct obstack *obstack);
/** Return nonzero if operand @p i of @p irn can be loaded from memory
 *  internally (see arch_irn_ops_t::possible_memory_operand). */
int arch_possible_memory_operand(const ir_node *irn,
                                 unsigned int i);
/** Fold the reload of operand @p i into @p irn
 *  (see arch_irn_ops_t::perform_memory_operand). */
void arch_perform_memory_operand(ir_node *irn, ir_node *spill,
                                 unsigned int i);
126
/**
 * Get the register allocated for a value.
 */
const arch_register_t *arch_get_irn_register(const ir_node *irn);

/**
 * Assign register to a value
 */
void arch_set_irn_register(ir_node *irn, const arch_register_t *reg);

/**
 * Set the register for a certain output operand.
 */
void arch_set_irn_register_out(ir_node *irn, unsigned pos, const arch_register_t *r);

/** Get the register assigned to the @p pos'th result of @p irn. */
const arch_register_t *arch_get_irn_register_out(const ir_node *irn, unsigned pos);
/** Get the register assigned to the value at input @p pos of @p irn. */
const arch_register_t *arch_get_irn_register_in(const ir_node *irn, int pos);
144
145 /**
146 * Get register constraints for an operand at position @p
147 */
arch_get_irn_register_req_in(const ir_node * node,int pos)148 static inline const arch_register_req_t *arch_get_irn_register_req_in(
149 const ir_node *node, int pos)
150 {
151 const backend_info_t *info = be_get_info(node);
152 if (info->in_reqs == NULL)
153 return arch_no_register_req;
154 return info->in_reqs[pos];
155 }
156
157 /**
158 * Get register constraint for a produced result (the @p pos result)
159 */
arch_get_irn_register_req_out(const ir_node * node,unsigned pos)160 static inline const arch_register_req_t *arch_get_irn_register_req_out(
161 const ir_node *node, unsigned pos)
162 {
163 const backend_info_t *info = be_get_info(node);
164 if (info->out_infos == NULL)
165 return arch_no_register_req;
166 return info->out_infos[pos].req;
167 }
168
/** Set the register requirement for result @p pos of @p node. */
static inline void arch_set_irn_register_req_out(ir_node *node, unsigned pos,
                                                 const arch_register_req_t *req)
{
    backend_info_t *const binfo = be_get_info(node);
    assert(pos < (unsigned)ARR_LEN(binfo->out_infos));
    binfo->out_infos[pos].req = req;
}
176
/** Attach the array @p reqs of input register requirements to @p node. */
static inline void arch_set_irn_register_reqs_in(ir_node *node,
                                                 const arch_register_req_t **reqs)
{
    be_get_info(node)->in_reqs = reqs;
}
183
arch_get_irn_register_reqs_in(const ir_node * node)184 static inline const arch_register_req_t **arch_get_irn_register_reqs_in(
185 const ir_node *node)
186 {
187 backend_info_t *info = be_get_info(node);
188 return info->in_reqs;
189 }
190
/** Get the register requirement of the value @p node. */
const arch_register_req_t *arch_get_irn_register_req(const ir_node *node);

/**
 * Get the flags of a node.
 * @param irn The node.
 * @return The flags.
 */
arch_irn_flags_t arch_get_irn_flags(const ir_node *irn);

/** Overwrite the flags of @p node with @p flags. */
void arch_set_irn_flags(ir_node *node, arch_irn_flags_t flags);
/** Add @p flags to the existing flags of @p node. */
void arch_add_irn_flags(ir_node *node, arch_irn_flags_t flags);

/** Convenience: test whether flag arch_irn_flags_##flag is set on @p irn. */
#define arch_irn_is(irn, flag) ((arch_get_irn_flags(irn) & arch_irn_flags_ ## flag) != 0)
204
arch_get_irn_n_outs(const ir_node * node)205 static inline unsigned arch_get_irn_n_outs(const ir_node *node)
206 {
207 backend_info_t *info = be_get_info(node);
208 if (info->out_infos == NULL)
209 return 0;
210
211 return (unsigned)ARR_LEN(info->out_infos);
212 }
213
/**
 * Start codegeneration
 * @param isa      The isa interface implementation to use.
 * @param main_env The backend main environment.
 * @return a new isa instance (arch_env)
 */
arch_env_t *arch_env_begin_codegeneration(const arch_isa_if_t *isa,
                                          be_main_env_t *main_env);

/**
 * Register an instruction set architecture
 * @param name The name under which the isa is registered.
 * @param isa  The isa interface implementation.
 */
void be_register_isa_if(const char *name, const arch_isa_if_t *isa);
224
/**
 * A register.
 */
struct arch_register_t {
    const char *name; /**< The name of the register. */
    const arch_register_class_t *reg_class; /**< The class of the register */
    unsigned short index; /**< The index of the register in
                               the class. */
    unsigned short global_index; /**< The global index of this
                                      register in the architecture. */
    arch_register_type_t type; /**< The type of the register. */
    /** register constraint allowing just this register */
    const arch_register_req_t *single_req;
    /** register number in dwarf debugging format */
    unsigned short dwarf_number;
};
241
/**
 * A class of registers.
 * Like general purpose or floating point.
 */
struct arch_register_class_t {
    unsigned index; /**< index of this register class */
    const char *name; /**< The name of the register class.*/
    unsigned n_regs; /**< Number of registers in this
                          class. */
    ir_mode *mode; /**< The mode of the register class.*/
    const arch_register_t *regs; /**< The array of registers. */
    arch_register_class_flags_t flags; /**< register class flags. */
    /** requirement covering this whole class
     *  (NOTE(review): presumably "any register of this class" —
     *  confirm at the backend construction site) */
    const arch_register_req_t *class_req;
};
256
/* Convenience accessors for arch_register_class_t fields. */

/** return the number of registers in this register class */
#define arch_register_class_n_regs(cls) ((cls)->n_regs)

/** return the largest mode of this register class */
#define arch_register_class_mode(cls) ((cls)->mode)

/** return the name of this register class */
#define arch_register_class_name(cls) ((cls)->name)

/** return the index of this register class */
#define arch_register_class_index(cls) ((cls)->index)

/** return the register class flags */
#define arch_register_class_flags(cls) ((cls)->flags)
271
arch_register_for_index(const arch_register_class_t * cls,unsigned idx)272 static inline const arch_register_t *arch_register_for_index(
273 const arch_register_class_t *cls, unsigned idx)
274 {
275 assert(idx < cls->n_regs);
276 return &cls->regs[idx];
277 }
278
/**
 * Convenience macro to check for set constraints.
 * @param req A pointer to register requirements.
 * @param kind The kind of constraint to check for
 *             (see arch_register_req_type_t).
 * @return 1 if the kind of constraint is present, 0 if not.
 */
#define arch_register_req_is(req, kind) \
    (((req)->type & (arch_register_req_type_ ## kind)) != 0)
288
/**
 * Expresses requirements to register allocation for an operand.
 */
struct arch_register_req_t {
    arch_register_req_type_t type; /**< The type of the constraint. */
    const arch_register_class_t *cls; /**< The register class this constraint
                                           belongs to. */
    const unsigned *limited; /**< allowed register bitset
                                  (in case of wide-values this is
                                  only about the first register);
                                  NULL if no "limited" constraint */
    unsigned other_same; /**< Bitmask of ins which should use the
                              same register (should_be_same). */
    unsigned other_different; /**< Bitmask of ins which shall use a
                                   different register
                                   (must_be_different) */
    unsigned char width; /**< specifies how many sequential
                              registers are required */
};
307
reg_reqs_equal(const arch_register_req_t * req1,const arch_register_req_t * req2)308 static inline bool reg_reqs_equal(const arch_register_req_t *req1,
309 const arch_register_req_t *req2)
310 {
311 if (req1 == req2)
312 return true;
313
314 if (req1->type != req2->type ||
315 req1->cls != req2->cls ||
316 req1->other_same != req2->other_same ||
317 req1->other_different != req2->other_different ||
318 (req1->limited != NULL) != (req2->limited != NULL))
319 return false;
320
321 if (req1->limited != NULL) {
322 size_t const n_regs = arch_register_class_n_regs(req1->cls);
323 if (!rbitsets_equal(req1->limited, req2->limited, n_regs))
324 return false;
325 }
326
327 return true;
328 }
329
/**
 * An inverse operation returned by the backend
 * (see arch_irn_ops_t::get_inverse).
 */
struct arch_inverse_t {
    int n; /**< count of nodes returned in nodes array */
    int costs; /**< costs of this remat */

    /** nodes for this inverse operation. shall be in schedule order.
     * last element is the target value */
    ir_node **nodes;
};
341
/** Per-node callbacks a backend provides for its instructions. */
struct arch_irn_ops_t {

    /**
     * Get the entity on the stack frame this node depends on.
     * @param irn The node in question.
     * @return The entity on the stack frame or NULL, if the node does not have
     *         a stack frame entity.
     */
    ir_entity *(*get_frame_entity)(const ir_node *irn);

    /**
     * Set the offset of a node carrying an entity on the stack frame.
     * @param irn    The node.
     * @param offset The offset of the node's stack frame entity.
     */
    void (*set_frame_offset)(ir_node *irn, int offset);

    /**
     * Returns the delta of the stackpointer for nodes that increment or
     * decrement the stackpointer with a constant value. (push, pop
     * nodes on most architectures).
     * A positive value stands for an expanding stack area, a negative value for
     * a shrinking one.
     *
     * @param irn The node
     * @return 0 if the stackpointer is not modified with a constant
     *         value, otherwise the increment/decrement value
     *         (see also SP_BIAS_RESET)
     */
    int (*get_sp_bias)(const ir_node *irn);

    /**
     * Returns an inverse operation which yields the i-th argument
     * of the given node as result.
     *
     * @param irn     The original operation
     * @param i       Index of the argument we want the inverse operation to
     *                yield
     * @param inverse struct to be filled with the resulting inverse op
     * @param obstack The obstack to use for allocation of the returned nodes
     *                array
     * @return The inverse operation or NULL if the operation is not
     *         invertible
     */
    arch_inverse_t *(*get_inverse)(const ir_node *irn, int i,
                                   arch_inverse_t *inverse,
                                   struct obstack *obstack);

    /**
     * Get the estimated cycle count for @p irn.
     *
     * @param irn The node.
     * @return The estimated cycle count for this operation
     */
    int (*get_op_estimated_cost)(const ir_node *irn);

    /**
     * Asks the backend whether operand @p i of @p irn can be loaded from
     * memory internally
     *
     * @param irn The node.
     * @param i   Index of the argument we would like to know whether @p irn
     *            can load it from memory internally
     * @return nonzero if argument can be loaded or zero otherwise
     */
    int (*possible_memory_operand)(const ir_node *irn, unsigned int i);

    /**
     * Ask the backend to assimilate @p reload of operand @p i into @p irn.
     *
     * @param irn   The node.
     * @param spill The spill.
     * @param i     The position of the reload.
     */
    void (*perform_memory_operand)(ir_node *irn, ir_node *spill,
                                   unsigned int i);
};
417
/**
 * Architecture interface.
 */
struct arch_isa_if_t {
    /**
     * Initializes the isa interface. This is necessary before calling any
     * other functions from this interface.
     */
    void (*init)(void);

    /**
     * Frees resources allocated by this isa interface.
     */
    void (*finish)(void);

    /**
     * Returns the frontend settings needed for this backend.
     */
    const backend_params *(*get_params)(void);

    /**
     * lowers current program for target. See the documentation for
     * be_lower_for_target() for details.
     */
    void (*lower_for_target)(void);

    /**
     * parse an assembler constraint part and set flags according to its nature
     * advances the *c pointer to point to the last parsed character (so if you
     * parse a single character don't advance c)
     */
    asm_constraint_flags_t (*parse_asm_constraint)(const char **c);

    /**
     * returns true if the string is a valid clobbered (register) in this
     * backend
     */
    int (*is_valid_clobber)(const char *clobber);

    /**
     * Start codegeneration
     * @return a new isa instance
     */
    arch_env_t *(*begin_codegeneration)(const be_main_env_t *env);

    /**
     * Free the isa instance.
     */
    void (*end_codegeneration)(void *self);

    /**
     * Initialize the code generator for a graph
     * @param irg A graph
     */
    void (*init_graph)(ir_graph *irg);

    /**
     * Get the ABI restrictions for procedure calls.
     * @param call_type The call type of the method (procedure) in question.
     * @param abi       The abi object to be filled with the parameter and
     *                  result locations.
     */
    void (*get_call_abi)(ir_type *call_type, be_abi_call_t *abi);

    /**
     * mark node as rematerialized
     */
    void (*mark_remat)(ir_node *node);

    /**
     * return node used as base in pic code addresses
     */
    ir_node* (*get_pic_base)(ir_graph *irg);

    /**
     * Create a spill instruction. We assume that spill instructions
     * do not need any additional registers and do not affect cpu-flags in any
     * way.
     * Construct a sequence of instructions after @p after (the resulting nodes
     * are already scheduled).
     * Returns a mode_M value which is used as input for a reload instruction.
     */
    ir_node *(*new_spill)(ir_node *value, ir_node *after);

    /**
     * Create a reload instruction. We assume that reload instructions do not
     * need any additional registers and do not affect cpu-flags in any way.
     * Constructs a sequence of instruction before @p before (the resulting
     * nodes are already scheduled). A rewiring of users is not performed in
     * this function.
     * Returns a value representing the restored value.
     */
    ir_node *(*new_reload)(ir_node *value, ir_node *spilled_value,
                           ir_node *before);

    /**
     * Checks if the given register is callee/caller saved.
     * @deprecated, only necessary if backend still uses beabi functions
     */
    int (*register_saved_by)(const arch_register_t *reg, int callee);

    /**
     * Called directly after initialization. Backend should handle all
     * intrinsics here.
     */
    void (*handle_intrinsics)(void);

    /**
     * Called before abi introduce.
     */
    void (*before_abi)(ir_graph *irg);

    /**
     * Called, when the graph is being normalized.
     */
    void (*prepare_graph)(ir_graph *irg);

    /**
     * Called before register allocation.
     */
    void (*before_ra)(ir_graph *irg);

    /**
     * Called directly before done is called. This should be the last place
     * where the irg is modified.
     */
    void (*finish_graph)(ir_graph *irg);

    /**
     * Called after everything happened. This call should emit the final
     * assembly code but avoid changing the irg.
     */
    void (*emit)(ir_graph *irg);
};
551
/* Convenience wrappers dispatching through the isa interface (env->impl).
 * Bugfix: arch_env_parse_asm_constraint and arch_env_is_valid_clobber were
 * missing a closing parenthesis, breaking every expansion site. */
#define arch_env_end_codegeneration(env)       ((env)->impl->end_codegeneration(env))
#define arch_env_handle_intrinsics(env) \
    do { if((env)->impl->handle_intrinsics != NULL) (env)->impl->handle_intrinsics(); } while(0)
#define arch_env_get_call_abi(env,tp,abi)      ((env)->impl->get_call_abi((tp), (abi)))
#define arch_env_get_params(env)               ((env)->impl->get_params())
#define arch_env_parse_asm_constraint(env,c)   ((env)->impl->parse_asm_constraint((c)))
#define arch_env_is_valid_clobber(env,clobber) ((env)->impl->is_valid_clobber((clobber)))
#define arch_env_mark_remat(env,node) \
    do { if ((env)->impl->mark_remat != NULL) (env)->impl->mark_remat((node)); } while(0)

#define arch_env_new_spill(env,value,after)           ((env)->impl->new_spill(value, after))
#define arch_env_new_reload(env,value,spilled,before) ((env)->impl->new_reload(value, spilled, before))
564
/**
 * ISA base class.
 * Describes the target architecture: its registers, register classes and
 * backend cost/ABI parameters.
 */
struct arch_env_t {
    const arch_isa_if_t *impl; /**< the isa interface implementation */
    unsigned n_registers; /**< number of registers */
    const arch_register_t *registers; /**< register array */
    unsigned n_register_classes; /**< number of register classes*/
    const arch_register_class_t *register_classes; /**< register classes */
    const arch_register_t *sp; /**< The stack pointer register. */
    const arch_register_t *bp; /**< The base pointer register. */
    const arch_register_class_t *link_class; /**< The static link pointer
                                                  register class. */
    int stack_alignment; /**< power of 2 stack alignment */
    const be_main_env_t *main_env; /**< the be main environment */
    int spill_cost; /**< cost for a be_Spill node */
    int reload_cost; /**< cost for a be_Reload node */
    bool custom_abi : 1; /**< backend does all abi handling
                              and does not need the generic
                              stuff from beabi.h/.c */
};
586
arch_irn_is_ignore(const ir_node * irn)587 static inline bool arch_irn_is_ignore(const ir_node *irn)
588 {
589 const arch_register_req_t *req = arch_get_irn_register_req(irn);
590 return req->type & arch_register_req_type_ignore;
591 }
592
arch_irn_consider_in_reg_alloc(const arch_register_class_t * cls,const ir_node * node)593 static inline bool arch_irn_consider_in_reg_alloc(
594 const arch_register_class_t *cls, const ir_node *node)
595 {
596 const arch_register_req_t *req = arch_get_irn_register_req(node);
597 return
598 req->cls == cls &&
599 !(req->type & arch_register_req_type_ignore);
600 }
601
/**
 * Iterate over all values defined by an instruction @p node that belong to
 * register class @p ccls, executing @p code for each such definition.
 * Inside @p code the variable named by @p value is bound to the defined
 * value and req_ to its requirement.
 * Note: this variant does NOT skip definitions whose requirement is marked
 * ignore — use be_foreach_definition() for that.
 */
#define be_foreach_definition_(node, ccls, value, code) \
	do { \
		if (get_irn_mode(node) == mode_T) { \
			foreach_out_edge(node, edge_) { \
				ir_node                   *const value = get_edge_src_irn(edge_); \
				arch_register_req_t const *const req_  = arch_get_irn_register_req(value); \
				if (req_->cls != ccls) \
					continue; \
				code \
			} \
		} else { \
			arch_register_req_t const *const req_  = arch_get_irn_register_req(node); \
			ir_node                   *const value = node; \
			if (req_->cls == ccls) { \
				code \
			} \
		} \
	} while (0)

/**
 * Like be_foreach_definition_() but additionally skips definitions whose
 * requirement carries the ignore flag.
 */
#define be_foreach_definition(node, ccls, value, code) \
	be_foreach_definition_(node, ccls, value, \
		if (req_->type & arch_register_req_type_ignore) \
			continue; \
		code \
	)
633
arch_get_irn_reg_class(const ir_node * node)634 static inline const arch_register_class_t *arch_get_irn_reg_class(
635 const ir_node *node)
636 {
637 const arch_register_req_t *req = arch_get_irn_register_req(node);
638 return req->cls;
639 }
640
/** Check whether register @p reg is allocatable under requirement @p req. */
bool arch_reg_is_allocatable(const arch_register_req_t *req,
                             const arch_register_t *reg);

#endif
645