/*
 * Copyright (C) 1995-2011 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief       Peephole optimisation framework that keeps track of which
 *              registers contain which values
 * @author      Matthias Braun
 */
#include "config.h"

#include "array_t.h"
#include "bepeephole.h"

#include "iredges_t.h"
#include "irgwalk.h"
#include "irprintf.h"
#include "ircons.h"
#include "irgmod.h"
#include "heights.h"
#include "error.h"

#include "beirg.h"
#include "belive_t.h"
#include "bearch.h"
#include "beintlive_t.h"
#include "benode.h"
#include "besched.h"
#include "bemodule.h"

DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

static const arch_env_t *arch_env;
static be_lv_t          *lv;
static ir_node          *current_node;
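/** The node whose value each register currently contains, indexed by the
 * register's global index; NULL means the contents are unknown. */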
ir_node **register_values;

/** Forget the value recorded for the register assigned to @p node. */
static void clear_reg_value(ir_node *node)
{
	const arch_register_t *reg;
	unsigned               reg_idx;

	if (!mode_is_data(get_irn_mode(node)))
		return;

	reg = arch_get_irn_register(node);
	if (reg == NULL) {
		panic("No register assigned at %+F", node);
	}
	if (reg->type & arch_register_type_virtual)
		return;
	reg_idx = reg->global_index;

	DB((dbg, LEVEL_1, "Clear Register %s\n", reg->name));
	register_values[reg_idx] = NULL;
}

/** Record @p node as the value contained in its assigned register. */
static void set_reg_value(ir_node *node)
{
	const arch_register_t *reg;
	unsigned               reg_idx;

	if (!mode_is_data(get_irn_mode(node)))
		return;

	reg = arch_get_irn_register(node);
	if (reg == NULL) {
		panic("No register assigned at %+F", node);
	}
	if (reg->type & arch_register_type_virtual)
		return;
	reg_idx = reg->global_index;

	DB((dbg, LEVEL_1, "Set Register %s: %+F\n", reg->name, node));
	register_values[reg_idx] = node;
}

/** Clear the recorded values for all registers defined by @p node. */
static void clear_defs(ir_node *node)
{
	/* clear values defined */
	if (get_irn_mode(node) == mode_T) {
		foreach_out_edge(node, edge) {
			ir_node *proj = get_edge_src_irn(edge);
			clear_reg_value(proj);
		}
	} else {
		clear_reg_value(node);
	}
}

/** Record the values of all operands of @p node in their registers. */
static void set_uses(ir_node *node)
{
	int i, arity;

	/* set values used */
	arity = get_irn_arity(node);
	for (i = 0; i < arity; ++i) {
		ir_node *in = get_irn_n(node, i);
		set_reg_value(in);
	}
}

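/** Let the liveness information know about a node freshly created by a
 * peephole handler. */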
void be_peephole_new_node(ir_node *nw)
{
	be_liveness_introduce(lv, nw);
}

/**
 * Must be called from peephole optimisations before a node is killed
 * and its users are redirected to new_node, so that bepeephole can
 * update its internal state.
 *
 * Note: killing a node and rewiring its users is only allowed if new_node
 * produces the same registers as old_node.
 */
static void be_peephole_before_exchange(const ir_node *old_node,
                                        ir_node *new_node)
{
	const arch_register_t *reg;
	unsigned               reg_idx;
	bool                   old_is_current = false;

	DB((dbg, LEVEL_1, "About to exchange and kill %+F with %+F\n", old_node, new_node));

	assert(sched_is_scheduled(skip_Proj_const(old_node)));
	assert(sched_is_scheduled(skip_Proj(new_node)));

	if (current_node == old_node) {
		old_is_current = true;

		/* The node about to be killed is the one currently being processed.
		 * Step to its scheduling successor, so that after the removal the
		 * walk's sched_prev() continues with its scheduling predecessor. */
		current_node = sched_next(current_node);
		assert(!is_Bad(current_node));

		/* we cannot handle liveness updates correctly when exchanging the
		 * current node with something scheduled behind it */
		assert(value_dominates(skip_Proj(new_node), skip_Proj_const(old_node)));
	}

	if (!mode_is_data(get_irn_mode(old_node)))
		return;

	reg = arch_get_irn_register(old_node);
	if (reg == NULL) {
		panic("No register assigned at %+F", old_node);
	}
	assert(reg == arch_get_irn_register(new_node) &&
	      "KILLING a node and replacing by different register is not allowed");

	reg_idx = reg->global_index;
	if (register_values[reg_idx] == old_node || old_is_current) {
		register_values[reg_idx] = new_node;
	}

	be_liveness_remove(lv, old_node);
}

/** Remove @p old from the schedule, redirect all its users to @p nw and
 * update the internal framework state accordingly. */
void be_peephole_exchange(ir_node *old, ir_node *nw)
{
	be_peephole_before_exchange(old, nw);
	sched_remove(old);
	exchange(old, nw);
	be_peephole_new_node(nw);
}

/**
 * block-walker: run peephole optimization on the given block.
 */
static void process_block(ir_node *block, void *data)
{
	(void) data;

	/* construct initial register assignment */
	memset(register_values, 0, sizeof(ir_node*) * arch_env->n_registers);

	assert(lv->sets_valid && "live sets must be computed");
	DB((dbg, LEVEL_1, "\nProcessing block %+F (from end)\n", block));
	be_lv_foreach(lv, block, be_lv_state_end, node) {
		set_reg_value(node);
	}
	DB((dbg, LEVEL_1, "\nstart processing\n"));

	/* walk the block from the last instruction to the first */
	current_node = sched_last(block);
	for ( ; !sched_is_begin(current_node);
			current_node = sched_prev(current_node)) {
		ir_op             *op;
		peephole_opt_func  peephole_node;

		assert(!is_Bad(current_node));
		if (is_Phi(current_node))
			break;

		clear_defs(current_node);
		set_uses(current_node);

		op            = get_irn_op(current_node);
		peephole_node = (peephole_opt_func)op->ops.generic;
		if (peephole_node == NULL)
			continue;

		DB((dbg, LEVEL_2, "optimize %+F\n", current_node));
		peephole_node(current_node);
		assert(!is_Bad(current_node));
	}
}

/**
 * Check whether the node has only one user, explicitly ignoring the Anchor
 * node and keep-alive edges from the End node.
 */
bool be_has_only_one_user(ir_node *node)
{
	int n = get_irn_n_edges(node);
	int n_users;

	if (n <= 1)
		return true;

	n_users = 0;
	foreach_out_edge(node, edge) {
		ir_node *src = get_edge_src_irn(edge);
		/* ignore anchor and keep-alive edges */
		if (is_Anchor(src) || is_End(src))
			continue;
		n_users++;
	}

	return n_users == 1;
}

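/** Check whether two register/requirement pairs denote overlapping ranges of
 * registers. A requirement may span several registers (req->width > 1); for
 * example, a hypothetical 64-bit value held in a pair of 32-bit registers
 * with global indices 4 and 5 overlaps a single register with global
 * index 5. */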
static inline bool overlapping_regs(const arch_register_t *reg0,
	const arch_register_req_t *req0, const arch_register_t *reg1,
	const arch_register_req_t *req1)
{
	if (reg0 == NULL || reg1 == NULL)
		return false;
	return reg0->global_index < (unsigned)reg1->global_index + req1->width
		&& reg1->global_index < (unsigned)reg0->global_index + req0->width;
}

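/** Check whether @p node may be moved down in the schedule to the point just
 * before @p before: no node in between may use a value computed by @p node
 * or overwrite a register holding one of its inputs. */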
bool be_can_move_down(ir_heights_t *heights, const ir_node *node,
                      const ir_node *before)
{
	assert(get_nodes_block(node) == get_nodes_block(before));
	assert(sched_get_time_step(node) < sched_get_time_step(before));

	int      node_arity = get_irn_arity(node);
	ir_node *schedpoint = sched_next(node);

	while (schedpoint != before) {
		/* schedpoint must not use our computed values */
		if (heights_reachable_in_block(heights, schedpoint, node))
			return false;

		/* schedpoint must not overwrite registers of our inputs */
		unsigned n_outs = arch_get_irn_n_outs(schedpoint);
		for (int i = 0; i < node_arity; ++i) {
			ir_node                   *in  = get_irn_n(node, i);
			const arch_register_t     *reg = arch_get_irn_register(in);
			if (reg == NULL)
				continue;
			const arch_register_req_t *in_req
				= arch_get_irn_register_req_in(node, i);
			for (unsigned o = 0; o < n_outs; ++o) {
				const arch_register_t *outreg
					= arch_get_irn_register_out(schedpoint, o);
				const arch_register_req_t *outreq
					= arch_get_irn_register_req_out(schedpoint, o);
				if (overlapping_regs(reg, in_req, outreg, outreq))
					return false;
			}
		}

		schedpoint = sched_next(schedpoint);
	}
	return true;
}

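/** Check whether @p node may be moved up in the schedule to the point just
 * after @p after. @p after may also be in the immediate predecessor block;
 * in that case @p node must not clobber live-in values or Phis of the other
 * successor blocks, and nodes with a real memory dependency are not moved
 * across the block boundary. */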
bool be_can_move_up(ir_heights_t *heights, const ir_node *node,
                    const ir_node *after)
{
	unsigned       n_outs      = arch_get_irn_n_outs(node);
	const ir_node *node_block  = get_nodes_block(node);
	const ir_node *after_block = get_block_const(after);
	const ir_node *schedpoint;
	if (node_block != after_block) {
		/* currently we can only move up by exactly one block */
		assert(get_Block_cfgpred_block(node_block, 0) == after_block);
		ir_node *first = sched_first(node_block);

		/* do not move nodes changing memory */
		if (is_memop(node)) {
			ir_node *meminput = get_memop_mem(node);
			if (!is_NoMem(meminput))
				return false;
		}

		/* make sure we can move to the beginning of the succ block */
		if (node != first && !be_can_move_up(heights, node, sched_prev(first)))
			return false;

		/* check whether node overwrites any live-in value of the other
		 * successors */
		ir_graph *irg = get_irn_irg(node);
		be_lv_t  *lv  = be_get_irg_liveness(irg);
		foreach_block_succ(after_block, edge) {
			ir_node *succ = get_edge_src_irn(edge);
			if (succ == node_block)
				continue;

			be_lv_foreach(lv, succ, be_lv_state_in, live_node) {
				const arch_register_t     *reg = arch_get_irn_register(live_node);
				const arch_register_req_t *req = arch_get_irn_register_req(live_node);
				for (unsigned o = 0; o < n_outs; ++o) {
					const arch_register_t *outreg
						= arch_get_irn_register_out(node, o);
					const arch_register_req_t *outreq
						= arch_get_irn_register_req_out(node, o);
					if (overlapping_regs(outreg, outreq, reg, req))
						return false;
				}
			}
			sched_foreach(succ, phi) {
				if (!is_Phi(phi))
					break;
				const arch_register_t     *reg = arch_get_irn_register(phi);
				const arch_register_req_t *req = arch_get_irn_register_req(phi);
				for (unsigned o = 0; o < n_outs; ++o) {
					const arch_register_t *outreg
						= arch_get_irn_register_out(node, o);
					const arch_register_req_t *outreq
						= arch_get_irn_register_req_out(node, o);
					if (overlapping_regs(outreg, outreq, reg, req))
						return false;
				}
			}
		}
		schedpoint = sched_last(after_block);
	} else {
		schedpoint = sched_prev(node);
	}

	/* walk the schedule upwards until we hit the "after" node */
	while (schedpoint != after) {
		/* TODO: the following heights query only works for nodes in the same
		 * block, otherwise we have to be conservative here */
		if (get_nodes_block(node) != get_nodes_block(schedpoint))
			return false;
		/* node must not depend on schedpoint */
		if (heights_reachable_in_block(heights, node, schedpoint))
			return false;

		/* node must not overwrite registers used by schedpoint */
		int arity = get_irn_arity(schedpoint);
		for (int i = 0; i < arity; ++i) {
			const arch_register_t *reg
				= arch_get_irn_register_in(schedpoint, i);
			if (reg == NULL)
				continue;
			const arch_register_req_t *in_req
				= arch_get_irn_register_req_in(schedpoint, i);
			for (unsigned o = 0; o < n_outs; ++o) {
				const arch_register_t *outreg
					= arch_get_irn_register_out(node, o);
				const arch_register_req_t *outreq
					= arch_get_irn_register_req_out(node, o);
				if (overlapping_regs(outreg, outreq, reg, in_req))
					return false;
			}
		}

		schedpoint = sched_prev(schedpoint);
	}
	return true;
}

/*
 * Tries to merge a be_IncSP node with its predecessor IncSP node: the two
 * stack adjustments are combined into one, e.g. adjustments by 8 and by 16
 * bytes become a single adjustment by 24 bytes.
 * Must be run from a be_peephole_opt() context.
 */
ir_node *be_peephole_IncSP_IncSP(ir_node *node)
{
	int      pred_offs;
	int      curr_offs;
	int      offs;
	ir_node *pred = be_get_IncSP_pred(node);

	if (!be_is_IncSP(pred))
		return node;

	if (!be_has_only_one_user(pred))
		return node;

	pred_offs = be_get_IncSP_offset(pred);
	curr_offs = be_get_IncSP_offset(node);
	offs = curr_offs + pred_offs;

	/* add node offset to pred and remove our IncSP */
	be_set_IncSP_offset(pred, offs);

	be_peephole_exchange(node, pred);
	return pred;
}

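/** Run all registered peephole handlers over every block of @p irg. */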
void be_peephole_opt(ir_graph *irg)
{
#if 0
	/* we sometimes find BadE nodes in float apps like optest_float.c or
	 * kahansum.c for example... */
	be_invalidate_live_sets(irg);
#endif
	be_assure_live_sets(irg);

	arch_env = be_get_irg_arch_env(irg);
	lv       = be_get_irg_liveness(irg);

	register_values = XMALLOCN(ir_node*, arch_env->n_registers);

	irg_block_walk_graph(irg, process_block, NULL, NULL);

	xfree(register_values);
}

BE_REGISTER_MODULE_CONSTRUCTOR(be_init_peephole)
void be_init_peephole(void)
{
	FIRM_DBG_REGISTER(dbg, "firm.be.peephole");
}