/*
 * Copyright (C) 1995-2008 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief       First simple copy minimization heuristics.
 * @author      Daniel Grund
 * @date        12.04.2005
 *
 * Heuristic for minimizing copies using a queue which holds 'qnodes' not yet
 * examined. A qnode has a 'target color', nodes out of the opt unit and a
 * 'conflict graph'. 'Conflict graph' = 'interference graph' + 'conflict edges'.
 * A maximum independent set (MIS) is determined from these. We try to color
 * this MIS using a color-exchanging mechanism. Occurring conflicts are modeled
 * with 'conflict edges' and the qnode is reinserted into the queue. The first
 * qnode colored without conflicts is the best one.
 */
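/*
 * Rough correspondence to the code below (a reading aid, not a spec):
 *   ou_optimize()        creates one qnode per admissible target color and
 *                        keeps them in a queue sorted by MIS costs,
 *   qnode_max_ind_set()  computes the MIS of an opt unit w.r.t. interference
 *                        and conflict edges,
 *   qnode_try_color()    tries to move all MIS members to the target color via
 *                        qnode_color_irn(); on failure a conflict edge is added
 *                        and the qnode is re-inserted into the queue.
 * The first qnode whose MIS can be colored without conflicts wins; its colors
 * are committed and its nodes are pinned globally.
 */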
#include "config.h"

#include "debug.h"
#include "bitset.h"
#include "raw_bitset.h"
#include "xmalloc.h"

#include "becopyopt_t.h"
#include "becopystat.h"
#include "beintlive_t.h"
#include "beirg.h"
#include "bemodule.h"

DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)

/** Defines an invalid register index. */
#define NO_COLOR (-1)

#define SEARCH_FREE_COLORS

#define SLOTS_PINNED_GLOBAL 64
#define SLOTS_CONFLICTS 8
#define SLOTS_CHANGED_NODES 32

#define list_entry_queue(lh) list_entry(lh, qnode_t, queue)
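/* XOR of both node hashes: symmetric in n1/n2, which matches the canonical
 * ordering (smaller node index first) used when conflicts are stored. */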
#define HASH_CONFLICT(c) (hash_irn(c.n1) ^ hash_irn(c.n2))

/**
 * Models additional conflicts between nodes, NOT live range interference.
 */
typedef struct conflict_t {
	const ir_node *n1, *n2;
} conflict_t;

/**
 * If an irn is changed, the change is first stored in a node_stat_t,
 * so it can be undone (= the new data dropped) in case of conflicts.
 */
typedef struct node_stat_t {
	ir_node *irn;
	int     new_color;
	int     pinned_local :1;
} node_stat_t;

/**
 * Represents a node in the optimization queue.
 */
typedef struct qnode_t {
	struct list_head queue;            /**< chaining of unit_t->queue */
	const unit_t     *ou;              /**< the opt unit this node belongs to */
	int              color;            /**< target color */
	set              *conflicts;       /**< contains conflict_t's. All internal conflicts */
	int              mis_costs;        /**< costs of nodes/copies in the mis. */
	int              mis_size;         /**< size of the array below */
	ir_node          **mis;            /**< the nodes of unit_t->nodes[] being part of the max independent set */
	set              *changed_nodes;   /**< contains node_stat_t's. */
} qnode_t;

static pset *pinned_global;  /**< optimized nodes should not be altered any more */

static inline int nodes_interfere(const be_chordal_env_t *env, const ir_node *a, const ir_node *b)
{
	if (env->ifg)
		return be_ifg_connected(env->ifg, a, b);
	else {
		be_lv_t *lv = be_get_irg_liveness(env->irg);
		return be_values_interfere(lv, a, b);
	}
}

static int set_cmp_conflict_t(const void *x, const void *y, size_t size)
{
	const conflict_t *xx = (const conflict_t*)x;
	const conflict_t *yy = (const conflict_t*)y;
	(void) size;

	return xx->n1 != yy->n1 || xx->n2 != yy->n2;
}

/**
 * If a local pinned conflict occurs, a new edge in the conflict graph is
 * added. The next maximum independent set built will take it into account.
 */
static inline void qnode_add_conflict(const qnode_t *qn, const ir_node *n1, const ir_node *n2)
{
	conflict_t c;
	DBG((dbg, LEVEL_4, "\t      %+F -- %+F\n", n1, n2));

	if (get_irn_idx(n1) < get_irn_idx(n2)) {
		c.n1 = n1;
		c.n2 = n2;
	} else {
		c.n1 = n2;
		c.n2 = n1;
	}
	(void)set_insert(conflict_t, qn->conflicts, &c, sizeof(c), HASH_CONFLICT(c));
}

/**
 * Checks whether two nodes are in conflict.
 */
static inline int qnode_are_conflicting(const qnode_t *qn, const ir_node *n1, const ir_node *n2)
{
	conflict_t c;
	/* search for live range interference */
	if (n1!=n2 && nodes_interfere(qn->ou->co->cenv, n1, n2))
		return 1;
	/* search for recoloring conflicts */
	if (get_irn_idx(n1) < get_irn_idx(n2)) {
		c.n1 = n1;
		c.n2 = n2;
	} else {
		c.n1 = n2;
		c.n2 = n1;
	}
	return set_find(conflict_t, qn->conflicts, &c, sizeof(c), HASH_CONFLICT(c)) != NULL;
}

static int set_cmp_node_stat_t(const void *x, const void *y, size_t size)
{
	(void) size;
	return ((const node_stat_t*)x)->irn != ((const node_stat_t*)y)->irn;
}

/**
 * Finds the status entry of a node if it exists, otherwise returns NULL.
 */
static inline const node_stat_t *qnode_find_node(const qnode_t *qn, ir_node *irn)
{
	node_stat_t find;
	find.irn = irn;
	return set_find(node_stat_t, qn->changed_nodes, &find, sizeof(find), hash_irn(irn));
}

/**
 * Finds the status entry of a node if it exists, otherwise returns a newly
 * initialized entry for this node.
 */
static inline node_stat_t *qnode_find_or_insert_node(const qnode_t *qn, ir_node *irn)
{
	node_stat_t find;
	find.irn = irn;
	find.new_color = NO_COLOR;
	find.pinned_local = 0;
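	/* set_insert() returns the already present entry for this irn if there is
	 * one, otherwise it inserts (a copy of) 'find', so this acts as
	 * find-or-insert. */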
	return set_insert(node_stat_t, qn->changed_nodes, &find, sizeof(find), hash_irn(irn));
}

/**
 * Returns the virtual color of a node if set before, else returns the real color.
 */
static inline int qnode_get_new_color(const qnode_t *qn, ir_node *irn)
{
	const node_stat_t *found = qnode_find_node(qn, irn);
	if (found)
		return found->new_color;
	else
		return get_irn_col(irn);
}

/**
 * Sets the virtual color of a node.
 */
static inline void qnode_set_new_color(const qnode_t *qn, ir_node *irn, int color)
{
	node_stat_t *found = qnode_find_or_insert_node(qn, irn);
	found->new_color = color;
	DBG((dbg, LEVEL_3, "\t      col(%+F) := %d\n", irn, color));
}

/**
 * Checks whether a node is locally pinned. A node is locally pinned iff it
 * belongs to the same optimization unit and has been optimized before the
 * currently processed node.
 */
static inline int qnode_is_pinned_local(const qnode_t *qn, ir_node *irn)
{
	const node_stat_t *found = qnode_find_node(qn, irn);
	if (found)
		return found->pinned_local;
	else
		return 0;
}

/**
 * Local-pins a node, so optimizations of further nodes of the same opt unit
 * can handle situations in which a color change would undo prior optimizations.
 */
static inline void qnode_pin_local(const qnode_t *qn, ir_node *irn)
{
	node_stat_t *found = qnode_find_or_insert_node(qn, irn);
	found->pinned_local = 1;
	if (found->new_color == NO_COLOR)
		found->new_color = get_irn_col(irn);
}


/**
 * Possible return values of qnode_color_irn()
 */
#define CHANGE_SAVE NULL
#define CHANGE_IMPOSSIBLE (ir_node *)1
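/* Both values are sentinels that can never collide with a real node pointer;
 * every other return value of qnode_color_irn() is the first conflicting node
 * encountered. */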

/**
 * Performs virtual re-coloring of node @p irn to color @p col. Virtual colors
 * of other nodes are changed too, as required to preserve correctness. The
 * function is aware of local and global pinning. Recursive.
 *
 * If irn == trigger the color @p col must be used. (the first recoloring)
 * If irn != trigger an arbitrary free color may be used. If no color is free, @p col is used.
 *
 * @param  irn     The node to set the color for
 * @param  col     The color to set
 * @param  trigger The irn that caused the wish to change the color of the irn
 *                 External callers must call with trigger = irn
 *
 * @return CHANGE_SAVE iff setting the color is possible, with all transitive effects.
 *         CHANGE_IMPOSSIBLE iff a conflict with register constraints occurred.
 *         Else the first conflicting ir_node encountered is returned.
 *
 */
static ir_node *qnode_color_irn(const qnode_t *qn, ir_node *irn, int col, const ir_node *trigger)
{
	copy_opt_t *co = qn->ou->co;
	const be_chordal_env_t *chordal_env = co->cenv;
	const arch_register_class_t *cls = co->cls;
	int irn_col = qnode_get_new_color(qn, irn);
	ir_node *sub_res, *curr;
	be_ifg_t *ifg = chordal_env->ifg;
	neighbours_iter_t iter;
	const arch_register_req_t *req;

	DBG((dbg, LEVEL_3, "\t    %+F \tcaused col(%+F) \t%2d --> %2d\n", trigger, irn, irn_col, col));

	/* If the target color is already set do nothing */
	if (irn_col == col) {
		DBG((dbg, LEVEL_3, "\t      %+F same color\n", irn));
		return CHANGE_SAVE;
	}

	/* If the irn is pinned, changing color is impossible */
	if (pset_find_ptr(pinned_global, irn) || qnode_is_pinned_local(qn, irn)) {
		DBG((dbg, LEVEL_3, "\t      %+F conflicting\n", irn));
		return irn;
	}

	req = arch_get_irn_register_req(irn);
#ifdef SEARCH_FREE_COLORS
	/* If we resolve conflicts (recursive calls) we can use any unused color.
	 * In case of the first call @p col must be used.
	 */
	if (irn != trigger) {
		bitset_t *free_cols = bitset_alloca(cls->n_regs);
		ir_node *curr;
		int free_col;

		/* Get all possible colors */
		bitset_copy(free_cols, co->cenv->allocatable_regs);

		/* Exclude colors not assignable to the irn */
		if (arch_register_req_is(req, limited)) {
			bitset_t *limited = bitset_alloca(cls->n_regs);
			rbitset_copy_to_bitset(req->limited, limited);
			bitset_and(free_cols, limited);
		}

		/* Exclude the color of the irn, because it must _change_ its color */
		bitset_clear(free_cols, irn_col);

		/* Exclude all colors used by adjacent nodes */
		be_ifg_foreach_neighbour(ifg, &iter, irn, curr)
			bitset_clear(free_cols, qnode_get_new_color(qn, curr));

		free_col = bitset_next_set(free_cols, 0);

		if (free_col != -1) {
			qnode_set_new_color(qn, irn, free_col);
			return CHANGE_SAVE;
		}
	}
#endif /* SEARCH_FREE_COLORS */

	/* If the target color is not allocatable, changing color is impossible */
	if (!arch_reg_is_allocatable(req, arch_register_for_index(cls, col))) {
		DBG((dbg, LEVEL_3, "\t      %+F impossible\n", irn));
		return CHANGE_IMPOSSIBLE;
	}

	/*
	 * If we arrive here changing color may be possible, but there may be conflicts.
	 * Try to color all conflicting nodes 'curr' with the color of the irn itself.
	 */
	be_ifg_foreach_neighbour(ifg, &iter, irn, curr) {
		DBG((dbg, LEVEL_3, "\t      Confl %+F(%d)\n", curr, qnode_get_new_color(qn, curr)));
		if (qnode_get_new_color(qn, curr) == col && curr != trigger) {
			sub_res = qnode_color_irn(qn, curr, irn_col, irn);
			if (sub_res != CHANGE_SAVE) {
				be_ifg_neighbours_break(&iter);
				return sub_res;
			}
		}
	}

	/*
	 * If we arrive here, all conflicts were resolved.
	 * So it is safe to change this irn.
	 */
	qnode_set_new_color(qn, irn, col);
	return CHANGE_SAVE;
}
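
/*
 * Illustrative example (a sketch, node names are made up): suppose MIS member
 * a shall get color 3, but its interference neighbour b currently holds
 * color 3. qnode_color_irn(qn, a, 3, a) then recurses with
 * qnode_color_irn(qn, b, col(a), a): b first tries any free admissible color;
 * only if none is free does it take a's old color, possibly pushing the
 * conflict further. If b is pinned (locally or globally), b itself is returned
 * as the conflicting node and the caller can react to it.
 */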


/**
 * Tries to set the colors of all members of this queue node
 * to the target color qn->color.
 * @returns 1 iff the colors of all members could be set
 *          0 otherwise
 */
static int qnode_try_color(const qnode_t *qn)
{
	int i;
	for (i=0; i<qn->mis_size; ++i) {
		ir_node *test_node, *confl_node;

		test_node = qn->mis[i];
		DBG((dbg, LEVEL_3, "\t    Testing %+F\n", test_node));
		confl_node = qnode_color_irn(qn, test_node, qn->color, test_node);

		if (confl_node == CHANGE_SAVE) {
			DBG((dbg, LEVEL_3, "\t    Save --> pin local\n"));
			qnode_pin_local(qn, test_node);
		} else if (confl_node == CHANGE_IMPOSSIBLE) {
			DBG((dbg, LEVEL_3, "\t    Impossible --> remove from qnode\n"));
			qnode_add_conflict(qn, test_node, test_node);
			return 0;
		} else {
			if (qnode_is_pinned_local(qn, confl_node)) {
				/* changing test_node would change back a node of the current ou */
				if (confl_node == qn->ou->nodes[0]) {
					/* Adding a conflict edge between test_node and confl_node
					 * would introduce a root -- arg interference.
					 * So remove the arg of the qn */
					DBG((dbg, LEVEL_3, "\t    Conflicting local with phi --> remove from qnode\n"));
					qnode_add_conflict(qn, test_node, test_node);
				} else {
					DBG((dbg, LEVEL_3, "\t    Conflicting local --> add conflict\n"));
					qnode_add_conflict(qn, confl_node, test_node);
				}
			}
			if (pset_find_ptr(pinned_global, confl_node)) {
				/* changing test_node would change back a node of a prior ou */
				DBG((dbg, LEVEL_3, "\t    Conflicting global --> remove from qnode\n"));
				qnode_add_conflict(qn, test_node, test_node);
			}
			return 0;
		}
	}
	return 1;
}

/**
 * Determines a maximum weighted independent set with respect to
 * the interference and conflict edges of all nodes in a qnode.
 */
static inline void qnode_max_ind_set(qnode_t *qn, const unit_t *ou)
{
	ir_node **safe, **unsafe;
	int i, o, safe_count, safe_costs, unsafe_count, *unsafe_costs;
	bitset_t *curr, *best;
	int next, curr_weight, best_weight = 0;

	/* assign the nodes into two groups.
	 * safe: node has no interference, hence it is in every max stable set.
	 * unsafe: node has an interference
	 */
	safe         = ALLOCAN(ir_node*, ou->node_count - 1);
	safe_costs   = 0;
	safe_count   = 0;
	unsafe       = ALLOCAN(ir_node*, ou->node_count - 1);
	unsafe_costs = ALLOCAN(int,      ou->node_count - 1);
	unsafe_count = 0;
	for (i=1; i<ou->node_count; ++i) {
		int is_safe = 1;
		for (o=1; o<ou->node_count; ++o) {
			if (qnode_are_conflicting(qn, ou->nodes[i], ou->nodes[o])) {
				if (i!=o) {
					unsafe_costs[unsafe_count] = ou->costs[i];
					unsafe[unsafe_count] = ou->nodes[i];
					++unsafe_count;
				}
				is_safe = 0;
				break;
			}
		}
		if (is_safe) {
			safe_costs += ou->costs[i];
			safe[safe_count++] = ou->nodes[i];
		}
	}

	/* now compute the best set out of the unsafe nodes */
	best = bitset_alloca(unsafe_count);

	if (unsafe_count > MIS_HEUR_TRIGGER) {
		/* Heuristic: Greedy trial and error from index 0 to unsafe_count-1 */
		for (i=0; i<unsafe_count; ++i) {
			bitset_set(best, i);
			/* check if it is a stable set */
			for (o=bitset_next_set(best, 0); o!=-1 && o<=i; o=bitset_next_set(best, o+1))
				if (qnode_are_conflicting(qn, unsafe[i], unsafe[o])) {
					bitset_clear(best, i); /* clear the bit and try next one */
					break;
				}
		}
		/* compute the weight */
		bitset_foreach(best, pos)
			best_weight += unsafe_costs[pos];
	} else {
		/* Exact Algorithm: Brute force */
		curr = bitset_alloca(unsafe_count);
		bitset_set_all(curr);
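		/* Enumerate all subsets of the unsafe nodes: bitset_minus1() below
		 * decrements 'curr' when interpreted as a binary number, so starting
		 * from the full set every subset is visited exactly once before the
		 * loop terminates with the empty set. */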
		while (!bitset_is_empty(curr)) {
			/* check if curr is a stable set */
			for (i=bitset_next_set(curr, 0); i!=-1; i=bitset_next_set(curr, i+1))
				for (o=bitset_next_set(curr, i); o!=-1; o=bitset_next_set(curr, o+1)) /* !!!!! difference to ou_max_ind_set_costs(): NOT (curr, i+1) */
					if (qnode_are_conflicting(qn, unsafe[i], unsafe[o]))
						goto no_stable_set;

			/* if we arrive here, we have a stable set */
			/* compute the weight of the stable set */
			curr_weight = 0;
			bitset_foreach(curr, pos)
				curr_weight += unsafe_costs[pos];

			/* any better ? */
			if (curr_weight > best_weight) {
				best_weight = curr_weight;
				bitset_copy(best, curr);
			}

no_stable_set:
			bitset_minus1(curr);
		}
	}

	/* transfer the best set into the qn */
	qn->mis_size = 1+safe_count+bitset_popcount(best);
	qn->mis_costs = safe_costs+best_weight;
	qn->mis[0] = ou->nodes[0]; /* the root is always in a max stable set */
	next = 1;
	for (i=0; i<safe_count; ++i)
		qn->mis[next++] = safe[i];
	bitset_foreach(best, pos)
		qn->mis[next++] = unsafe[pos];
}

/**
 * Creates a new qnode
 */
static inline qnode_t *new_qnode(const unit_t *ou, int color)
{
	qnode_t *qn = XMALLOC(qnode_t);
	qn->ou            = ou;
	qn->color         = color;
	qn->mis           = XMALLOCN(ir_node*, ou->node_count);
	qn->conflicts     = new_set(set_cmp_conflict_t, SLOTS_CONFLICTS);
	qn->changed_nodes = new_set(set_cmp_node_stat_t, SLOTS_CHANGED_NODES);
	return qn;
}

/**
 * Frees space used by a queue node
 */
static inline void free_qnode(qnode_t *qn)
{
	del_set(qn->conflicts);
	del_set(qn->changed_nodes);
	xfree(qn->mis);
	xfree(qn);
}

/**
 * Inserts a qnode into the sorted queue of the optimization unit. The queue is
 * ordered by the costs of the MIS ('mis_costs') in decreasing order.
 */
static inline void ou_insert_qnode(unit_t *ou, qnode_t *qn)
{
	struct list_head *lh;

	if (qnode_are_conflicting(qn, ou->nodes[0], ou->nodes[0])) {
		/* root node is not in qnode */
		free_qnode(qn);
		return;
	}

	qnode_max_ind_set(qn, ou);
	/* do the insertion */
	DBG((dbg, LEVEL_4, "\t  Insert qnode color %d with cost %d\n", qn->color, qn->mis_costs));
	lh = &ou->queue;
	while (lh->next != &ou->queue) {
		qnode_t *curr = list_entry_queue(lh->next);
		if (curr->mis_costs <= qn->mis_costs)
			break;
		lh = lh->next;
	}
	list_add(&qn->queue, lh);
}

/**
 * Tries to re-allocate colors of nodes in this opt unit, to achieve lower
 * costs of copy instructions placed during SSA-destruction and lowering.
 * Works only for opt units with exactly 1 root node, which is the
 * case for approximately 80% of all phi classes and 100% of register constrained
 * nodes. (All other phi classes are reduced to this case.)
 */
static void ou_optimize(unit_t *ou)
{
	qnode_t                   *curr = NULL;
	const arch_register_req_t *req;
	bitset_t const*            allocatable_regs;
	unsigned                   n_regs;
	unsigned                   idx;
	int                        i;

	DBG((dbg, LEVEL_1, "\tOptimizing unit:\n"));
	for (i=0; i<ou->node_count; ++i)
		DBG((dbg, LEVEL_1, "\t %+F\n", ou->nodes[i]));

	/* init queue */
	INIT_LIST_HEAD(&ou->queue);

	req              = arch_get_irn_register_req(ou->nodes[0]);
	allocatable_regs = ou->co->cenv->allocatable_regs;
	n_regs           = req->cls->n_regs;
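	/* Create one qnode per admissible target color: if the root node is
	 * register-constrained, only the colors in its 'limited' set are tried,
	 * otherwise every allocatable color of the class is a candidate. */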
	if (arch_register_req_is(req, limited)) {
		unsigned const* limited = req->limited;

		for (idx = 0; idx != n_regs; ++idx) {
			if (!bitset_is_set(allocatable_regs, idx))
				continue;
			if (!rbitset_is_set(limited, idx))
				continue;

			ou_insert_qnode(ou, new_qnode(ou, idx));
		}
	} else {
		for (idx = 0; idx != n_regs; ++idx) {
			if (!bitset_is_set(allocatable_regs, idx))
				continue;

			ou_insert_qnode(ou, new_qnode(ou, idx));
		}
	}

	/* search best */
	for (;;) {
		assert(!list_empty(&ou->queue));
		/* get head of queue */
		curr = list_entry_queue(ou->queue.next);
		list_del(&curr->queue);
		DBG((dbg, LEVEL_2, "\t  Examine qnode color %d with cost %d\n", curr->color, curr->mis_costs));

		/* try */
		if (qnode_try_color(curr))
			break;

		/* no success, so re-insert */
		del_set(curr->changed_nodes);
		curr->changed_nodes = new_set(set_cmp_node_stat_t, SLOTS_CHANGED_NODES);
		ou_insert_qnode(ou, curr);
	}

	/* apply the best found qnode */
	if (curr->mis_size >= 2) {
		int root_col = qnode_get_new_color(curr, ou->nodes[0]);
		DBG((dbg, LEVEL_1, "\t  Best color: %d  Costs: %d << %d << %d\n", curr->color, ou->min_nodes_costs, ou->all_nodes_costs - curr->mis_costs, ou->all_nodes_costs));
		/* globally pin root and all args which have the same color */
		pset_insert_ptr(pinned_global, ou->nodes[0]);
		for (i=1; i<ou->node_count; ++i) {
			ir_node *irn = ou->nodes[i];
			int nc = qnode_get_new_color(curr, irn);
			if (nc != NO_COLOR && nc == root_col)
				pset_insert_ptr(pinned_global, irn);
		}

		/* set color of all changed nodes */
		foreach_set(curr->changed_nodes, node_stat_t, ns) {
			/* NO_COLOR is possible, if we had an undo */
			if (ns->new_color != NO_COLOR) {
				DBG((dbg, LEVEL_1, "\t    color(%+F) := %d\n", ns->irn, ns->new_color));
				set_irn_col(ou->co->cls, ns->irn, ns->new_color);
			}
		}
	}

	/* free best qnode (curr) and queue */
	free_qnode(curr);
	list_for_each_entry_safe(qnode_t, curr, tmp, &ou->queue, queue)
		free_qnode(curr);
}

/**
 * Solves the problem using a heuristic approach.
 * Uses the OU data structure.
 */
int co_solve_heuristic(copy_opt_t *co)
{
	ASSERT_OU_AVAIL(co);

	pinned_global = pset_new_ptr(SLOTS_PINNED_GLOBAL);
	list_for_each_entry(unit_t, curr, &co->units, units)
		if (curr->node_count > 1)
			ou_optimize(curr);

	del_pset(pinned_global);
	return 0;
}
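
/*
 * Rough usage sketch (hedged: this mirrors the usual becopyopt driver, the
 * exact helper names may differ between libFirm versions):
 *
 *   copy_opt_t *co = new_copy_opt(chordal_env, co_get_costs_loop_depth);
 *   co_build_ou_structure(co);
 *   co_solve_heuristic(co);
 *   co_free_ou_structure(co);
 *   free_copy_opt(co);
 *
 * Normally this is not called directly; it is selected through the copy-opt
 * module registered below under the name "heur1".
 */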

BE_REGISTER_MODULE_CONSTRUCTOR(be_init_copyheur)
void be_init_copyheur(void)
{
	static co_algo_info copyheur = {
		co_solve_heuristic, 0
	};

	be_register_copyopt("heur1", &copyheur);
	FIRM_DBG_REGISTER(dbg, "ir.be.copyoptheur");
}