/*
 * Copyright (C) 1995-2011 University of Karlsruhe.  All rights reserved.
 *
 * This file is part of libFirm.
 *
 * This file may be distributed and/or modified under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation and appearing in the file LICENSE.GPL included in the
 * packaging of this file.
 *
 * Licensees holding valid libFirm Professional Edition licenses may use
 * this file in accordance with the libFirm Commercial License
 * Agreement provided with the Software.
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE.
 */

/**
 * @file
 * @brief       Implements a trace scheduler as presented in Muchnick[TM].
 * @author      Michael Beck
 * @date        28.08.2006
 */
#include "config.h"

#include <limits.h>
#include <stdlib.h>

#include "iredges_t.h"

#include "besched.h"
#include "belistsched.h"
#include "benode.h"
#include "belive.h"
#include "bemodule.h"

/* we need a special mark for the link fields */
static char _mark;
#define MARK &_mark
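
/*
 * The link field of each ir_node is reused for bookkeeping during
 * preprocessing: a link equal to MARK means "seen but not yet visited
 * by descent()", a cleared link means "visited", and afterwards the
 * link chains the nodes of the pre-order list.  Since MARK is the
 * address of a private static object, it can never collide with a real
 * node pointer or a list link.
 */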

typedef struct trace_irn {
	sched_timestep_t delay;      /**< The delay for this node if already calculated, else 0. */
	sched_timestep_t etime;      /**< The earliest time of this node. */
	unsigned num_user;           /**< The number of real users (mode datab) of this node. */
	int      reg_diff;           /**< The difference num(out registers) - num(in registers). */
	int      preorder;           /**< The pre-order position. */
	unsigned critical_path_len;  /**< The weighted length of the longest critical path. */
	unsigned is_root       : 1;  /**< Set if this node is a root node of its block. */
} trace_irn_t;

typedef struct trace_env {
	trace_irn_t      *sched_info;               /**< trace scheduling information about the nodes */
	sched_timestep_t curr_time;                 /**< current time of the scheduler */
	be_lv_t          *liveness;                 /**< The liveness for the irg */
	DEBUG_ONLY(firm_dbg_module_t *dbg;)
} trace_env_t;

/**
 * Returns an arbitrary node from a nodeset.
 */
static ir_node *get_nodeset_node(const ir_nodeset_t *nodeset)
{
	return ir_nodeset_first(nodeset);
}

/**
 * Returns non-zero if the node is a root node.
 */
static inline unsigned is_root_node(trace_env_t *env, ir_node *n)
{
	unsigned const idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	return env->sched_info[idx].is_root;
}

/**
 * Mark a node as root node.
 */
static inline void mark_root_node(trace_env_t *env, ir_node *n)
{
	unsigned const idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	env->sched_info[idx].is_root = 1;
}

/**
 * Get the current delay.
 */
static inline sched_timestep_t get_irn_delay(trace_env_t *env, ir_node *n)
{
	unsigned const idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	return env->sched_info[idx].delay;
}

/**
 * Set the current delay.
 */
static inline void set_irn_delay(trace_env_t *env, ir_node *n, sched_timestep_t delay)
{
	unsigned const idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	env->sched_info[idx].delay = delay;
}

/**
 * Get the current etime.
 */
static inline sched_timestep_t get_irn_etime(trace_env_t *env, ir_node *n)
{
	unsigned const idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	return env->sched_info[idx].etime;
}

/**
 * Set the current etime.
 */
static inline void set_irn_etime(trace_env_t *env, ir_node *n, sched_timestep_t etime)
{
	unsigned const idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	env->sched_info[idx].etime = etime;
}

/**
 * Get the number of users.
 */
static inline unsigned get_irn_num_user(trace_env_t *env, ir_node *n)
{
	unsigned const idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	return env->sched_info[idx].num_user;
}

/**
 * Set the number of users.
 */
static inline void set_irn_num_user(trace_env_t *env, ir_node *n, unsigned num_user)
{
	unsigned const idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	env->sched_info[idx].num_user = num_user;
}

/**
 * Get the register difference.
 */
static inline int get_irn_reg_diff(trace_env_t *env, ir_node *n)
{
	unsigned const idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	return env->sched_info[idx].reg_diff;
}

/**
 * Set the register difference.
 */
static inline void set_irn_reg_diff(trace_env_t *env, ir_node *n, int reg_diff)
{
	unsigned const idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	env->sched_info[idx].reg_diff = reg_diff;
}

/**
 * Get the pre-order position.
 */
static inline int get_irn_preorder(trace_env_t *env, ir_node *n)
{
	unsigned const idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	return env->sched_info[idx].preorder;
}

/**
 * Set the pre-order position.
 */
static inline void set_irn_preorder(trace_env_t *env, ir_node *n, int pos)
{
	unsigned const idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	env->sched_info[idx].preorder = pos;
}

/**
 * Get the critical path length.
 */
static inline unsigned get_irn_critical_path_len(trace_env_t *env, ir_node *n)
{
	unsigned const idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	return env->sched_info[idx].critical_path_len;
}

/**
 * Set the critical path length.
 */
static inline void set_irn_critical_path_len(trace_env_t *env, ir_node *n, unsigned len)
{
	unsigned const idx = get_irn_idx(n);

	assert(idx < ARR_LEN(env->sched_info));
	env->sched_info[idx].critical_path_len = len;
}

/**
 * Returns the execution time for node n.
 */
static sched_timestep_t exectime(trace_env_t *env, ir_node *n)
{
	(void) env;
	if (be_is_Keep(n) || is_Proj(n))
		return 0;
#if 0
	if (env->selector->exectime)
		return env->selector->exectime(env->selector_env, n);
#endif
	return 1;
}

/**
 * Calculates the latency between two ops.
 */
static sched_timestep_t latency(trace_env_t *env, ir_node *pred, int pred_cycle, ir_node *curr, int curr_cycle)
{
	(void) pred_cycle;
	(void) curr_cycle;
	/* a Keep hides a root */
	if (be_is_Keep(curr))
		return exectime(env, pred);

	/* Projs are executed immediately */
	if (is_Proj(curr))
		return 0;

#if 0
	/* predecessor Projs must be skipped */
	if (is_Proj(pred))
		pred = get_Proj_pred(pred);

	if (env->selector->latency)
		return env->selector->latency(env->selector_env, pred, pred_cycle, curr, curr_cycle);
#endif

	return 1;
}

/**
 * Returns the number of users of a node having mode datab.
 */
static int get_num_successors(ir_node *irn)
{
	int sum = 0;

	if (get_irn_mode(irn) == mode_T) {
		/* for mode_T nodes: count the users of all Projs */
		foreach_out_edge(irn, edge) {
			ir_node *proj = get_edge_src_irn(edge);
			ir_mode *mode = get_irn_mode(proj);

			if (mode == mode_T)
				sum += get_num_successors(proj);
			else if (mode_is_datab(mode))
				sum += get_irn_n_edges(proj);
		}
	}
	else {
		/* do not count keep-alive edges */
		foreach_out_edge(irn, edge) {
			if (get_irn_opcode(get_edge_src_irn(edge)) != iro_End)
				sum++;
		}
	}

	return sum;
}
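
/*
 * Example: for a mode_T Load only the users of its data result Proj are
 * counted; the memory Proj has mode M, which is not a datab mode, so
 * memory consumers do not inflate the user count.  For plain nodes,
 * keep-alive edges from the End node are skipped since they are no
 * real users.
 */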

/**
 * Returns the difference regs_output - regs_input.
 */
static int get_reg_difference(trace_env_t *env, ir_node *irn)
{
	int num_out = 0;
	int num_in  = 0;
	int i;
	ir_node *block = get_nodes_block(irn);

	if (be_is_Call(irn)) {
		/* we want calls preferred */
		return -5;
	}

	if (get_irn_mode(irn) == mode_T) {
		/* mode_T nodes: num out regs == num Projs with mode datab */
		foreach_out_edge(irn, edge) {
			ir_node *proj = get_edge_src_irn(edge);
			if (mode_is_datab(get_irn_mode(proj)))
				num_out++;
		}
	}
	else
		num_out = 1;

	/* num in regs: number of ins with mode datab and not ignore */
	for (i = get_irn_arity(irn) - 1; i >= 0; i--) {
		ir_node *in = get_irn_n(irn, i);

		if (!mode_is_datab(get_irn_mode(in)))
			continue;

		if (arch_irn_is_ignore(in))
			continue;

		if (be_is_live_end(env->liveness, block, in))
			continue;

		num_in++;
	}

	return num_out - num_in;
}
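
/*
 * Example: an add whose two operands are ordinary registers that are
 * not live beyond the block yields 1 - 2 = -1, i.e. scheduling it can
 * only lower the register pressure, while a constant producer yields
 * 1 - 0 = +1.  Inputs that are ignore registers or still live at the
 * block end are not counted because selecting the node cannot free
 * them.
 */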

/**
 * Descends into a DAG and creates a pre-order list.
 */
static void descent(ir_node *root, ir_node *block, ir_node **list, trace_env_t *env, unsigned path_len)
{
	int i;

	if (! is_Phi(root)) {
		path_len += exectime(env, root);
		if (get_irn_critical_path_len(env, root) < path_len) {
			set_irn_critical_path_len(env, root, path_len);
		}
		/* calculate number of users (needed for heuristic) */
		set_irn_num_user(env, root, get_num_successors(root));

		/* calculate register difference (needed for heuristic) */
		set_irn_reg_diff(env, root, get_reg_difference(env, root));

		/* Phi nodes always leave the block */
		for (i = get_irn_arity(root) - 1; i >= 0; --i) {
			ir_node *pred = get_irn_n(root, i);

			DBG((env->dbg, LEVEL_3, "   node %+F\n", pred));

			/* Blocks may happen as predecessors of End nodes */
			if (is_Block(pred))
				continue;

			/* already seen nodes are not marked */
			if (get_irn_link(pred) != MARK)
				continue;

			/* don't leave our block */
			if (get_nodes_block(pred) != block)
				continue;

			set_irn_link(pred, NULL);

			descent(pred, block, list, env, path_len);
		}
	}
	set_irn_link(root, *list);
	*list = root;
}
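
/*
 * descent() walks the intra-block dependence DAG depth-first: path_len
 * accumulates the execution times along the current path and raises
 * critical_path_len of every node to the longest path seen so far.
 * Each node is prepended to *list after its operands have been
 * examined, so in the final list every node appears before the nodes
 * it depends on, starting with the DAG roots.
 */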

/**
 * Returns non-zero if root is a root in the given block.
 */
static int is_root(ir_node *root, ir_node *block)
{
	foreach_out_edge(root, edge) {
		ir_node *succ = get_edge_src_irn(edge);

		if (is_Block(succ))
			continue;
		/* Phi nodes are always in "another" block */
		if (is_Phi(succ))
			continue;
		if (get_nodes_block(succ) == block)
			return 0;
	}
	return 1;
}

/**
 * Performs initial block calculations for trace scheduling.
 */
static void trace_preprocess_block(trace_env_t *env, ir_node *block)
{
	ir_node *root = NULL, *preord = NULL;
	ir_node *curr, *irn;
	int cur_pos;

	/* First step: Find the root set. */
	foreach_out_edge(block, edge) {
		ir_node *succ = get_edge_src_irn(edge);

		if (is_Anchor(succ)) {
			/* ignore a keep-alive edge */
			continue;
		}
		if (is_root(succ, block)) {
			mark_root_node(env, succ);
			set_irn_link(succ, root);
			root = succ;
		}
		else
			set_irn_link(succ, MARK);
	}

	/* Second step: calculate the pre-order list. */
	preord = NULL;
	for (curr = root; curr; curr = irn) {
		irn = (ir_node*)get_irn_link(curr);
		DBG((env->dbg, LEVEL_2, "   DAG root %+F\n", curr));
		descent(curr, block, &preord, env, 0);
	}
	root = preord;

	/* Third step: calculate the delay. Note that our
	 * list is now in pre-order, starting at root.
	 */
	for (cur_pos = 0, curr = root; curr; curr = (ir_node*)get_irn_link(curr), cur_pos++) {
		sched_timestep_t d;

		if (is_cfop(curr)) {
			/* ensure that branches can be executed last */
			d = 0;
		}
		else {
			if (is_root_node(env, curr))
				d = exectime(env, curr);
			else {
				d = 0;
				foreach_out_edge(curr, edge) {
					ir_node *n = get_edge_src_irn(edge);

					if (get_nodes_block(n) == block) {
						sched_timestep_t ld;

						ld = latency(env, curr, 1, n, 0) + get_irn_delay(env, n);
						d = ld > d ? ld : d;
					}
				}
			}
		}
		set_irn_delay(env, curr, d);
		DB((env->dbg, LEVEL_2, "\t%+F delay %u\n", curr, d));

		/* set the etime of all nodes to 0 */
		set_irn_etime(env, curr, 0);

		set_irn_preorder(env, curr, cur_pos);
	}
}
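
/*
 * The delay computed above follows the recurrence
 *
 *   delay(n) = 0                                    if n is a cfop
 *   delay(n) = exectime(n)                          if n is a root
 *   delay(n) = max { latency(n, u) + delay(u) }     over all users u
 *                                                   in the same block
 *
 * i.e. the length of the longest dependence chain from n to a root of
 * its block.  Because the pre-order list visits every node before its
 * operands, all user delays are already known when a node is processed.
 */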

/**
 * This function gets called after a node finally has been made ready.
 */
static void trace_node_ready(void *data, ir_node *irn, ir_node *pred)
{
	trace_env_t *env = (trace_env_t*)data;
	sched_timestep_t etime_p, etime;

	etime = env->curr_time;
	if (pred) {
		etime_p = get_irn_etime(env, pred);
		etime  += latency(env, pred, 1, irn, 0);
		etime   = etime_p > etime ? etime_p : etime;
	}

	set_irn_etime(env, irn, etime);
	DB((env->dbg, LEVEL_2, "\tset etime of %+F to %u\n", irn, etime));
}
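
/*
 * With pred being the node whose selection made irn ready, this sets
 *
 *   etime(irn) = max(etime(pred), curr_time + latency(pred, irn))
 *
 * so a ready node can neither start before the current time plus the
 * latency from its predecessor nor before the predecessor's own
 * earliest time.
 */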

/**
 * Update the current time after irn has been selected.
 */
static void trace_update_time(void *data, ir_node *irn)
{
	trace_env_t *env = (trace_env_t*)data;
	if (is_Phi(irn) || get_irn_opcode(irn) == beo_Start) {
		env->curr_time += get_irn_etime(env, irn);
	}
	else {
		env->curr_time += exectime(env, irn);
	}
}
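
/*
 * curr_time models the issue time inside the current block: every
 * selected real instruction advances it by its execution time, while
 * Phis and the Start node advance it by their own etime instead.
 */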

/**
 * Allocates memory and initializes the trace scheduling environment.
 * @param irg   The backend irg object
 * @return The environment
 */
static trace_env_t *trace_init(ir_graph *irg)
{
	trace_env_t *env = XMALLOCZ(trace_env_t);
	int         nn   = get_irg_last_idx(irg);

	env->curr_time  = 0;
	env->sched_info = NEW_ARR_F(trace_irn_t, nn);
	env->liveness   = be_get_irg_liveness(irg);
	FIRM_DBG_REGISTER(env->dbg, "firm.be.sched.trace");

	be_assure_live_chk(irg);
	memset(env->sched_info, 0, nn * sizeof(*(env->sched_info)));

	return env;
}

/**
 * Frees all memory allocated for the trace scheduling environment.
 * @param data  The environment
 */
static void trace_free(void *data)
{
	trace_env_t *env = (trace_env_t*)data;
	DEL_ARR_F(env->sched_info);
	free(env);
}

/**
 * Simple selector. Just ensures that branches are scheduled last.
 */
static ir_node *basic_selection(ir_nodeset_t *ready_set)
{
	/* ensure that branches and constants are executed last */
	foreach_ir_nodeset(ready_set, irn, iter) {
		if (!is_cfop(irn)) {
			return irn;
		}
	}

	/* at last: schedule branches */
	return get_nodeset_node(ready_set);
}

/**
 * The Muchnick selector.
 */
static ir_node *muchnik_select(void *block_env, ir_nodeset_t *ready_set)
{
	trace_env_t *env = (trace_env_t*)block_env;
	ir_nodeset_t mcands, ecands;
	sched_timestep_t max_delay = 0;

	/* calculate the max delay of all candidates */
	foreach_ir_nodeset(ready_set, irn, iter) {
		sched_timestep_t d = get_irn_delay(env, irn);

		max_delay = d > max_delay ? d : max_delay;
	}

	ir_nodeset_init_size(&mcands, 8);
	ir_nodeset_init_size(&ecands, 8);

	/* build mcands and ecands */
	foreach_ir_nodeset(ready_set, irn, iter) {
		if (get_irn_delay(env, irn) == max_delay) {
			ir_nodeset_insert(&mcands, irn);
			if (get_irn_etime(env, irn) <= env->curr_time)
				ir_nodeset_insert(&ecands, irn);
		}
	}

	/* select a node */
	ir_node *irn;
	if (ir_nodeset_size(&mcands) == 1) {
		irn = get_nodeset_node(&mcands);
		DB((env->dbg, LEVEL_3, "\tirn = %+F, mcand = 1, max_delay = %u\n", irn, max_delay));
	}
	else {
		size_t cnt = ir_nodeset_size(&ecands);
		if (cnt == 1) {
			irn = get_nodeset_node(&ecands);

			if (is_cfop(irn)) {
				/* BEWARE: don't select a JUMP if others are still possible */
				goto force_mcands;
			}
			DB((env->dbg, LEVEL_3, "\tirn = %+F, ecand = 1, max_delay = %u\n", irn, max_delay));
		}
		else if (cnt > 1) {
			DB((env->dbg, LEVEL_3, "\tecand = %zu, max_delay = %u\n", cnt, max_delay));
			irn = basic_selection(&ecands);
		}
		else {
force_mcands:
			DB((env->dbg, LEVEL_3, "\tmcand = %zu\n", ir_nodeset_size(&mcands)));
			irn = basic_selection(&mcands);
		}
	}

	/* the candidate sets are local to this call, release them */
	ir_nodeset_destroy(&mcands);
	ir_nodeset_destroy(&ecands);

	return irn;
}
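
/*
 * This is the two-list candidate selection described by Muchnick:
 * mcands holds the ready nodes with maximal delay (the most critical
 * ones) and ecands those of them that can already start at the current
 * time.  A unique mcand is taken unconditionally; a unique ecand is
 * taken unless it is a branch, which must remain last in the block;
 * in all other cases basic_selection() breaks the tie.
 */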

static void *muchnik_init_graph(ir_graph *irg)
{
	trace_env_t *env = trace_init(irg);
	return (void *)env;
}

static void *muchnik_init_block(void *graph_env, ir_node *bl)
{
	trace_env_t *env = (trace_env_t*) graph_env;
	trace_preprocess_block(env, bl);
	return graph_env;
}

static void sched_muchnik(ir_graph *irg)
{
	static const list_sched_selector_t muchnik_selector = {
		muchnik_init_graph,
		muchnik_init_block,
		muchnik_select,
		trace_node_ready,    /* node_ready */
		trace_update_time,   /* node_selected */
		NULL,                /* finish_block */
		trace_free           /* finish_graph */
	};
	be_list_sched_graph(irg, &muchnik_selector);
}

/**
 * Execute the heuristic function.
 */
static ir_node *heuristic_select(void *block_env, ir_nodeset_t *ns)
{
	trace_env_t *trace_env   = (trace_env_t*)block_env;
	ir_node     *cand        = NULL;
	int         max_prio     = INT_MIN;
	int         cur_prio     = INT_MIN;
	int         reg_fact;
	/* Note: register pressure calculation needs an overhaul: it needs correct
	 * tracking for each register class individually, weighted by class
	int         cur_pressure = ir_nodeset_size(lv); */
	int         cur_pressure = 1;

	/* prefer instructions which can be scheduled early */
#define PRIO_TIME        3
	/* prefer instructions with lots of successors */
#define PRIO_NUMSUCCS    8
	/* prefer instructions with a long critical path */
#define PRIO_LEVEL      12
	/* prefer instructions coming early in pre-order */
#define PRIO_PREORD      8
	/* weight of current register pressure */
#define PRIO_CUR_PRESS  20
	/* weight of register pressure difference */
#define PRIO_CHG_PRESS   8

	/* priority-based selection, heuristic inspired by Mueller's dissertation */
	foreach_ir_nodeset(ns, irn, iter) {
		/* make sure that branches are scheduled last */
		if (!is_cfop(irn)) {
			int rdiff = get_irn_reg_diff(trace_env, irn);
			int sign  = rdiff < 0;
			int chg   = (rdiff < 0 ? -rdiff : rdiff) << PRIO_CHG_PRESS;

			/* scale the pressure change by the current pressure, saturating on overflow */
			reg_fact = chg * cur_pressure;
			if (reg_fact < chg)
				reg_fact = INT_MAX - 2;
			reg_fact = sign ? -reg_fact : reg_fact;

			cur_prio = (get_irn_critical_path_len(trace_env, irn) << PRIO_LEVEL)
				//- (get_irn_delay(trace_env, irn) << PRIO_LEVEL)
				+ (get_irn_num_user(trace_env, irn) << PRIO_NUMSUCCS)
				- (get_irn_etime(trace_env, irn) << PRIO_TIME)
				//- ((get_irn_reg_diff(trace_env, irn) >> PRIO_CHG_PRESS) << ((cur_pressure >> PRIO_CUR_PRESS) - 3))
				- reg_fact
				+ (get_irn_preorder(trace_env, irn) << PRIO_PREORD); /* high pre-order means early schedule */
			if (cur_prio > max_prio) {
				cand     = irn;
				max_prio = cur_prio;
			}

			DBG((trace_env->dbg, LEVEL_4, "checked NODE %+F\n", irn));
			DBG((trace_env->dbg, LEVEL_4, "\tpriority: %d\n", cur_prio));
			DBG((trace_env->dbg, LEVEL_4, "\tpath len: %d (%d)\n", get_irn_critical_path_len(trace_env, irn), get_irn_critical_path_len(trace_env, irn) << PRIO_LEVEL));
			DBG((trace_env->dbg, LEVEL_4, "\tdelay:    %d (%d)\n", get_irn_delay(trace_env, irn), get_irn_delay(trace_env, irn) << PRIO_LEVEL));
			DBG((trace_env->dbg, LEVEL_4, "\t#user:    %d (%d)\n", get_irn_num_user(trace_env, irn), get_irn_num_user(trace_env, irn) << PRIO_NUMSUCCS));
			DBG((trace_env->dbg, LEVEL_4, "\tetime:    %d (%d)\n", get_irn_etime(trace_env, irn), 0 - (get_irn_etime(trace_env, irn) << PRIO_TIME)));
			DBG((trace_env->dbg, LEVEL_4, "\tpreorder: %d (%d)\n", get_irn_preorder(trace_env, irn), get_irn_preorder(trace_env, irn) << PRIO_PREORD));
			DBG((trace_env->dbg, LEVEL_4, "\treg diff: %d (%d)\n", get_irn_reg_diff(trace_env, irn), 0 - reg_fact));
			DBG((trace_env->dbg, LEVEL_4, "\tpressure: %d\n", cur_pressure));
		}
	}

	if (cand) {
		DBG((trace_env->dbg, LEVEL_4, "heuristic selected %+F:\n", cand));
	}
	else {
		cand = basic_selection(ns);
	}

	return cand;
}
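
/*
 * Each non-branch candidate is effectively scored as
 *
 *   prio = (critical_path_len << PRIO_LEVEL)
 *        + (num_user          << PRIO_NUMSUCCS)
 *        - (etime             << PRIO_TIME)
 *        - reg_fact
 *        + (preorder          << PRIO_PREORD)
 *
 * where reg_fact is the register pressure change scaled by
 * PRIO_CHG_PRESS and the current pressure estimate (saturated on
 * overflow), and the highest-scoring candidate wins.  Branches never
 * take part in the scoring; if only branches are ready, the choice
 * falls through to basic_selection().
 */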

static void sched_heuristic(ir_graph *irg)
{
	static const list_sched_selector_t heuristic_selector = {
		muchnik_init_graph,
		muchnik_init_block,
		heuristic_select,
		trace_node_ready,    /* node_ready */
		trace_update_time,   /* node_selected */
		NULL,                /* finish_block */
		trace_free           /* finish_graph */
	};
	be_list_sched_graph(irg, &heuristic_selector);
}

BE_REGISTER_MODULE_CONSTRUCTOR(be_init_sched_trace)
void be_init_sched_trace(void)
{
	be_register_scheduler("heur", sched_heuristic);
	be_register_scheduler("muchnik", sched_muchnik);
}