1 /* Graph coloring register allocator
2    Copyright (C) 2001, 2002 Free Software Foundation, Inc.
3    Contributed by Michael Matz <matz@suse.de>
4    and Daniel Berlin <dan@cgsoftware.com>.
5 
6    This file is part of GCC.
7 
8    GCC is free software; you can redistribute it and/or modify it under the
9    terms of the GNU General Public License as published by the Free Software
10    Foundation; either version 2, or (at your option) any later version.
11 
12    GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13    WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
14    FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
15    details.
16 
17    You should have received a copy of the GNU General Public License along
18    with GCC; see the file COPYING.  If not, write to the Free Software
19    Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */
20 
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "rtl.h"
26 #include "tm_p.h"
27 #include "function.h"
28 #include "regs.h"
29 #include "hard-reg-set.h"
30 #include "basic-block.h"
31 #include "df.h"
32 #include "output.h"
33 #include "ra.h"
34 
35 /* This file is part of the graph coloring register allocator.
36    It contains the graph colorizer.  Given an interference graph
37    as set up in ra-build.c the toplevel function in this file
38    (ra_colorize_graph) colorizes the graph, leaving a list
39    of colored, coalesced and spilled nodes.
40 
41    The algorithm used is a merge of George & Appel's iterated coalescing
42    and optimistic coalescing, switchable at runtime.  The current default
43    is "optimistic coalescing +", which is based on the normal Briggs/Cooper
44    framework.  We can also use biased coloring.  Most of the structure
45    here follows the different papers.
46 
47    Additionally there is a custom step to locally improve the overall
48    spill cost of the colored graph (recolor_spills).  */
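
/* In outline, a single coloring pass over the interference graph works as
   follows: build_worklists() classifies all webs and moves, then simplify(),
   coalesce(), freeze() and select_spill() are run repeatedly by the toplevel
   driver, each when its worklist is nonempty, until all worklists are empty,
   and finally assign_colors() pops the select stack and calls
   colorize_one_web() for each web on it.  */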
49 
50 static void push_list (struct dlist *, struct dlist **);
51 static void push_list_end (struct dlist *, struct dlist **);
52 static void free_dlist (struct dlist **);
53 static void put_web_at_end (struct web *, enum node_type);
54 static void put_move (struct move *, enum move_type);
55 static void build_worklists (struct df *);
56 static void enable_move (struct web *);
57 static void decrement_degree (struct web *, int);
58 static void simplify (void);
59 static void remove_move_1 (struct web *, struct move *);
60 static void remove_move (struct web *, struct move *);
61 static void add_worklist (struct web *);
62 static int ok (struct web *, struct web *);
63 static int conservative (struct web *, struct web *);
64 static inline unsigned int simplify_p (enum node_type);
65 static void combine (struct web *, struct web *);
66 static void coalesce (void);
67 static void freeze_moves (struct web *);
68 static void freeze (void);
69 static void select_spill (void);
70 static int color_usable_p (int, HARD_REG_SET, HARD_REG_SET,
71 			   enum machine_mode);
72 int get_free_reg (HARD_REG_SET, HARD_REG_SET, enum machine_mode);
73 static int get_biased_reg (HARD_REG_SET, HARD_REG_SET, HARD_REG_SET,
74 			   HARD_REG_SET, enum machine_mode);
75 static int count_long_blocks (HARD_REG_SET, int);
76 static char * hardregset_to_string (HARD_REG_SET);
77 static void calculate_dont_begin (struct web *, HARD_REG_SET *);
78 static void colorize_one_web (struct web *, int);
79 static void assign_colors (void);
80 static void try_recolor_web (struct web *);
81 static void insert_coalesced_conflicts (void);
82 static int comp_webs_maxcost (const void *, const void *);
83 static void recolor_spills (void);
84 static void check_colors (void);
85 static void restore_conflicts_from_coalesce (struct web *);
86 static void break_coalesced_spills (void);
87 static void unalias_web (struct web *);
88 static void break_aliases_to_web (struct web *);
89 static void break_precolored_alias (struct web *);
90 static void init_web_pairs (void);
91 static void add_web_pair_cost (struct web *, struct web *,
92 		               unsigned HOST_WIDE_INT, unsigned int);
93 static int comp_web_pairs (const void *, const void *);
94 static void sort_and_combine_web_pairs (int);
95 static void aggressive_coalesce (void);
96 static void extended_coalesce_2 (void);
97 static void check_uncoalesced_moves (void);
98 
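/* The move lists, indexed by move_type: moves still to be tried for
   coalescing (mv_worklist), already coalesced moves, constrained moves
   (source and target interfere), frozen moves, and moves which are not
   currently candidates for coalescing (mv_active).  put_move() files a
   move onto the list matching its type.  */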
99 static struct dlist *mv_worklist, *mv_coalesced, *mv_constrained;
100 static struct dlist *mv_frozen, *mv_active;
101 
102 /* Push a node onto the front of the list.  */
103 
104 static void
105 push_list (struct dlist *x, struct dlist **list)
106 {
107   if (x->next || x->prev)
108     abort ();
109   x->next = *list;
110   if (*list)
111     (*list)->prev = x;
112   *list = x;
113 }
114 
115 static void
116 push_list_end (struct dlist *x, struct dlist **list)
117 {
118   if (x->prev || x->next)
119     abort ();
120   if (!*list)
121     {
122       *list = x;
123       return;
124     }
125   while ((*list)->next)
126     list = &((*list)->next);
127   x->prev = *list;
128   (*list)->next = x;
129 }
130 
131 /* Remove a node from the list.  */
132 
133 void
134 remove_list (struct dlist *x, struct dlist **list)
135 {
136   struct dlist *y = x->prev;
137   if (y)
138     y->next = x->next;
139   else
140     *list = x->next;
141   y = x->next;
142   if (y)
143     y->prev = x->prev;
144   x->next = x->prev = NULL;
145 }
146 
147 /* Pop the front of the list.  */
148 
149 struct dlist *
150 pop_list (struct dlist **list)
151 {
152   struct dlist *r = *list;
153   if (r)
154     remove_list (r, list);
155   return r;
156 }
157 
158 /* Free the given double linked list.  */
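/* Note that the list nodes are not freed individually here; they are
   allocated via ra_calloc() (see build_worklists) and are assumed to be
   reclaimed together with the allocator's other memory, so dropping the
   head pointer is enough.  */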
159 
160 static void
161 free_dlist (struct dlist **list)
162 {
163   *list = NULL;
164 }
165 
166 /* The web WEB should get the given new TYPE.  Put it onto the
167    appropriate list.
168    Inline, because it's called with constant TYPE every time.  */
169 
170 inline void
171 put_web (struct web *web, enum node_type type)
172 {
173   switch (type)
174     {
175       case INITIAL:
176       case FREE:
177       case FREEZE:
178       case SPILL:
179       case SPILLED:
180       case COALESCED:
181       case COLORED:
182       case SELECT:
183 	push_list (web->dlink, &WEBS(type));
184 	break;
185       case PRECOLORED:
186 	push_list (web->dlink, &WEBS(INITIAL));
187 	break;
188       case SIMPLIFY:
189 	if (web->spill_temp)
190 	  push_list (web->dlink, &WEBS(type = SIMPLIFY_SPILL));
191 	else if (web->add_hardregs)
192 	  push_list (web->dlink, &WEBS(type = SIMPLIFY_FAT));
193 	else
194 	  push_list (web->dlink, &WEBS(SIMPLIFY));
195 	break;
196       default:
197 	abort ();
198     }
199   web->type = type;
200 }
201 
202 /* After we are done with the whole pass of coloring/spilling,
203    we reset the lists of webs, in preparation for the next pass.
204    The spilled webs become free, colored webs go to the initial list,
205    coalesced webs become free or initial, according to what type of web
206    they are coalesced to.  */
207 
208 void
209 reset_lists (void)
210 {
211   struct dlist *d;
212   unsigned int i;
213   if (WEBS(SIMPLIFY) || WEBS(SIMPLIFY_SPILL) || WEBS(SIMPLIFY_FAT)
214       || WEBS(FREEZE) || WEBS(SPILL) || WEBS(SELECT))
215     abort ();
216 
217   while ((d = pop_list (&WEBS(COALESCED))) != NULL)
218     {
219       struct web *web = DLIST_WEB (d);
220       struct web *aweb = alias (web);
221       /* Note how alias() becomes invalid through the two put_web()'s
222 	 below.  It might set the type of a web to FREE (from COALESCED),
223 	 which itself is a target of aliasing (i.e. in the middle of
224 	 an alias chain).  We handle this by also checking for
225 	 type == FREE.  Note nevertheless that alias() is invalid
226 	 henceforth.  */
227       if (aweb->type == SPILLED || aweb->type == FREE)
228 	put_web (web, FREE);
229       else
230 	put_web (web, INITIAL);
231     }
232   while ((d = pop_list (&WEBS(SPILLED))) != NULL)
233     put_web (DLIST_WEB (d), FREE);
234   while ((d = pop_list (&WEBS(COLORED))) != NULL)
235     put_web (DLIST_WEB (d), INITIAL);
236 
237   /* All free webs have no conflicts anymore.  */
238   for (d = WEBS(FREE); d; d = d->next)
239     {
240       struct web *web = DLIST_WEB (d);
241       BITMAP_XFREE (web->useless_conflicts);
242       web->useless_conflicts = NULL;
243     }
244 
245   /* Sanity check, that we only have free, initial or precolored webs.  */
246   for (i = 0; i < num_webs; i++)
247     {
248       struct web *web = ID2WEB (i);
249       if (web->type != INITIAL && web->type != FREE && web->type != PRECOLORED)
250 	abort ();
251     }
252   free_dlist (&mv_worklist);
253   free_dlist (&mv_coalesced);
254   free_dlist (&mv_constrained);
255   free_dlist (&mv_frozen);
256   free_dlist (&mv_active);
257 }
258 
259 /* Similar to put_web(), but add the web to the end of the appropriate
260    list.  Additionally TYPE may not be SIMPLIFY.  */
261 
262 static void
263 put_web_at_end (struct web *web, enum node_type type)
264 {
265   if (type == PRECOLORED)
266     type = INITIAL;
267   else if (type == SIMPLIFY)
268     abort ();
269   push_list_end (web->dlink, &WEBS(type));
270   web->type = type;
271 }
272 
273 /* Unlink WEB from the list it's currently on (which corresponds to
274    its current type).  */
275 
276 void
277 remove_web_from_list (struct web *web)
278 {
279   if (web->type == PRECOLORED)
280     remove_list (web->dlink, &WEBS(INITIAL));
281   else
282     remove_list (web->dlink, &WEBS(web->type));
283 }
284 
285 /* Give MOVE the TYPE, and link it into the correct list.  */
286 
287 static inline void
288 put_move (struct move *move, enum move_type type)
289 {
290   switch (type)
291     {
292       case WORKLIST:
293 	push_list (move->dlink, &mv_worklist);
294 	break;
295       case MV_COALESCED:
296 	push_list (move->dlink, &mv_coalesced);
297 	break;
298       case CONSTRAINED:
299 	push_list (move->dlink, &mv_constrained);
300 	break;
301       case FROZEN:
302 	push_list (move->dlink, &mv_frozen);
303 	break;
304       case ACTIVE:
305 	push_list (move->dlink, &mv_active);
306 	break;
307       default:
308 	abort ();
309     }
310   move->type = type;
311 }
312 
313 /* Build the worklists we are going to process.  */
314 
315 static void
316 build_worklists (struct df *df ATTRIBUTE_UNUSED)
317 {
318   struct dlist *d, *d_next;
319   struct move_list *ml;
320 
321   /* If this is not the first pass, put all stackwebs (which are still
322      backed by a new pseudo, but conceptually can stand for a stackslot,
323      i.e. it doesn't really matter if they get a color or not) on
324      the SELECT stack first, those with the lowest cost first.  This way
325      they will be colored last, so they do not constrain the coloring of
326      the normal webs.  But those with the highest count are still colored
327      earlier, i.e. are more likely to get a color.  The use of stackregs is
328      a pure optimization; everything would work if we used real stackslots
329      from the beginning.  */
330   if (ra_pass > 1)
331     {
332       unsigned int i, num, max_num;
333       struct web **order2web;
334       max_num = num_webs - num_subwebs;
335       order2web = xmalloc (max_num * sizeof (order2web[0]));
336       for (i = 0, num = 0; i < max_num; i++)
337 	if (id2web[i]->regno >= max_normal_pseudo)
338 	  order2web[num++] = id2web[i];
339       if (num)
340 	{
341 	  qsort (order2web, num, sizeof (order2web[0]), comp_webs_maxcost);
342 	  for (i = num - 1;; i--)
343 	    {
344 	      struct web *web = order2web[i];
345 	      struct conflict_link *wl;
346 	      remove_list (web->dlink, &WEBS(INITIAL));
347 	      put_web (web, SELECT);
348 	      for (wl = web->conflict_list; wl; wl = wl->next)
349 		{
350 		  struct web *pweb = wl->t;
351 		  pweb->num_conflicts -= 1 + web->add_hardregs;
352 		}
353 	      if (i == 0)
354 		break;
355 	    }
356 	}
357       free (order2web);
358     }
359 
360   /* For all remaining initial webs, classify them.  */
361   for (d = WEBS(INITIAL); d; d = d_next)
362     {
363       struct web *web = DLIST_WEB (d);
364       d_next = d->next;
365       if (web->type == PRECOLORED)
366         continue;
367 
368       remove_list (d, &WEBS(INITIAL));
369       if (web->num_conflicts >= NUM_REGS (web))
370 	put_web (web, SPILL);
371       else if (web->moves)
372 	put_web (web, FREEZE);
373       else
374 	put_web (web, SIMPLIFY);
375     }
376 
377   /* And put all moves on the worklist for iterated coalescing.
378      Note, that if iterated coalescing is off, then wl_moves doesn't
379      contain any moves.  */
380   for (ml = wl_moves; ml; ml = ml->next)
381     if (ml->move)
382       {
383 	struct move *m = ml->move;
384         d = ra_calloc (sizeof (struct dlist));
385         DLIST_MOVE (d) = m;
386         m->dlink = d;
387 	put_move (m, WORKLIST);
388       }
389 }
390 
391 /* Enable the active moves, in which WEB takes part, to be processed.  */
392 
393 static void
394 enable_move (struct web *web)
395 {
396   struct move_list *ml;
397   for (ml = web->moves; ml; ml = ml->next)
398     if (ml->move->type == ACTIVE)
399       {
400 	remove_list (ml->move->dlink, &mv_active);
401 	put_move (ml->move, WORKLIST);
402       }
403 }
404 
405 /* Decrement the degree of node WEB by the amount DEC.
406    Possibly change the type of WEB, if the number of conflicts is
407    now smaller than its freedom.  */
408 
409 static void
410 decrement_degree (struct web *web, int dec)
411 {
412   int before = web->num_conflicts;
413   web->num_conflicts -= dec;
414   if (web->num_conflicts < NUM_REGS (web) && before >= NUM_REGS (web))
415     {
416       struct conflict_link *a;
417       enable_move (web);
418       for (a = web->conflict_list; a; a = a->next)
419 	{
420 	  struct web *aweb = a->t;
421 	  if (aweb->type != SELECT && aweb->type != COALESCED)
422 	    enable_move (aweb);
423 	}
424       if (web->type != FREEZE)
425 	{
426 	  remove_web_from_list (web);
427 	  if (web->moves)
428 	    put_web (web, FREEZE);
429 	  else
430 	    put_web (web, SIMPLIFY);
431 	}
432     }
433 }
434 
435 /* Repeatedly simplify the nodes on the simplify worklists.  */
436 
437 static void
438 simplify (void)
439 {
440   struct dlist *d;
441   struct web *web;
442   struct conflict_link *wl;
443   while (1)
444     {
445       /* We try hard to color all the webs resulting from spills first.
446 	 Without that, on register starved machines (e.g. x86) with some live
447 	 DImode pseudos, -fPIC, and an asm requiring %edx, it might be that
448 	 we iterate round after round, because the conflict graph says we can
449 	 simplify those short webs, but later due to irregularities we can't
450 	 color those pseudos.  So we have to spill them, which in later rounds
451 	 leads to other spills.  */
452       d = pop_list (&WEBS(SIMPLIFY));
453       if (!d)
454 	d = pop_list (&WEBS(SIMPLIFY_FAT));
455       if (!d)
456 	d = pop_list (&WEBS(SIMPLIFY_SPILL));
457       if (!d)
458 	break;
459       web = DLIST_WEB (d);
460       ra_debug_msg (DUMP_PROCESS, " simplifying web %3d, conflicts = %d\n",
461 		 web->id, web->num_conflicts);
462       put_web (web, SELECT);
463       for (wl = web->conflict_list; wl; wl = wl->next)
464 	{
465 	  struct web *pweb = wl->t;
466 	  if (pweb->type != SELECT && pweb->type != COALESCED)
467 	    {
468 	      decrement_degree (pweb, 1 + web->add_hardregs);
469 	    }
470 	}
471     }
472 }
473 
474 /* Helper function to remove a move from the movelist of the web.  */
475 
476 static void
477 remove_move_1 (struct web *web, struct move *move)
478 {
479   struct move_list *ml = web->moves;
480   if (!ml)
481     return;
482   if (ml->move == move)
483     {
484       web->moves = ml->next;
485       return;
486     }
487   for (; ml->next && ml->next->move != move; ml = ml->next) ;
488   if (!ml->next)
489     return;
490   ml->next = ml->next->next;
491 }
492 
493 /* Remove a move from the movelist of the web.  Actually this is just a
494    wrapper around remove_move_1(), making sure, the removed move really is
495    not in the list anymore.  */
496 
497 static void
498 remove_move (struct web *web, struct move *move)
499 {
500   struct move_list *ml;
501   remove_move_1 (web, move);
502   for (ml = web->moves; ml; ml = ml->next)
503     if (ml->move == move)
504       abort ();
505 }
506 
507 /* Merge the moves for the two webs into the first web's movelist.  */
508 
509 void
510 merge_moves (struct web *u, struct web *v)
511 {
512   regset seen;
513   struct move_list *ml, *ml_next;
514 
515   seen = BITMAP_XMALLOC ();
516   for (ml = u->moves; ml; ml = ml->next)
517     bitmap_set_bit (seen, INSN_UID (ml->move->insn));
518   for (ml = v->moves; ml; ml = ml_next)
519     {
520       ml_next = ml->next;
521       if (! bitmap_bit_p (seen, INSN_UID (ml->move->insn)))
522         {
523 	  ml->next = u->moves;
524 	  u->moves = ml;
525 	}
526     }
527   BITMAP_XFREE (seen);
528   v->moves = NULL;
529 }
530 
531 /* Add a web to the simplify worklist, from the freeze worklist.  */
532 
533 static void
534 add_worklist (struct web *web)
535 {
536   if (web->type != PRECOLORED && !web->moves
537       && web->num_conflicts < NUM_REGS (web))
538     {
539       remove_list (web->dlink, &WEBS(FREEZE));
540       put_web (web, SIMPLIFY);
541     }
542 }
543 
544 /* Precolored node coalescing heuristic.  */
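/* This is essentially the "George" test from iterated register coalescing,
   extended to handle multi-word webs and restricted usable_regs.  */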
545 
546 static int
547 ok (struct web *target, struct web *source)
548 {
549   struct conflict_link *wl;
550   int i;
551   int color = source->color;
552   int size;
553 
554   /* Normally one would think the next test wouldn't be needed.
555      We try to coalesce S and T, S already has a color, and we checked
556      when processing the insns that both have the same mode.  So naively
557      we could conclude that of course that mode is valid for this color.
558      Hah.  But there is sparc.  Before reload there are copy insns
559      (e.g. the ones copying arguments to locals) which happily refer to
560      colors in invalid modes.  We can't coalesce those things.  */
561   if (! HARD_REGNO_MODE_OK (source->color, GET_MODE (target->orig_x)))
562     return 0;
563 
564   /* Sanity for funny modes.  */
565   size = HARD_REGNO_NREGS (color, GET_MODE (target->orig_x));
566   if (!size)
567     return 0;
568 
569   /* We can't coalesce target with a precolored register which isn't in
570      usable_regs.  */
571   for (i = size; i--;)
572     if (TEST_HARD_REG_BIT (never_use_colors, color + i)
573 	|| !TEST_HARD_REG_BIT (target->usable_regs, color + i)
574 	/* Usually, before calling ok() at all, we already test whether the
575 	   candidates conflict in sup_igraph.  But when wide webs are
576 	   coalesced to hardregs, we only test the hardweb coalesced into.
577 	   This is only the begin color.  When actually coalescing both,
578 	   it will also take the following size colors, i.e. their webs.
579 	   We never checked whether the candidate possibly conflicts with
580 	   one of _those_, which is possible with partial conflicts,
581 	   so we simply do it here (this does one bit-test more than
582 	   necessary, the first color).  Note, that if X is precolored
583 	   bit [X*num_webs + Y] can't be set (see add_conflict_edge()).  */
584 	|| TEST_BIT (sup_igraph,
585 		     target->id * num_webs + hardreg2web[color + i]->id))
586       return 0;
587 
588   for (wl = target->conflict_list; wl; wl = wl->next)
589     {
590       struct web *pweb = wl->t;
591       if (pweb->type == SELECT || pweb->type == COALESCED)
592 	continue;
593 
594       /* Coalescing target (T) and source (S) is OK if for
595 	 all conflicts C of T it is true that:
596 	  1) C will be colored, or
597 	  2) C is a hardreg (precolored), or
598 	  3) C already conflicts with S too, or
599 	  4) a web which contains C conflicts already with S.
600 	 XXX: we handle here only the special case of 4), that C is
601 	 a subreg, and the containing thing is the reg itself, i.e.
602 	 we don't handle the situation where T conflicts with
603 	 (subreg:SI x 1), and S conflicts with (subreg:DI x 0), which
604 	 would also be allowed, as the S-conflict overlaps
605 	 the T-conflict.
606          So we first test the whole web for any of these conditions, and
607          continue with the next C if 1, 2 or 3 is true.  */
608       if (pweb->num_conflicts < NUM_REGS (pweb)
609 	  || pweb->type == PRECOLORED
610 	  || TEST_BIT (igraph, igraph_index (source->id, pweb->id)) )
611 	continue;
612 
613       /* This is reached if none of 1, 2 or 3 was true.  In the case C has
614          no subwebs, 4 can't be true either, so we can't coalesce S and T.  */
615       if (wl->sub == NULL)
616         return 0;
617       else
618 	{
619 	  /* The main webs do _not_ conflict, only some parts of both.  This
620 	     means, that 4 is possibly true, so we need to check this too.
621 	     For this we go through all sub conflicts between T and C, and see if
622 	     the target part of C already conflicts with S.  When this is not
623 	     the case we disallow coalescing.  */
624 	  struct sub_conflict *sl;
625 	  for (sl = wl->sub; sl; sl = sl->next)
626 	    {
627               if (!TEST_BIT (igraph, igraph_index (source->id, sl->t->id)))
628 	        return 0;
629 	    }
630         }
631     }
632   return 1;
633 }
634 
635 /* Non-precolored node coalescing heuristic.  */
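/* This is Briggs' conservative coalescing test: coalescing is allowed only
   if the combined web would have fewer neighbors of significant degree
   than there are available registers.  */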
636 
637 static int
638 conservative (struct web *target, struct web *source)
639 {
640   unsigned int k;
641   unsigned int loop;
642   regset seen;
643   struct conflict_link *wl;
644   unsigned int num_regs = NUM_REGS (target); /* XXX */
645 
646   /* k counts the resulting conflict weight, if target and source
647      would be merged, and all low-degree neighbors would be
648      removed.  */
649   k = 0 * MAX (target->add_hardregs, source->add_hardregs);
650   seen = BITMAP_XMALLOC ();
651   for (loop = 0; loop < 2; loop++)
652     for (wl = ((loop == 0) ? target : source)->conflict_list;
653 	 wl; wl = wl->next)
654       {
655 	struct web *pweb = wl->t;
656 	if (pweb->type != SELECT && pweb->type != COALESCED
657 	    && pweb->num_conflicts >= NUM_REGS (pweb)
658 	    && ! REGNO_REG_SET_P (seen, pweb->id))
659 	  {
660 	    SET_REGNO_REG_SET (seen, pweb->id);
661 	    k += 1 + pweb->add_hardregs;
662 	  }
663       }
664   BITMAP_XFREE (seen);
665 
666   if (k >= num_regs)
667     return 0;
668   return 1;
669 }
670 
671 /* If the web is coalesced, return its alias.  Otherwise, return what
672    was passed in.  */
673 
674 struct web *
675 alias (struct web *web)
676 {
677   while (web->type == COALESCED)
678     web = web->alias;
679   return web;
680 }
681 
682 /* Returns nonzero, if the TYPE belongs to one of those representing
683    SIMPLIFY types.  */
684 
685 static inline unsigned int
686 simplify_p (enum node_type type)
687 {
688   return type == SIMPLIFY || type == SIMPLIFY_SPILL || type == SIMPLIFY_FAT;
689 }
690 
691 /* Actually combine two webs, that can be coalesced.  */
692 
693 static void
694 combine (struct web *u, struct web *v)
695 {
696   int i;
697   struct conflict_link *wl;
698   if (u == v || v->type == COALESCED)
699     abort ();
700   if ((u->regno >= max_normal_pseudo) != (v->regno >= max_normal_pseudo))
701     abort ();
702   remove_web_from_list (v);
703   put_web (v, COALESCED);
704   v->alias = u;
705   u->is_coalesced = 1;
706   v->is_coalesced = 1;
707   u->num_aliased += 1 + v->num_aliased;
708   if (flag_ra_merge_spill_costs && u->type != PRECOLORED)
709     u->spill_cost += v->spill_cost;
710     /*u->spill_cost = MAX (u->spill_cost, v->spill_cost);*/
711   merge_moves (u, v);
712   /* combine add_hardregs's of U and V.  */
713 
714   for (wl = v->conflict_list; wl; wl = wl->next)
715     {
716       struct web *pweb = wl->t;
717       /* We don't strictly need to move conflicts between webs which are
718 	 already coalesced or selected, if we do iterated coalescing, or
719 	 better, if we don't need to be able to break aliases again.
720 	 I.e. normally we would use the condition
721 	 (pweb->type != SELECT && pweb->type != COALESCED).
722 	 But for now we simply merge all conflicts.  It doesn't take that
723          much time.  */
724       if (1)
725 	{
726 	  struct web *web = u;
727 	  int nregs = 1 + v->add_hardregs;
728 	  if (u->type == PRECOLORED)
729 	    nregs = HARD_REGNO_NREGS (u->color, GET_MODE (v->orig_x));
730 
731 	  /* For precolored U's we need to make conflicts between V's
732 	     neighbors and as many hardregs from U as V needed if it gets
733 	     color U.  For now we approximate this by V->add_hardregs, which
734 	     could be too much in multi-length classes.  We should really
735 	     count how many hardregs are needed for V with color U.  When U
736 	     isn't precolored this loop breaks out after one iteration.  */
737 	  for (i = 0; i < nregs; i++)
738 	    {
739 	      if (u->type == PRECOLORED)
740 		web = hardreg2web[i + u->color];
741 	      if (wl->sub == NULL)
742 		record_conflict (web, pweb);
743 	      else
744 		{
745 		  struct sub_conflict *sl;
746 		  /* So, between V and PWEB there are sub_conflicts.  We
747 		     need to relocate those conflicts to be between WEB (==
748 		     U when it wasn't precolored) and PWEB.  In the case
749 		     only a part of V conflicted with (part of) PWEB we
750 		     nevertheless make the new conflict between the whole U
751 		     and the (part of) PWEB.  Later we might try to find in
752 		     U the correct subpart corresponding (by size and
753 		     offset) to the part of V (sl->s) which was the source
754 		     of the conflict.  */
755 		  for (sl = wl->sub; sl; sl = sl->next)
756 		    {
757 		      /* Beware: sl->s is no subweb of web (== U) but of V.
758 			 We try to search a corresponding subpart of U.
759 			 If we found none we let it conflict with the whole U.
760 			 Note that find_subweb() only looks for mode and
761 			 subreg_byte of the REG rtx but not for the pseudo
762 			 reg number (otherwise it would be guaranteed to
763 			 _not_ find any subpart).  */
764 		      struct web *sweb = NULL;
765 		      if (SUBWEB_P (sl->s))
766 			sweb = find_subweb (web, sl->s->orig_x);
767 		      if (!sweb)
768 			sweb = web;
769 		      record_conflict (sweb, sl->t);
770 		    }
771 		}
772 	      if (u->type != PRECOLORED)
773 		break;
774 	    }
775 	  if (pweb->type != SELECT && pweb->type != COALESCED)
776 	    decrement_degree (pweb, 1 + v->add_hardregs);
777 	}
778     }
779 
780   /* Now merge the usable_regs together.  */
781   /* XXX That merging might normally make it necessary to
782      adjust add_hardregs, which also means to adjust neighbors.  This can
783      result in making some more webs trivially colorable (or the opposite,
784      if this increases our add_hardregs).  Because we intersect the
785      usable_regs, it should only be possible to decrease add_hardregs.  So a
786      conservative solution for now is to simply not change it.  */
787   u->use_my_regs = 1;
788   AND_HARD_REG_SET (u->usable_regs, v->usable_regs);
789   u->regclass = reg_class_subunion[u->regclass][v->regclass];
790   /* Count number of possible hardregs.  This might make U a spillweb,
791      but that could also happen, if U and V together had too many
792      conflicts.  */
793   u->num_freedom = hard_regs_count (u->usable_regs);
794   u->num_freedom -= u->add_hardregs;
795   /* The next would mean an invalid coalesced move (both webs have no
796      possible hardreg in common), so abort.  */
797   if (!u->num_freedom)
798     abort();
799 
800   if (u->num_conflicts >= NUM_REGS (u)
801       && (u->type == FREEZE || simplify_p (u->type)))
802     {
803       remove_web_from_list (u);
804       put_web (u, SPILL);
805     }
806 
807   /* We want the most relaxed combination of spill_temp state.
808      I.e. if either was not a spilltemp, or was a spilltemp2, the result
809      is that too; otherwise if either is short, so is the result.  It stays
810      a normal spilltemp only when both are normal spilltemps.  */
811   if (v->spill_temp == 0)
812     u->spill_temp = 0;
813   else if (v->spill_temp == 2 && u->spill_temp != 0)
814     u->spill_temp = 2;
815   else if (v->spill_temp == 3 && u->spill_temp == 1)
816     u->spill_temp = 3;
817 }
818 
819 /* Attempt to coalesce the first thing on the move worklist.
820    This is used only for iterated coalescing.  */
821 
822 static void
823 coalesce (void)
824 {
825   struct dlist *d = pop_list (&mv_worklist);
826   struct move *m = DLIST_MOVE (d);
827   struct web *source = alias (m->source_web);
828   struct web *target = alias (m->target_web);
829 
830   if (target->type == PRECOLORED)
831     {
832       struct web *h = source;
833       source = target;
834       target = h;
835     }
836   if (source == target)
837     {
838       remove_move (source, m);
839       put_move (m, MV_COALESCED);
840       add_worklist (source);
841     }
842   else if (target->type == PRECOLORED
843 	   || TEST_BIT (sup_igraph, source->id * num_webs + target->id)
844 	   || TEST_BIT (sup_igraph, target->id * num_webs + source->id))
845     {
846       remove_move (source, m);
847       remove_move (target, m);
848       put_move (m, CONSTRAINED);
849       add_worklist (source);
850       add_worklist (target);
851     }
852   else if ((source->type == PRECOLORED && ok (target, source))
853 	   || (source->type != PRECOLORED
854 	       && conservative (target, source)))
855     {
856       remove_move (source, m);
857       remove_move (target, m);
858       put_move (m, MV_COALESCED);
859       combine (source, target);
860       add_worklist (source);
861     }
862   else
863     put_move (m, ACTIVE);
864 }
865 
866 /* Freeze the moves associated with the web.  Used for iterated coalescing.  */
867 
868 static void
869 freeze_moves (struct web *web)
870 {
871   struct move_list *ml, *ml_next;
872   for (ml = web->moves; ml; ml = ml_next)
873     {
874       struct move *m = ml->move;
875       struct web *src, *dest;
876       ml_next = ml->next;
877       if (m->type == ACTIVE)
878 	remove_list (m->dlink, &mv_active);
879       else
880 	remove_list (m->dlink, &mv_worklist);
881       put_move (m, FROZEN);
882       remove_move (web, m);
883       src = alias (m->source_web);
884       dest = alias (m->target_web);
885       src = (src == web) ? dest : src;
886       remove_move (src, m);
887       /* XXX GA use the original v, instead of alias(v) */
888       if (!src->moves && src->num_conflicts < NUM_REGS (src))
889 	{
890 	  remove_list (src->dlink, &WEBS(FREEZE));
891 	  put_web (src, SIMPLIFY);
892 	}
893     }
894 }
895 
896 /* Freeze the first thing on the freeze worklist (only for iterated
897    coalescing).  */
898 
899 static void
900 freeze (void)
901 {
902   struct dlist *d = pop_list (&WEBS(FREEZE));
903   put_web (DLIST_WEB (d), SIMPLIFY);
904   freeze_moves (DLIST_WEB (d));
905 }
906 
907 /* The current spill heuristic.  Returns a number for a WEB.
908    Webs with higher numbers are selected later.  */
909 
910 static unsigned HOST_WIDE_INT (*spill_heuristic) (struct web *);
911 
912 static unsigned HOST_WIDE_INT default_spill_heuristic (struct web *);
913 
914 /* Our default heuristic is similar to spill_cost / num_conflicts.
915    Just scaled for integer arithmetic, and it favors coalesced webs,
916    and webs which span more insns with deaths.  */
917 
918 static unsigned HOST_WIDE_INT
919 default_spill_heuristic (struct web *web)
920 {
921   unsigned HOST_WIDE_INT ret;
922   unsigned int divisor = 1;
923   /* Make coalesce targets cheaper to spill, because they will be broken
924      up again into smaller parts.  */
925   if (flag_ra_break_aliases)
926     divisor += web->num_aliased;
927   divisor += web->num_conflicts;
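  /* The shift by 8 scales spill_cost so the division by divisor keeps some
     precision; this computes ceil (spill_cost * 256 / divisor).  */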
928   ret = ((web->spill_cost << 8) + divisor - 1) / divisor;
929   /* It is better to spill webs that span more insns (deaths in our
930      case) than other webs with the otherwise same spill_cost.  So make
931      them a little bit cheaper.  Remember that spill_cost is unsigned.  */
932   if (web->span_deaths < ret)
933     ret -= web->span_deaths;
934   return ret;
935 }
936 
937 /* Select the cheapest spill to be potentially spilled (we don't
938    *actually* spill until we need to).  */
939 
940 static void
941 select_spill (void)
942 {
943   unsigned HOST_WIDE_INT best = (unsigned HOST_WIDE_INT) -1;
944   struct dlist *bestd = NULL;
945   unsigned HOST_WIDE_INT best2 = (unsigned HOST_WIDE_INT) -1;
946   struct dlist *bestd2 = NULL;
947   struct dlist *d;
948   for (d = WEBS(SPILL); d; d = d->next)
949     {
950       struct web *w = DLIST_WEB (d);
951       unsigned HOST_WIDE_INT cost = spill_heuristic (w);
952       if ((!w->spill_temp) && cost < best)
953 	{
954 	  best = cost;
955 	  bestd = d;
956 	}
957       /* Specially marked spill temps can be spilled.  Also coalesce
958 	 targets can.  Eventually they will be broken up later in the
959 	 colorizing process, so if we have nothing better take that.  */
960       else if ((w->spill_temp == 2 || w->is_coalesced) && cost < best2)
961 	{
962 	  best2 = cost;
963 	  bestd2 = d;
964 	}
965     }
966   if (!bestd)
967     {
968       bestd = bestd2;
969       best = best2;
970     }
971   if (!bestd)
972     abort ();
973 
974   /* Note the potential spill.  */
975   DLIST_WEB (bestd)->was_spilled = 1;
976   remove_list (bestd, &WEBS(SPILL));
977   put_web (DLIST_WEB (bestd), SIMPLIFY);
978   freeze_moves (DLIST_WEB (bestd));
979   ra_debug_msg (DUMP_PROCESS, " potential spill web %3d, conflicts = %d\n",
980 	     DLIST_WEB (bestd)->id, DLIST_WEB (bestd)->num_conflicts);
981 }
982 
983 /* Given a set of forbidden colors to begin at, and a set of still
984    free colors, and MODE, returns nonzero if color C is still usable.  */
985 
986 static int
987 color_usable_p (int c, HARD_REG_SET dont_begin_colors,
988 		HARD_REG_SET free_colors, enum machine_mode  mode)
989 {
990   if (!TEST_HARD_REG_BIT (dont_begin_colors, c)
991       && TEST_HARD_REG_BIT (free_colors, c)
992       && HARD_REGNO_MODE_OK (c, mode))
993     {
994       int i, size;
995       size = HARD_REGNO_NREGS (c, mode);
996       for (i = 1; i < size && TEST_HARD_REG_BIT (free_colors, c + i); i++);
997       if (i == size)
998 	return 1;
999     }
1000   return 0;
1001 }
1002 
1003 /* I don't want to clutter up the actual code with ifdef's.  */
1004 #ifdef REG_ALLOC_ORDER
1005 #define INV_REG_ALLOC_ORDER(c) inv_reg_alloc_order[c]
1006 #else
1007 #define INV_REG_ALLOC_ORDER(c) c
1008 #endif
1009 
1010 /* Searches in FREE_COLORS for a block of hardregs of the right length
1011    for MODE, which doesn't begin at a hardreg mentioned in DONT_BEGIN_COLORS.
1012    If it needs more than one hardreg it prefers blocks beginning
1013    at an even hardreg, and only gives an odd begin reg if no other
1014    block could be found.  */
1015 
1016 int
1017 get_free_reg (HARD_REG_SET dont_begin_colors, HARD_REG_SET free_colors,
1018 	      enum machine_mode mode)
1019 {
1020   int c;
1021   int last_resort_reg = -1;
1022   int pref_reg = -1;
1023   int pref_reg_order = INT_MAX;
1024   int last_resort_reg_order = INT_MAX;
1025 
1026   for (c = 0; c < FIRST_PSEUDO_REGISTER; c++)
1027     if (!TEST_HARD_REG_BIT (dont_begin_colors, c)
1028 	&& TEST_HARD_REG_BIT (free_colors, c)
1029 	&& HARD_REGNO_MODE_OK (c, mode))
1030       {
1031 	int i, size;
1032 	size = HARD_REGNO_NREGS (c, mode);
1033 	for (i = 1; i < size && TEST_HARD_REG_BIT (free_colors, c + i); i++);
1034 	if (i != size)
1035 	  {
1036 	    c += i;
1037 	    continue;
1038 	  }
1039 	if (i == size)
1040 	  {
1041 	    if (size < 2 || (c & 1) == 0)
1042 	      {
1043 		if (INV_REG_ALLOC_ORDER (c) < pref_reg_order)
1044 		  {
1045 		    pref_reg = c;
1046 		    pref_reg_order = INV_REG_ALLOC_ORDER (c);
1047 		  }
1048 	      }
1049 	    else if (INV_REG_ALLOC_ORDER (c) < last_resort_reg_order)
1050 	      {
1051 		last_resort_reg = c;
1052 		last_resort_reg_order = INV_REG_ALLOC_ORDER (c);
1053 	      }
1054 	  }
1055 	else
1056 	  c += i;
1057       }
1058   return pref_reg >= 0 ? pref_reg : last_resort_reg;
1059 }
1060 
1061 /* Similar to get_free_reg(), but first search in colors provided
1062    by BIAS _and_ PREFER_COLORS, then in BIAS alone, then in PREFER_COLORS
1063    alone, and only then for any free color.  If flag_ra_biased is zero
1064    only do the last two steps.  */
1065 
1066 static int
1067 get_biased_reg (HARD_REG_SET dont_begin_colors, HARD_REG_SET bias,
1068 		HARD_REG_SET prefer_colors, HARD_REG_SET free_colors,
1069 		enum machine_mode mode)
1070 {
1071   int c = -1;
1072   HARD_REG_SET s;
1073   if (flag_ra_biased)
1074     {
1075       COPY_HARD_REG_SET (s, dont_begin_colors);
1076       IOR_COMPL_HARD_REG_SET (s, bias);
1077       IOR_COMPL_HARD_REG_SET (s, prefer_colors);
1078       c = get_free_reg (s, free_colors, mode);
1079       if (c >= 0)
1080 	return c;
1081       COPY_HARD_REG_SET (s, dont_begin_colors);
1082       IOR_COMPL_HARD_REG_SET (s, bias);
1083       c = get_free_reg (s, free_colors, mode);
1084       if (c >= 0)
1085 	return c;
1086     }
1087   COPY_HARD_REG_SET (s, dont_begin_colors);
1088   IOR_COMPL_HARD_REG_SET (s, prefer_colors);
1089   c = get_free_reg (s, free_colors, mode);
1090   if (c >= 0)
1091       return c;
1092   c = get_free_reg (dont_begin_colors, free_colors, mode);
1093   return c;
1094 }
1095 
1096 /* Counts the number of non-overlapping bitblocks of length LEN
1097    in FREE_COLORS.  */
1098 
1099 static int
1100 count_long_blocks (HARD_REG_SET free_colors, int len)
1101 {
1102   int i, j;
1103   int count = 0;
1104   for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1105     {
1106       if (!TEST_HARD_REG_BIT (free_colors, i))
1107 	continue;
1108       for (j = 1; j < len; j++)
1109 	if (!TEST_HARD_REG_BIT (free_colors, i + j))
1110 	  break;
1111       /* Bits [i .. i+j-1] are free.  */
1112       if (j == len)
1113 	count++;
1114       i += j - 1;
1115     }
1116   return count;
1117 }
1118 
1119 /* Given a hardreg set S, return a string representing it.
1120    Either as 0/1 string, or as hex value depending on the implementation
1121    of hardreg sets.  Note that this string is statically allocated.  */
1122 
1123 static char *
1124 hardregset_to_string (HARD_REG_SET s)
1125 {
1126   static char string[/*FIRST_PSEUDO_REGISTER + 30*/1024];
1127 #if FIRST_PSEUDO_REGISTER <= HOST_BITS_PER_WIDE_INT
1128   sprintf (string, HOST_WIDE_INT_PRINT_HEX, s);
1129 #else
1130   char *c = string;
1131   int i,j;
1132   c += sprintf (c, "{ ");
1133   for (i = 0;i < HARD_REG_SET_LONGS; i++)
1134     {
1135       for (j = 0; j < HOST_BITS_PER_WIDE_INT; j++)
1136 	  c += sprintf (c, "%s", ( 1 << j) & s[i] ? "1" : "0");
1137       c += sprintf (c, "%s", i ? ", " : "");
1138     }
1139   c += sprintf (c, " }");
1140 #endif
1141   return string;
1142 }
1143 
1144 /* For WEB, look at its already colored neighbors, and calculate
1145    the set of hardregs which is not allowed as color for WEB.  Place
1146    that set into *RESULT.  Note that the set of forbidden begin colors
1147    is not the same as all colors taken up by neighbors.  E.g. suppose
1148    two DImode webs, but only the lo-part from one conflicts with the
1149    hipart from the other, and suppose the other gets colors 2 and 3
1150    (it needs two SImode hardregs).  Now the first can also take color
1151    1 or 2, although in those cases there's a partial overlap.  Only
1152    3 can't be used as begin color.  */
1153 
1154 static void
1155 calculate_dont_begin (struct web *web, HARD_REG_SET *result)
1156 {
1157   struct conflict_link *wl;
1158   HARD_REG_SET dont_begin;
1159   /* The bits set in dont_begin correspond to the hardregs, at which
1160      WEB may not begin.  This differs from the set of _all_ hardregs which
1161      are taken by WEB's conflicts in the presence of wide webs, where only
1162      some parts conflict with others.  */
1163   CLEAR_HARD_REG_SET (dont_begin);
1164   for (wl = web->conflict_list; wl; wl = wl->next)
1165     {
1166       struct web *w;
1167       struct web *ptarget = alias (wl->t);
1168       struct sub_conflict *sl = wl->sub;
1169       w = sl ? sl->t : wl->t;
1170       while (w)
1171 	{
1172 	  if (ptarget->type == COLORED || ptarget->type == PRECOLORED)
1173 	    {
1174 	      struct web *source = (sl) ? sl->s : web;
1175 	      unsigned int tsize = HARD_REGNO_NREGS (ptarget->color,
1176 						     GET_MODE (w->orig_x));
1177 	      /* ssize is only a first guess for the size.  */
1178 	      unsigned int ssize = HARD_REGNO_NREGS (ptarget->color, GET_MODE
1179 					             (source->orig_x));
1180 	      unsigned int tofs = 0;
1181 	      unsigned int sofs = 0;
1182 	      /* C1 and C2 can become negative, so unsigned
1183 		 would be wrong.  */
1184 	      int c1, c2;
1185 
1186 	      if (SUBWEB_P (w)
1187 		  && GET_MODE_SIZE (GET_MODE (w->orig_x)) >= UNITS_PER_WORD)
1188 		tofs = (SUBREG_BYTE (w->orig_x) / UNITS_PER_WORD);
1189 	      if (SUBWEB_P (source)
1190 		  && GET_MODE_SIZE (GET_MODE (source->orig_x))
1191 		     >= UNITS_PER_WORD)
1192 		sofs = (SUBREG_BYTE (source->orig_x) / UNITS_PER_WORD);
1193 	      c1 = ptarget->color + tofs - sofs - ssize + 1;
1194 	      c2 = ptarget->color + tofs + tsize - 1 - sofs;
1195 	      if (c2 >= 0)
1196 		{
1197 		  if (c1 < 0)
1198 		    c1 = 0;
1199 		  /* Because ssize was only guessed above, which influenced our
1200 		     begin color (c1), we need adjustment, if for that color
1201 		     another size would be needed.  This is done by moving
1202 		     c1 to a place where the last of the source's hardregs does
1203 		     not overlap the first of the target's colors.  */
1204 		  while (c1 + sofs
1205 			 + HARD_REGNO_NREGS (c1, GET_MODE (source->orig_x)) - 1
1206 			 < ptarget->color + tofs)
1207 		    c1++;
1208 		  while (c1 > 0 && c1 + sofs
1209 			 + HARD_REGNO_NREGS (c1, GET_MODE (source->orig_x)) - 1
1210 			 > ptarget->color + tofs)
1211 		    c1--;
1212 		  for (; c1 <= c2; c1++)
1213 		    SET_HARD_REG_BIT (dont_begin, c1);
1214 		}
1215 	    }
1216 	  /* The next if() is only true if there was no wl->sub at all, in
1217 	     which case we make only one pass through this loop, with W being
1218 	     a whole web.  */
1219 	  if (!sl)
1220 	    break;
1221 	  sl = sl->next;
1222 	  w = sl ? sl->t : NULL;
1223 	}
1224     }
1225   COPY_HARD_REG_SET (*result, dont_begin);
1226 }
1227 
1228 /* Try to assign a color to WEB.  If HARD is nonzero, we try many
1229    tricks to get it one color, including respilling already colored
1230    neighbors.
1231 
1232    We also try very hard not to constrain the uncolored non-spill
1233    neighbors, which need more hardregs than we do.  Consider a situation:
1234    2 hardregs free for us (0 and 1), one of our neighbors needs 2
1235    hardregs and only conflicts with us, and there are 3 hardregs in total.
1236    Now a simple-minded method might choose 1 as our color.  Then our
1237    neighbor has two free colors (0 and 2) as it should, but they are not
1238    consecutive, so coloring it later would fail.  This leads to nasty
1239    problems on register starved machines, so we try to avoid this.  */
1240 
1241 static void
1242 colorize_one_web (struct web *web, int hard)
1243 {
1244   struct conflict_link *wl;
1245   HARD_REG_SET colors, dont_begin;
1246   int c = -1;
1247   int bestc = -1;
1248   int neighbor_needs= 0;
1249   struct web *fats_parent = NULL;
1250   int num_fat = 0;
1251   int long_blocks = 0;
1252   int best_long_blocks = -1;
1253   HARD_REG_SET fat_colors;
1254   HARD_REG_SET bias;
1255 
1256   CLEAR_HARD_REG_SET (fat_colors);
1257 
1258   if (web->regno >= max_normal_pseudo)
1259     hard = 0;
1260 
1261   /* First we want to know the colors at which we can't begin.  */
1262   calculate_dont_begin (web, &dont_begin);
1263   CLEAR_HARD_REG_SET (bias);
1264 
1265   /* Now set up the set of colors used by our neighbors' neighbors,
1266      and search for the biggest noncolored neighbor.  */
1267   neighbor_needs = web->add_hardregs + 1;
1268   for (wl = web->conflict_list; wl; wl = wl->next)
1269     {
1270       struct web *w;
1271       struct web *ptarget = alias (wl->t);
1272       struct sub_conflict *sl = wl->sub;
1273       IOR_HARD_REG_SET (bias, ptarget->bias_colors);
1274       w = sl ? sl->t : wl->t;
1275       if (ptarget->type != COLORED && ptarget->type != PRECOLORED
1276 	  && !ptarget->was_spilled)
1277         while (w)
1278 	  {
1279 	    if (find_web_for_subweb (w)->type != COALESCED
1280 		&& w->add_hardregs >= neighbor_needs)
1281 	      {
1282 		neighbor_needs = w->add_hardregs;
1283 		fats_parent = ptarget;
1284 		num_fat++;
1285 	      }
1286 	    if (!sl)
1287 	      break;
1288 	    sl = sl->next;
1289 	    w = sl ? sl->t : NULL;
1290 	  }
1291     }
1292 
1293   ra_debug_msg (DUMP_COLORIZE, "colorize web %d [don't begin at %s]", web->id,
1294              hardregset_to_string (dont_begin));
1295 
1296   /* If there are some fat neighbors, remember their usable regs,
1297      and how many blocks are free in it for that neighbor.  */
1298   if (num_fat)
1299     {
1300       COPY_HARD_REG_SET (fat_colors, fats_parent->usable_regs);
1301       long_blocks = count_long_blocks (fat_colors, neighbor_needs + 1);
1302     }
1303 
1304   /* We break out, if we found a color which doesn't constrain
1305      neighbors, or if we can't find any colors.  */
1306   while (1)
1307     {
1308       HARD_REG_SET call_clobbered;
1309 
1310       /* Here we choose a hard-reg for the current web.  For non-spill
1311          temporaries we first search in the hardregs of its preferred
1312 	 class, then, if we found nothing appropriate, in those of the
1313 	 alternate class.  For spill temporaries we only search in
1314 	 usable_regs of this web (which is probably larger than that of
1315 	 the preferred or alternate class).  All searches first try to
1316 	 find a non-call-clobbered hard-reg.
1317          XXX this should be more fine-grained... First look into preferred
1318          non-callclobbered hardregs, then _if_ the web crosses calls, in
1319          alternate non-cc hardregs, and only _then_ also in preferred cc
1320          hardregs (and alternate ones).  Currently we don't track the number
1321          of calls crossed for webs.  We should.  */
1322       if (web->use_my_regs)
1323 	{
1324 	  COPY_HARD_REG_SET (colors, web->usable_regs);
1325 	  AND_HARD_REG_SET (colors,
1326 			    usable_regs[reg_preferred_class (web->regno)]);
1327 	}
1328       else
1329 	COPY_HARD_REG_SET (colors,
1330 			   usable_regs[reg_preferred_class (web->regno)]);
1331 #ifdef CANNOT_CHANGE_MODE_CLASS
1332       if (web->mode_changed)
1333         AND_COMPL_HARD_REG_SET (colors, invalid_mode_change_regs);
1334 #endif
1335       COPY_HARD_REG_SET (call_clobbered, colors);
1336       AND_HARD_REG_SET (call_clobbered, call_used_reg_set);
1337 
1338       /* If this web got a color in the last pass, try to give it the
1339 	 same color again.  This will lead to much better colorization
1340 	 down the line, as we spilled for a certain coloring last time.  */
1341       if (web->old_color)
1342 	{
1343 	  c = web->old_color - 1;
1344 	  if (!color_usable_p (c, dont_begin, colors,
1345 			       PSEUDO_REGNO_MODE (web->regno)))
1346 	    c = -1;
1347 	}
1348       else
1349 	c = -1;
1350       if (c < 0)
1351 	c = get_biased_reg (dont_begin, bias, web->prefer_colors,
1352 			    call_clobbered, PSEUDO_REGNO_MODE (web->regno));
1353       if (c < 0)
1354 	c = get_biased_reg (dont_begin, bias, web->prefer_colors,
1355 			  colors, PSEUDO_REGNO_MODE (web->regno));
1356 
1357       if (c < 0)
1358 	{
1359 	  if (web->use_my_regs)
1360 	    IOR_HARD_REG_SET (colors, web->usable_regs);
1361 	  else
1362 	    IOR_HARD_REG_SET (colors, usable_regs
1363 			      [reg_alternate_class (web->regno)]);
1364 #ifdef CANNOT_CHANGE_MODE_CLASS
1365 	  if (web->mode_changed)
1366 	    AND_COMPL_HARD_REG_SET (colors, invalid_mode_change_regs);
1367 #endif
1368 	  COPY_HARD_REG_SET (call_clobbered, colors);
1369 	  AND_HARD_REG_SET (call_clobbered, call_used_reg_set);
1370 
1371 	  c = get_biased_reg (dont_begin, bias, web->prefer_colors,
1372 			    call_clobbered, PSEUDO_REGNO_MODE (web->regno));
1373 	  if (c < 0)
1374 	    c = get_biased_reg (dont_begin, bias, web->prefer_colors,
1375 			      colors, PSEUDO_REGNO_MODE (web->regno));
1376 	}
1377       if (c < 0)
1378 	break;
1379       if (bestc < 0)
1380         bestc = c;
1381       /* If one of the yet uncolored neighbors which is not a potential
1382 	 spill needs a block of hardregs, be sure not to destroy such a block
1383 	 by coloring one reg in the middle.  */
1384       if (num_fat)
1385 	{
1386 	  int i;
1387 	  int new_long;
1388 	  HARD_REG_SET colors1;
1389 	  COPY_HARD_REG_SET (colors1, fat_colors);
1390 	  for (i = 0; i < 1 + web->add_hardregs; i++)
1391 	    CLEAR_HARD_REG_BIT (colors1, c + i);
1392 	  new_long = count_long_blocks (colors1, neighbor_needs + 1);
1393 	  /* If we changed the number of long blocks, and it's now smaller
1394 	     than needed, we try to avoid this color.  */
1395 	  if (long_blocks != new_long && new_long < num_fat)
1396 	    {
1397 	      if (new_long > best_long_blocks)
1398 		{
1399 		  best_long_blocks = new_long;
1400 		  bestc = c;
1401 		}
1402 	      SET_HARD_REG_BIT (dont_begin, c);
1403 	      ra_debug_msg (DUMP_COLORIZE, " avoid %d", c);
1404 	    }
1405 	  else
1406 	    /* We found a color which doesn't destroy a block.  */
1407 	    break;
1408 	}
1409       /* If we have no fat neighbors, the current color won't become
1410 	 "better", so we've found it.  */
1411       else
1412 	break;
1413     }
1414   ra_debug_msg (DUMP_COLORIZE, " --> got %d", c < 0 ? bestc : c);
1415   if (bestc >= 0 && c < 0 && !web->was_spilled)
1416     {
1417       /* This is a non-potential-spill web, which got a color, which did
1418 	 destroy a hardreg block for one of its neighbors.  We color
1419 	 this web anyway and hope for the best for the neighbor, if we are
1420 	 a spill temp.  */
1421       if (1 || web->spill_temp)
1422         c = bestc;
1423       ra_debug_msg (DUMP_COLORIZE, " [constrains neighbors]");
1424     }
1425   ra_debug_msg (DUMP_COLORIZE, "\n");
1426 
1427   if (c < 0)
1428     {
1429       /* Guard against a simplified node being spilled.  */
1430       /* Don't abort.  This can happen when e.g. enough registers
1431 	 are available in colors, but they are not consecutive.  This is a
1432 	 very serious issue if this web is a short-lived one, because
1433 	 even if we spill this one here, the situation won't become better
1434 	 in the next iteration.  It probably will have the same conflicts,
1435 	 those will have the same colors, and we would come here again, for
1436 	 all parts, in which this one gets split by the spill.  This
1437 	 can result in endless iteration spilling the same register again and
1438 	 again.  That's why we try to find a neighbor which spans more
1439 	 instructions than ourself, and got a color, and try to spill _that_.
1440 
1441 	 if (DLIST_WEB (d)->was_spilled < 0)
1442 	 abort (); */
1443       if (hard && (!web->was_spilled || web->spill_temp))
1444 	{
1445 	  unsigned int loop;
1446 	  struct web *try = NULL;
1447 	  struct web *candidates[8];
1448 
1449 	  ra_debug_msg (DUMP_COLORIZE, "  *** %d spilled, although %s ***\n",
1450 		     web->id, web->spill_temp ? "spilltemp" : "non-spill");
1451 	  /* We make multiple passes over our conflicts, first trying to
1452 	     spill those webs, which only got a color by chance, but
1453 	     were potential spill ones, and if that isn't enough, in a second
1454 	     pass also to spill normal colored webs.  If we still didn't find
1455 	     a candidate, but we are a spill-temp, we make a third pass
1456 	     and include also webs, which were targets for coalescing, and
1457 	     spill those.  */
1458 	  memset (candidates, 0, sizeof candidates);
1459 #define set_cand(i, w) \
1460 	  do { \
1461 	      if (!candidates[(i)] \
1462 		  || (candidates[(i)]->spill_cost < (w)->spill_cost)) \
1463 		candidates[(i)] = (w); \
1464 	  } while (0)
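	  /* The candidate slots are ordered by preference; the loop after
	     this scan picks the first non-NULL entry, so lower-numbered
	     slots are preferred as spill victims.  */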
1465 	  for (wl = web->conflict_list; wl; wl = wl->next)
1466 	    {
1467 	      struct web *w = wl->t;
1468 	      struct web *aw = alias (w);
1469 	      /* If we are a spill-temp, we also look at webs coalesced
1470 		 to precolored ones.  Otherwise we only look at webs which
1471 		 themselves were colored, or coalesced to one.  */
1472 	      if (aw->type == PRECOLORED && w != aw && web->spill_temp
1473 		  && flag_ra_optimistic_coalescing)
1474 		{
1475 		  if (!w->spill_temp)
1476 		    set_cand (4, w);
1477 		  else if (web->spill_temp == 2
1478 			   && w->spill_temp == 2
1479 			   && w->spill_cost < web->spill_cost)
1480 		    set_cand (5, w);
1481 		  else if (web->spill_temp != 2
1482 			   && (w->spill_temp == 2
1483 			       || w->spill_cost < web->spill_cost))
1484 		    set_cand (6, w);
1485 		  continue;
1486 		}
1487 	      if (aw->type != COLORED)
1488 		continue;
1489 	      if (w->type == COLORED && !w->spill_temp && !w->is_coalesced
1490 		  && w->was_spilled)
1491 		{
1492 		  if (w->spill_cost < web->spill_cost)
1493 		    set_cand (0, w);
1494 		  else if (web->spill_temp)
1495 		    set_cand (1, w);
1496 		}
1497 	      if (w->type == COLORED && !w->spill_temp && !w->is_coalesced
1498 		  && !w->was_spilled)
1499 		{
1500 		  if (w->spill_cost < web->spill_cost)
1501 		    set_cand (2, w);
1502 		  else if (web->spill_temp && web->spill_temp != 2)
1503 		    set_cand (3, w);
1504 		}
1505 	      if (web->spill_temp)
1506 		{
1507 		  if (w->type == COLORED && w->spill_temp == 2
1508 		      && !w->is_coalesced
1509 		      && (w->spill_cost < web->spill_cost
1510 			  || web->spill_temp != 2))
1511 		    set_cand (4, w);
1512 		  if (!aw->spill_temp)
1513 		    set_cand (5, aw);
1514 		  if (aw->spill_temp == 2
1515 		      && (aw->spill_cost < web->spill_cost
1516 			  || web->spill_temp != 2))
1517 		    set_cand (6, aw);
1518 		  /* For boehm-gc/misc.c.  If we are a difficult spilltemp,
1519 		     coalesced neighbors are also a chance, _even_ if they
1520 		     too are spilltemps.  At least their coalescing can be
1521 		     broken up, which may reset usable_regs and make
1522 		     it easier to color.  */
1523 		  if (web->spill_temp != 2 && aw->is_coalesced
1524 		      && flag_ra_optimistic_coalescing)
1525 		    set_cand (7, aw);
1526 		}
1527 	    }
1528 	  for (loop = 0; try == NULL && loop < 8; loop++)
1529 	    if (candidates[loop])
1530 	      try = candidates[loop];
1531 #undef set_cand
1532 	  if (try)
1533 	    {
1534 	      int old_c = try->color;
1535 	      if (try->type == COALESCED)
1536 		{
1537 		  if (alias (try)->type != PRECOLORED)
1538 		    abort ();
1539 		  ra_debug_msg (DUMP_COLORIZE, "  breaking alias %d -> %d\n",
1540 			     try->id, alias (try)->id);
1541 		  break_precolored_alias (try);
1542 		  colorize_one_web (web, hard);
1543 		}
1544 	      else
1545 		{
1546 		  remove_list (try->dlink, &WEBS(COLORED));
1547 		  put_web (try, SPILLED);
1548 		  /* Now try to colorize us again.  This can recursively make
1549 		     other webs spill too, until there are no more unspilled
1550 		     neighbors.  */
1551 		  ra_debug_msg (DUMP_COLORIZE, "  trying to spill %d\n", try->id);
1552 		  colorize_one_web (web, hard);
1553 		  if (web->type != COLORED)
1554 		    {
1555 		      /* We tried recursively to spill all already colored
1556 			 neighbors, but we are still uncolorable.  So it made
1557 			 no sense to spill those neighbors.  Recolor them.  */
1558 		      remove_list (try->dlink, &WEBS(SPILLED));
1559 		      put_web (try, COLORED);
1560 		      try->color = old_c;
1561 		      ra_debug_msg (DUMP_COLORIZE,
1562 				    "  spilling %d was useless\n", try->id);
1563 		    }
1564 		  else
1565 		    {
1566 		      ra_debug_msg (DUMP_COLORIZE,
1567 				    "  to spill %d was a good idea\n",
1568 				    try->id);
1569 		      remove_list (try->dlink, &WEBS(SPILLED));
1570 		      if (try->was_spilled)
1571 			colorize_one_web (try, 0);
1572 		      else
1573 			colorize_one_web (try, hard - 1);
1574 		    }
1575 		}
1576 	    }
1577 	  else
1578 	    /* No more chances to get a color, so give up hope and
1579 	       spill us.  */
1580 	    put_web (web, SPILLED);
1581 	}
1582       else
1583         put_web (web, SPILLED);
1584     }
1585   else
1586     {
1587       put_web (web, COLORED);
1588       web->color = c;
1589       if (flag_ra_biased)
1590 	{
1591 	  int nregs = HARD_REGNO_NREGS (c, GET_MODE (web->orig_x));
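	  /* Record the hard regs now occupied by WEB in the bias set of
	     every conflicting web, for use by biased coloring later on.  */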
1592 	  for (wl = web->conflict_list; wl; wl = wl->next)
1593 	    {
1594 	      struct web *ptarget = alias (wl->t);
1595 	      int i;
1596 	      for (i = 0; i < nregs; i++)
1597 		SET_HARD_REG_BIT (ptarget->bias_colors, c + i);
1598 	    }
1599 	}
1600     }
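  /* Webs for pseudos at or above max_normal_pseudo presumably are the
     stack pseudos created by an earlier spill pass; they must not be
     spilled again, so give them the dummy unusable color instead.  */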
1601   if (web->regno >= max_normal_pseudo && web->type == SPILLED)
1602     {
1603       web->color = an_unusable_color;
1604       remove_list (web->dlink, &WEBS(SPILLED));
1605       put_web (web, COLORED);
1606     }
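  /* If an optimistically coalesced target ended up spilled, break the
     coalescing apart again: restore the original conflicts, unalias its
     members, and put WEB back onto the select stack for another try.  */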
1607   if (web->type == SPILLED && flag_ra_optimistic_coalescing
1608       && web->is_coalesced)
1609     {
1610       ra_debug_msg (DUMP_COLORIZE, "breaking aliases to web %d:", web->id);
1611       restore_conflicts_from_coalesce (web);
1612       break_aliases_to_web (web);
1613       insert_coalesced_conflicts ();
1614       ra_debug_msg (DUMP_COLORIZE, "\n");
1615       remove_list (web->dlink, &WEBS(SPILLED));
1616       put_web (web, SELECT);
1617       web->color = -1;
1618     }
1619 }
1620 
1621 /* Assign the colors to all nodes on the select stack, and update the
1622    colors of coalesced webs.  */
1623 
1624 static void
1625 assign_colors (void)
1626 {
1627   struct dlist *d;
1628 
1629   while (WEBS(SELECT))
1630     {
1631       d = pop_list (&WEBS(SELECT));
1632       colorize_one_web (DLIST_WEB (d), 1);
1633     }
1634 
1635   for (d = WEBS(COALESCED); d; d = d->next)
1636     {
1637       struct web *a = alias (DLIST_WEB (d));
1638       DLIST_WEB (d)->color = a->color;
1639     }
1640 }
1641 
1642 /* WEB is a spilled web.  Check whether we can improve the cost of the
1643    graph by coloring WEB, even if we then need to spill some of its
1644    neighbors.  For this we calculate, for each color C, the cost that
1645    results if we _would_ give WEB color C (i.e. the cost of the then
1646    spilled neighbors).  If the lowest such cost is smaller than the
1647    spill cost of WEB, we do that recoloring and spill the neighbors instead.
1648 
1649    This can sometimes help when, due to irregularities in the register
1650    file and due to multi-word pseudos, the colorization is suboptimal.
1651    But be aware that currently this pass is quite slow.  */
1652 
1653 static void
1654 try_recolor_web (struct web *web)
1655 {
1656   struct conflict_link *wl;
1657   unsigned HOST_WIDE_INT *cost_neighbors;
1658   unsigned int *min_color;
1659   int newcol, c;
1660   HARD_REG_SET precolored_neighbors, spill_temps;
1661   HARD_REG_SET possible_begin, wide_seen;
1662   cost_neighbors = xcalloc (FIRST_PSEUDO_REGISTER, sizeof (cost_neighbors[0]));
1663   /* For each hard register remember (1-based) the lowest hard register
1664      at which a value in WEB's mode could start and still overlap it.  */
1665   min_color = xcalloc (FIRST_PSEUDO_REGISTER, sizeof (int));
1666   CLEAR_HARD_REG_SET (possible_begin);
1667   for (c = 0; c < FIRST_PSEUDO_REGISTER; c++)
1668     {
1669       int i, nregs;
1670       if (!HARD_REGNO_MODE_OK (c, GET_MODE (web->orig_x)))
1671 	continue;
1672       nregs = HARD_REGNO_NREGS (c, GET_MODE (web->orig_x));
1673       for (i = 0; i < nregs; i++)
1674 	if (!TEST_HARD_REG_BIT (web->usable_regs, c + i))
1675 	  break;
1676       if (i < nregs || nregs == 0)
1677 	continue;
1678       SET_HARD_REG_BIT (possible_begin, c);
1679       for (; nregs--;)
1680 	if (!min_color[c + nregs])
1681 	  min_color[c + nregs] = 1 + c;
1682     }
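  /* Now possible_begin has a bit for each hard register at which a value
     in WEB's mode could validly start, and min_color[r] is the 1-based
     lowest such start register covering R (zero means none).  E.g. with a
     two-register mode, a possible begin at reg 4 sets min_color[4] and
     min_color[5] to 5, unless a lower begin already did.  */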
1683   CLEAR_HARD_REG_SET (precolored_neighbors);
1684   CLEAR_HARD_REG_SET (spill_temps);
1685   CLEAR_HARD_REG_SET (wide_seen);
1686   for (wl = web->conflict_list; wl; wl = wl->next)
1687     {
1688       HARD_REG_SET dont_begin;
1689       struct web *web2 = alias (wl->t);
1690       struct conflict_link *nn;
1691       int c1, c2;
1692       int wide_p = 0;
1693       if (wl->t->type == COALESCED || web2->type != COLORED)
1694 	{
1695 	  if (web2->type == PRECOLORED)
1696 	    {
1697 	      c1 = min_color[web2->color];
1698 	      c1 = (c1 == 0) ? web2->color : (c1 - 1);
1699 	      c2 = web2->color;
1700 	      for (; c1 <= c2; c1++)
1701 	        SET_HARD_REG_BIT (precolored_neighbors, c1);
1702 	    }
1703 	  continue;
1704 	}
1705       /* Mark colors for which some wide webs are involved.  For
1706 	 those the independent sets are not simply one-node graphs, so
1707 	 they can't be recolored independently from their neighborhood.
1708 	 This means that our cost calculation can be incorrect (it may
1709 	 assume it can avoid spilling a web because it thinks some colors
1710 	 are available, although its neighbors, which themselves need
1711 	 recoloring, might take away exactly those colors).  */
1712       if (web2->add_hardregs)
1713 	wide_p = 1;
1714       for (nn = web2->conflict_list; nn && !wide_p; nn = nn->next)
1715 	if (alias (nn->t)->add_hardregs)
1716 	  wide_p = 1;
1717       calculate_dont_begin (web2, &dont_begin);
1718       c1 = min_color[web2->color];
1719       /* Note that min_color[] contains 1-based values (zero means
1720 	 undef).  */
1721       c1 = c1 == 0 ? web2->color : (c1 - 1);
1722       c2 = web2->color + HARD_REGNO_NREGS (web2->color, GET_MODE
1723 					   (web2->orig_x)) - 1;
1724       for (; c1 <= c2; c1++)
1725 	if (TEST_HARD_REG_BIT (possible_begin, c1))
1726 	  {
1727 	    int nregs;
1728 	    HARD_REG_SET colors;
1729 	    nregs = HARD_REGNO_NREGS (c1, GET_MODE (web->orig_x));
1730 	    COPY_HARD_REG_SET (colors, web2->usable_regs);
1731 	    for (; nregs--;)
1732 	      CLEAR_HARD_REG_BIT (colors, c1 + nregs);
1733 	    if (wide_p)
1734 	      SET_HARD_REG_BIT (wide_seen, c1);
1735 	    if (get_free_reg (dont_begin, colors,
1736 			      GET_MODE (web2->orig_x)) < 0)
1737 	      {
1738 		if (web2->spill_temp)
1739 		  SET_HARD_REG_BIT (spill_temps, c1);
1740 		else
1741 		  cost_neighbors[c1] += web2->spill_cost;
1742 	      }
1743 	  }
1744     }
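  /* At this point cost_neighbors[c] sums the spill cost of the colored
     neighbors that would lose their color if WEB began at hard reg C,
     spill_temps marks the begin regs blocked by spill temporaries, and
     precolored_neighbors marks those overlapping precolored neighbors.  */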
1745   newcol = -1;
1746   for (c = 0; c < FIRST_PSEUDO_REGISTER; c++)
1747     if (TEST_HARD_REG_BIT (possible_begin, c)
1748 	&& !TEST_HARD_REG_BIT (precolored_neighbors, c)
1749 	&& !TEST_HARD_REG_BIT (spill_temps, c)
1750 	&& (newcol == -1
1751 	    || cost_neighbors[c] < cost_neighbors[newcol]))
1752       newcol = c;
1753   if (newcol >= 0 && cost_neighbors[newcol] < web->spill_cost)
1754     {
1755       int nregs = HARD_REGNO_NREGS (newcol, GET_MODE (web->orig_x));
1756       unsigned HOST_WIDE_INT cost = 0;
1757       int *old_colors;
1758       struct conflict_link *wl_next;
1759       ra_debug_msg (DUMP_COLORIZE, "try to set web %d to color %d\n", web->id,
1760 		 newcol);
1761       remove_list (web->dlink, &WEBS(SPILLED));
1762       put_web (web, COLORED);
1763       web->color = newcol;
1764       old_colors = xcalloc (num_webs, sizeof (int));
1765       for (wl = web->conflict_list; wl; wl = wl_next)
1766 	{
1767 	  struct web *web2 = alias (wl->t);
1768 	  /* If web2 is a coalesce-target which will become spilled
1769 	     below in colorize_one_web(), and the current conflict wl
1770 	     between web and web2 was only the result of that coalescing,
1771 	     this conflict will be deleted, making wl invalid.  So save
1772 	     the next conflict right now.  Note that if web2 is indeed
1773 	     in such a state, then wl->next cannot be deleted in this
1774 	     iteration.  */
1775 	  wl_next = wl->next;
1776 	  if (web2->type == COLORED)
1777 	    {
1778 	      int nregs2 = HARD_REGNO_NREGS (web2->color, GET_MODE
1779 					     (web2->orig_x));
1780 	      if (web->color >= web2->color + nregs2
1781 		  || web2->color >= web->color + nregs)
1782 		continue;
1783 	      old_colors[web2->id] = web2->color + 1;
1784 	      web2->color = -1;
1785 	      remove_list (web2->dlink, &WEBS(COLORED));
1786 	      web2->type = SELECT;
1787 	      /* Allow webs to be spilled.  */
1788 	      if (web2->spill_temp == 0 || web2->spill_temp == 2)
1789 		web2->was_spilled = 1;
1790 	      colorize_one_web (web2, 1);
1791 	      if (web2->type == SPILLED)
1792 		cost += web2->spill_cost;
1793 	    }
1794 	}
1795       /* The actual cost may be smaller than the guessed one, because
1796 	 partial conflicts could result in some conflicting webs getting
1797 	 a color where we assumed they must be spilled.  See the comment
1798          above about what happens when wide webs are involved, and why in
1799          that case some webs might actually be spilled although they were
1800          thought to be colorable.  */
1801       if (cost > cost_neighbors[newcol]
1802 	  && nregs == 1 && !TEST_HARD_REG_BIT (wide_seen, newcol))
1803 	abort ();
1804       /* But if the new spill cost is higher than our own, we really lose.
1805 	 Respill us and recolor the neighbors as before.  */
1806       if (cost > web->spill_cost)
1807 	{
1808 	  ra_debug_msg (DUMP_COLORIZE,
1809 		     "reset coloring of web %d, too expensive\n", web->id);
1810 	  remove_list (web->dlink, &WEBS(COLORED));
1811 	  web->color = -1;
1812 	  put_web (web, SPILLED);
1813 	  for (wl = web->conflict_list; wl; wl = wl->next)
1814 	    {
1815 	      struct web *web2 = alias (wl->t);
1816 	      if (old_colors[web2->id])
1817 		{
1818 		  if (web2->type == SPILLED)
1819 		    {
1820 		      remove_list (web2->dlink, &WEBS(SPILLED));
1821 		      web2->color = old_colors[web2->id] - 1;
1822 		      put_web (web2, COLORED);
1823 		    }
1824 		  else if (web2->type == COLORED)
1825 		    web2->color = old_colors[web2->id] - 1;
1826 		  else if (web2->type == SELECT)
1827 		    /* This means that WEB2 once was part of a coalesced
1828 		       web which got spilled in the above colorize_one_web()
1829 		       call, and whose parts then got split and put back
1830 		       onto the SELECT stack.  As the cause for that splitting
1831 		       (the coloring of WEB) was worthless, we should coalesce
1832 		       the parts again, as they were before.  For now we
1833 		       simply leave them SELECTed, for our caller to take
1834 		       care of.  */
1835 		    ;
1836 		  else
1837 		    abort ();
1838 		}
1839 	    }
1840 	}
1841       free (old_colors);
1842     }
1843   free (min_color);
1844   free (cost_neighbors);
1845 }
1846 
1847 /* This ensures that all conflicts of coalesced webs are also seen from
1848    the webs they were coalesced into.  combine() only adds those conflicts
1849    which at the time of combining were not already SELECTed or COALESCED,
1850    so as not to destroy num_conflicts.  Here we add all remaining conflicts
1851    and thereby destroy num_conflicts.  This should be used only when
1852    num_conflicts isn't needed anymore, e.g. on a completely colored graph.  */
1853 
1854 static void
1855 insert_coalesced_conflicts (void)
1856 {
1857   struct dlist *d;
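  /* Note that the "0 &&" below disables this loop, so currently neither
     the consistency check nor any conflict insertion is done here.  */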
1858   for (d = WEBS(COALESCED); 0 && d; d = d->next)
1859     {
1860       struct web *web = DLIST_WEB (d);
1861       struct web *aweb = alias (web);
1862       struct conflict_link *wl;
1863       for (wl = web->conflict_list; wl; wl = wl->next)
1864 	{
1865 	  struct web *tweb = aweb;
1866 	  int i;
1867 	  int nregs = 1 + web->add_hardregs;
1868 	  if (aweb->type == PRECOLORED)
1869 	    nregs = HARD_REGNO_NREGS (aweb->color, GET_MODE (web->orig_x));
1870 	  for (i = 0; i < nregs; i++)
1871 	    {
1872 	      if (aweb->type == PRECOLORED)
1873 		tweb = hardreg2web[i + aweb->color];
1874 	      /* There might be some conflict edges lying around
1875 		 where the usable_regs don't intersect.  This can happen
1876 		 when first some webs were coalesced and conflicts
1877 		 propagated, then some combining narrowed usable_regs and
1878 		 further coalescing ignored those conflicts.  Now there are
1879 		 some edges to COALESCED webs but not to their alias.
1880 		 So abort only when they really should conflict.  */
1881 	      if ((!(tweb->type == PRECOLORED
1882 		     || TEST_BIT (sup_igraph, tweb->id * num_webs + wl->t->id))
1883 		   || !(wl->t->type == PRECOLORED
1884 		        || TEST_BIT (sup_igraph,
1885 				     wl->t->id * num_webs + tweb->id)))
1886 		  && hard_regs_intersect_p (&tweb->usable_regs,
1887 					    &wl->t->usable_regs))
1888 		abort ();
1889 	      /*if (wl->sub == NULL)
1890 		record_conflict (tweb, wl->t);
1891 	      else
1892 		{
1893 		  struct sub_conflict *sl;
1894 		  for (sl = wl->sub; sl; sl = sl->next)
1895 		    record_conflict (tweb, sl->t);
1896 		}*/
1897 	      if (aweb->type != PRECOLORED)
1898 		break;
1899 	    }
1900 	}
1901     }
1902 }
1903 
1904 /* A function suitable to pass to qsort().  Compare the spill costs
1905    of webs W1 and W2.  When used by qsort, this would order webs with
1906    largest cost first.  */
1907 
1908 static int
1909 comp_webs_maxcost (const void *w1, const void *w2)
1910 {
1911   struct web *web1 = *(struct web **)w1;
1912   struct web *web2 = *(struct web **)w2;
1913   if (web1->spill_cost > web2->spill_cost)
1914     return -1;
1915   else if (web1->spill_cost < web2->spill_cost)
1916     return 1;
1917   else
1918     return 0;
1919 }
1920 
1921 /* This tries to recolor all spilled webs.  See try_recolor_web()
1922    for how this is done.  This just calls it for each spilled web.  */
1923 
1924 static void
1925 recolor_spills (void)
1926 {
1927   unsigned int i, num;
1928   struct web **order2web;
1929   num = num_webs - num_subwebs;
1930   order2web = xmalloc (num * sizeof (order2web[0]));
1931   for (i = 0; i < num; i++)
1932     {
1933       order2web[i] = id2web[i];
1934       /* If we aren't breaking aliases, combine() didn't merge the
1935          spill_costs.  So do that here to have sane measures.  */
1936       if (!flag_ra_merge_spill_costs && id2web[i]->type == COALESCED)
1937 	alias (id2web[i])->spill_cost += id2web[i]->spill_cost;
1938     }
1939   qsort (order2web, num, sizeof (order2web[0]), comp_webs_maxcost);
1940   insert_coalesced_conflicts ();
1941   dump_graph_cost (DUMP_COSTS, "before spill-recolor");
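  /* Process the webs in order of decreasing spill cost, so the most
     expensive spills get the first chance to be recolored.  */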
1942   for (i = 0; i < num; i++)
1943     {
1944       struct web *web = order2web[i];
1945       if (web->type == SPILLED)
1946 	try_recolor_web (web);
1947     }
1948   /* It might have been decided in try_recolor_web() (in colorize_one_web())
1949      that a coalesced web should be spilled, so it was put on the
1950      select stack.  Those webs need recoloring again, and all remaining
1951      coalesced webs might need their color updated, so simply call
1952      assign_colors() again.  */
1953   assign_colors ();
1954   free (order2web);
1955 }
1956 
1957 /* This checks the current color assignment for obvious errors,
1958    like two conflicting webs overlapping in colors, or the used colors
1959    not being in usable regs.  */
1960 
1961 static void
1962 check_colors (void)
1963 {
1964   unsigned int i;
1965   for (i = 0; i < num_webs - num_subwebs; i++)
1966     {
1967       struct web *web = id2web[i];
1968       struct web *aweb = alias (web);
1969       struct conflict_link *wl;
1970       int nregs, c;
1971       if (aweb->type == SPILLED || web->regno >= max_normal_pseudo)
1972 	continue;
1973       else if (aweb->type == COLORED)
1974 	nregs = HARD_REGNO_NREGS (aweb->color, GET_MODE (web->orig_x));
1975       else if (aweb->type == PRECOLORED)
1976 	nregs = 1;
1977       else
1978 	abort ();
1979       /* The color must be valid for the original usable_regs.  */
1980       for (c = 0; c < nregs; c++)
1981 	if (!TEST_HARD_REG_BIT (web->usable_regs, aweb->color + c))
1982 	  abort ();
1983       /* Search the original (pre-coalesce) conflict list.  In the current
1984 	 one some imprecise conflicts may be noted (due to combine() or
1985 	 insert_coalesced_conflicts() relocating partial conflicts), making
1986 	 it look like some wide webs are in conflict and have the same
1987 	 color.  */
1988       wl = (web->have_orig_conflicts ? web->orig_conflict_list
1989 	    : web->conflict_list);
1990       for (; wl; wl = wl->next)
1991 	if (wl->t->regno >= max_normal_pseudo)
1992 	  continue;
1993 	else if (!wl->sub)
1994 	  {
1995 	    struct web *web2 = alias (wl->t);
1996 	    int nregs2;
1997 	    if (web2->type == COLORED)
1998 	      nregs2 = HARD_REGNO_NREGS (web2->color, GET_MODE (web2->orig_x));
1999 	    else if (web2->type == PRECOLORED)
2000 	      nregs2 = 1;
2001 	    else
2002 	      continue;
2003 	    if (aweb->color >= web2->color + nregs2
2004 	        || web2->color >= aweb->color + nregs)
2005 	      continue;
2006 	    abort ();
2007 	  }
2008 	else
2009 	  {
2010 	    struct sub_conflict *sl;
2011 	    int scol = aweb->color;
2012 	    int tcol = alias (wl->t)->color;
2013 	    if (alias (wl->t)->type == SPILLED)
2014 	      continue;
2015 	    for (sl = wl->sub; sl; sl = sl->next)
2016 	      {
2017 		int ssize = HARD_REGNO_NREGS (scol, GET_MODE (sl->s->orig_x));
2018 		int tsize = HARD_REGNO_NREGS (tcol, GET_MODE (sl->t->orig_x));
2019 		int sofs = 0, tofs = 0;
2020 	        if (SUBWEB_P (sl->t)
2021 		    && GET_MODE_SIZE (GET_MODE (sl->t->orig_x)) >= UNITS_PER_WORD)
2022 		  tofs = (SUBREG_BYTE (sl->t->orig_x) / UNITS_PER_WORD);
2023 	        if (SUBWEB_P (sl->s)
2024 		    && GET_MODE_SIZE (GET_MODE (sl->s->orig_x))
2025 		       >= UNITS_PER_WORD)
2026 		  sofs = (SUBREG_BYTE (sl->s->orig_x) / UNITS_PER_WORD);
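		/* The two subwebs clash only if the hard registers they
		   actually occupy, [scol+sofs, scol+sofs+ssize) and
		   [tcol+tofs, tcol+tofs+tsize), overlap.  */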
2027 		if ((tcol + tofs >= scol + sofs + ssize)
2028 		    || (scol + sofs >= tcol + tofs + tsize))
2029 		  continue;
2030 		abort ();
2031 	      }
2032 	  }
2033     }
2034 }
2035 
2036 /* WEB was a coalesced web.  Make it unaliased again, and put it
2037    back onto the SELECT stack.  */
2038 
2039 static void
2040 unalias_web (struct web *web)
2041 {
2042   web->alias = NULL;
2043   web->is_coalesced = 0;
2044   web->color = -1;
2045   /* Well, initially everything was spilled, so it isn't incorrect
2046      that the individual parts can also be spilled.
2047      XXX this isn't entirely correct, as we also relaxed the
2048      spill_temp flag in combine(), which might have made components
2049      spillable although they were a short or spilltemp web.  */
2050   web->was_spilled = 1;
2051   remove_list (web->dlink, &WEBS(COALESCED));
2052   /* Spilltemps must be colored right now (i.e. as early as possible),
2053      other webs can be deferred to the end (the code building the
2054      stack assumed that in this stage only one web was colored).  */
2055   if (web->spill_temp && web->spill_temp != 2)
2056     put_web (web, SELECT);
2057   else
2058     put_web_at_end (web, SELECT);
2059 }
2060 
2061 /* WEB is a _target_ for coalescing which got spilled.
2062    Break all aliases to WEB, and restore some of its members to the state
2063    they were in before coalescing.  Due to the suboptimal structure of
2064    the interference graph we need to go through all coalesced webs.
2065    At some point we'll change this to be more sane.  */
2066 
2067 static void
2068 break_aliases_to_web (struct web *web)
2069 {
2070   struct dlist *d, *d_next;
2071   if (web->type != SPILLED)
2072     abort ();
2073   for (d = WEBS(COALESCED); d; d = d_next)
2074     {
2075       struct web *other = DLIST_WEB (d);
2076       d_next = d->next;
2077       /* Beware: Don't use alias() here.  We really want to check only
2078 	 one level of aliasing, i.e. only break up webs directly
2079 	 aliased to WEB, not also those aliased through other webs.  */
2080       if (other->alias == web)
2081 	{
2082 	  unalias_web (other);
2083 	  ra_debug_msg (DUMP_COLORIZE, " %d", other->id);
2084 	}
2085     }
2086   web->spill_temp = web->orig_spill_temp;
2087   web->spill_cost = web->orig_spill_cost;
2088   /* Beware: The following possibly widens usable_regs again.  While
2089      it was narrower there might have been some conflicts added which got
2090      ignored because of non-intersecting hardregsets.  All those conflicts
2091      would now matter again.  Fortunately we only add conflicts when
2092      coalescing, which is also the time of narrowing.  And we remove all
2093      those added conflicts again now that we unalias this web.
2094      Therefore this is safe to do.  */
2095   COPY_HARD_REG_SET (web->usable_regs, web->orig_usable_regs);
2096   web->is_coalesced = 0;
2097   web->num_aliased = 0;
2098   web->was_spilled = 1;
2099   /* Reset the is_coalesced flag for webs which themselves are targets
2100      of coalescing.  It was cleared above if they were coalesced to WEB.  */
2101   for (d = WEBS(COALESCED); d; d = d->next)
2102     DLIST_WEB (d)->alias->is_coalesced = 1;
2103 }
2104 
2105 /* WEB is a web coalesced into a precolored one.  Break that alias,
2106    making WEB SELECTed again.  Also restores the conflicts which resulted
2107    from initially coalescing both.  */
2108 
2109 static void
2110 break_precolored_alias (struct web *web)
2111 {
2112   struct web *pre = web->alias;
2113   struct conflict_link *wl;
2114   unsigned int c = pre->color;
2115   unsigned int nregs = HARD_REGNO_NREGS (c, GET_MODE (web->orig_x));
2116   if (pre->type != PRECOLORED)
2117     abort ();
2118   unalias_web (web);
2119   /* Now we need to look at each conflict X of WEB, check if it conflicts
2120      with [PRE, PRE+nregs), and remove such conflicts if X has no other
2121      conflicts which are coalesced into those precolored webs.  */
2122   for (wl = web->conflict_list; wl; wl = wl->next)
2123     {
2124       struct web *x = wl->t;
2125       struct web *y;
2126       unsigned int i;
2127       struct conflict_link *wl2;
2128       struct conflict_link **pcl;
2129       HARD_REG_SET regs;
2130       if (!x->have_orig_conflicts)
2131 	continue;
2132       /* First look at which colors cannot go away, due to other coalesces
2133 	 still existing.  */
2134       CLEAR_HARD_REG_SET (regs);
2135       for (i = 0; i < nregs; i++)
2136 	SET_HARD_REG_BIT (regs, c + i);
2137       for (wl2 = x->conflict_list; wl2; wl2 = wl2->next)
2138 	if (wl2->t->type == COALESCED && alias (wl2->t)->type == PRECOLORED)
2139 	  CLEAR_HARD_REG_BIT (regs, alias (wl2->t)->color);
2140       /* Now also remove the colors of those conflicts which already
2141 	 were there before coalescing at all.  */
2142       for (wl2 = x->orig_conflict_list; wl2; wl2 = wl2->next)
2143 	if (wl2->t->type == PRECOLORED)
2144 	  CLEAR_HARD_REG_BIT (regs, wl2->t->color);
2145       /* The colors now still set are those for which WEB was the last
2146 	 cause, i.e. those which can be removed.  */
2147       y = NULL;
2148       for (i = 0; i < nregs; i++)
2149 	if (TEST_HARD_REG_BIT (regs, c + i))
2150 	  {
2151 	    struct web *sub;
2152 	    y = hardreg2web[c + i];
2153 	    RESET_BIT (sup_igraph, x->id * num_webs + y->id);
2154 	    RESET_BIT (sup_igraph, y->id * num_webs + x->id);
2155 	    RESET_BIT (igraph, igraph_index (x->id, y->id));
2156 	    for (sub = x->subreg_next; sub; sub = sub->subreg_next)
2157 	      RESET_BIT (igraph, igraph_index (sub->id, y->id));
2158 	  }
2159       if (!y)
2160 	continue;
2161       pcl = &(x->conflict_list);
2162       while (*pcl)
2163 	{
2164 	  struct web *y = (*pcl)->t;
2165 	  if (y->type != PRECOLORED || !TEST_HARD_REG_BIT (regs, y->color))
2166 	    pcl = &((*pcl)->next);
2167 	  else
2168 	    *pcl = (*pcl)->next;
2169 	}
2170     }
2171 }
2172 
2173 /* WEB is a spilled web which was target for coalescing.
2174    Delete all interference edges which were added due to that coalescing,
2175    and break up the coalescing.  */
2176 
2177 static void
2178 restore_conflicts_from_coalesce (struct web *web)
2179 {
2180   struct conflict_link **pcl;
2181   struct conflict_link *wl;
2182   pcl = &(web->conflict_list);
2183   /* No original conflict list means no conflict was added at all
2184      after building the graph.  So neither we nor any neighbors have
2185      conflicts due to this coalescing.  */
2186   if (!web->have_orig_conflicts)
2187     return;
2188   while (*pcl)
2189     {
2190       struct web *other = (*pcl)->t;
2191       for (wl = web->orig_conflict_list; wl; wl = wl->next)
2192 	if (wl->t == other)
2193 	  break;
2194       if (wl)
2195 	{
2196 	  /* We found this conflict also in the original list, so this
2197 	     was no new conflict.  */
2198 	  pcl = &((*pcl)->next);
2199 	}
2200       else
2201 	{
2202 	  /* This is a new conflict, so delete it from us and
2203 	     the neighbor.  */
2204 	  struct conflict_link **opcl;
2205 	  struct conflict_link *owl;
2206 	  struct sub_conflict *sl;
2207 	  wl = *pcl;
2208 	  *pcl = wl->next;
2209 	  if (!other->have_orig_conflicts && other->type != PRECOLORED)
2210 	    abort ();
2211 	  for (owl = other->orig_conflict_list; owl; owl = owl->next)
2212 	    if (owl->t == web)
2213 	      break;
2214 	  if (owl)
2215 	    abort ();
2216 	  opcl = &(other->conflict_list);
2217 	  while (*opcl)
2218 	    {
2219 	      if ((*opcl)->t == web)
2220 		{
2221 		  owl = *opcl;
2222 		  *opcl = owl->next;
2223 		  break;
2224 		}
2225 	      else
2226 		{
2227 		  opcl = &((*opcl)->next);
2228 		}
2229 	    }
2230 	  if (!owl && other->type != PRECOLORED)
2231 	    abort ();
2232 	  /* wl and owl contain the edge data to be deleted.  */
2233 	  RESET_BIT (sup_igraph, web->id * num_webs + other->id);
2234 	  RESET_BIT (sup_igraph, other->id * num_webs + web->id);
2235 	  RESET_BIT (igraph, igraph_index (web->id, other->id));
2236 	  for (sl = wl->sub; sl; sl = sl->next)
2237 	    RESET_BIT (igraph, igraph_index (sl->s->id, sl->t->id));
2238 	  if (other->type != PRECOLORED)
2239 	    {
2240 	      for (sl = owl->sub; sl; sl = sl->next)
2241 		RESET_BIT (igraph, igraph_index (sl->s->id, sl->t->id));
2242 	    }
2243 	}
2244     }
2245 
2246   /* We must restore usable_regs because record_conflict will use it.  */
2247   COPY_HARD_REG_SET (web->usable_regs, web->orig_usable_regs);
2248   /* We might have deleted some conflicts above which really are still
2249      there (diamond pattern coalescing).  This is because we don't
2250      reference-count interference edges, and some of them were the result
2251      of different coalesces.  */
2252   for (wl = web->conflict_list; wl; wl = wl->next)
2253     if (wl->t->type == COALESCED)
2254       {
2255 	struct web *tweb;
2256 	for (tweb = wl->t->alias; tweb; tweb = tweb->alias)
2257 	  {
2258 	    if (wl->sub == NULL)
2259 	      record_conflict (web, tweb);
2260 	    else
2261 	      {
2262 		struct sub_conflict *sl;
2263 		for (sl = wl->sub; sl; sl = sl->next)
2264 		  {
2265 		    struct web *sweb = NULL;
2266 		    if (SUBWEB_P (sl->t))
2267 		      sweb = find_subweb (tweb, sl->t->orig_x);
2268 		    if (!sweb)
2269 		      sweb = tweb;
2270 		    record_conflict (sl->s, sweb);
2271 		  }
2272 	      }
2273 	    if (tweb->type != COALESCED)
2274 	      break;
2275 	  }
2276       }
2277 }
2278 
2279 /* Repeatedly break aliases for spilled webs which were targets for
2280    coalescing, and recolorize the resulting parts.  Do this as long as
2281    there are any spilled coalesce targets.  */
2282 
2283 static void
2284 break_coalesced_spills (void)
2285 {
2286   int changed = 0;
2287   while (1)
2288     {
2289       struct dlist *d;
2290       struct web *web;
2291       for (d = WEBS(SPILLED); d; d = d->next)
2292 	if (DLIST_WEB (d)->is_coalesced)
2293 	  break;
2294       if (!d)
2295 	break;
2296       changed = 1;
2297       web = DLIST_WEB (d);
2298       ra_debug_msg (DUMP_COLORIZE, "breaking aliases to web %d:", web->id);
2299       restore_conflicts_from_coalesce (web);
2300       break_aliases_to_web (web);
2301       /* WEB was a spilled web and isn't anymore.  Everything coalesced
2302 	 to WEB is now SELECTed and might potentially get a color.
2303 	 If those other webs were themselves targets of coalescing, some
2304 	 conflicts from aliased webs might still be missing, because they
2305 	 were added in combine() directly into the now SELECTed web.
2306 	 So we need to add those missing conflicts here.  */
2307       insert_coalesced_conflicts ();
2308       ra_debug_msg (DUMP_COLORIZE, "\n");
2309       remove_list (d, &WEBS(SPILLED));
2310       put_web (web, SELECT);
2311       web->color = -1;
2312       while (WEBS(SELECT))
2313 	{
2314 	  d = pop_list (&WEBS(SELECT));
2315 	  colorize_one_web (DLIST_WEB (d), 1);
2316 	}
2317     }
2318   if (changed)
2319     {
2320       struct dlist *d;
2321       for (d = WEBS(COALESCED); d; d = d->next)
2322 	{
2323 	  struct web *a = alias (DLIST_WEB (d));
2324 	  DLIST_WEB (d)->color = a->color;
2325 	}
2326     }
2327   dump_graph_cost (DUMP_COSTS, "after alias-breaking");
2328 }
2329 
2330 /* A structure for fast hashing of a pair of webs.
2331    Used to accumulate savings (from removing copy insns) for coalesced webs.
2332    All the pairs are also put into a singly linked list.  */
2333 struct web_pair
2334 {
2335   struct web_pair *next_hash;
2336   struct web_pair *next_list;
2337   struct web *smaller;
2338   struct web *larger;
2339   unsigned int conflicts;
2340   unsigned HOST_WIDE_INT cost;
2341 };
2342 
2343 /* The actual hash table.  */
2344 #define WEB_PAIR_HASH_SIZE 8192
2345 static struct web_pair *web_pair_hash[WEB_PAIR_HASH_SIZE];
2346 static struct web_pair *web_pair_list;
2347 static unsigned int num_web_pairs;
2348 
2349 /* Clear the hash table of web pairs.  */
2350 
2351 static void
2352 init_web_pairs (void)
2353 {
2354   memset (web_pair_hash, 0, sizeof web_pair_hash);
2355   num_web_pairs = 0;
2356   web_pair_list = NULL;
2357 }
2358 
2359 /* Given two webs connected by a move with cost COST, which together
2360    have CONFLICTS conflicts, add that pair to the hash table, or if
2361    it is already there, accumulate the costs and conflict numbers.  */
2362 
2363 static void
2364 add_web_pair_cost (struct web *web1, struct web *web2,
2365 		   unsigned HOST_WIDE_INT cost, unsigned int conflicts)
2366 {
2367   unsigned int hash;
2368   struct web_pair *p;
2369   if (web1->id > web2->id)
2370     {
2371       struct web *h = web1;
2372       web1 = web2;
2373       web2 = h;
2374     }
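  /* Hash the ordered pair by its position in a virtual num_webs x num_webs
     matrix, folded into the fixed-size hash table.  */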
2375   hash = (web1->id * num_webs + web2->id) % WEB_PAIR_HASH_SIZE;
2376   for (p = web_pair_hash[hash]; p; p = p->next_hash)
2377     if (p->smaller == web1 && p->larger == web2)
2378       {
2379 	p->cost += cost;
2380 	p->conflicts += conflicts;
2381 	return;
2382       }
2383   p = ra_alloc (sizeof *p);
2384   p->next_hash = web_pair_hash[hash];
2385   p->next_list = web_pair_list;
2386   p->smaller = web1;
2387   p->larger = web2;
2388   p->conflicts = conflicts;
2389   p->cost = cost;
2390   web_pair_hash[hash] = p;
2391   web_pair_list = p;
2392   num_web_pairs++;
2393 }
2394 
2395 /* Suitable to be passed to qsort().  Sort web pairs so that those
2396    with more conflicts and higher cost (which actually is a saving
2397    when the moves are removed) come first.  */
2398 
2399 static int
2400 comp_web_pairs (const void *w1, const void *w2)
2401 {
2402   struct web_pair *p1 = *(struct web_pair **)w1;
2403   struct web_pair *p2 = *(struct web_pair **)w2;
2404   if (p1->conflicts > p2->conflicts)
2405     return -1;
2406   else if (p1->conflicts < p2->conflicts)
2407     return 1;
2408   else if (p1->cost > p2->cost)
2409     return -1;
2410   else if (p1->cost < p2->cost)
2411     return 1;
2412   else
2413     return 0;
2414 }
2415 
2416 /* Given the list of web pairs, combine them, beginning with the one
2417    giving the most savings.  */
2418 
2419 static void
2420 sort_and_combine_web_pairs (int for_move)
2421 {
2422   unsigned int i;
2423   struct web_pair **sorted;
2424   struct web_pair *p;
2425   if (!num_web_pairs)
2426     return;
2427   sorted = xmalloc (num_web_pairs * sizeof (sorted[0]));
2428   for (p = web_pair_list, i = 0; p; p = p->next_list)
2429     sorted[i++] = p;
2430   if (i != num_web_pairs)
2431     abort ();
2432   qsort (sorted, num_web_pairs, sizeof (sorted[0]), comp_web_pairs);
2433 
2434   /* After combining one pair, we actually should adjust the savings
2435      of the other pairs, if they are connected to one member of the just
2436      coalesced pair.  Later.  */
2437   for (i = 0; i < num_web_pairs; i++)
2438     {
2439       struct web *w1, *w2;
2440       p = sorted[i];
2441       w1 = alias (p->smaller);
2442       w2 = alias (p->larger);
2443       if (!for_move && (w1->type == PRECOLORED || w2->type == PRECOLORED))
2444 	continue;
2445       else if (w2->type == PRECOLORED)
2446 	{
2447 	  struct web *h = w1;
2448 	  w1 = w2;
2449 	  w2 = h;
2450 	}
2451       if (w1 != w2
2452 	  && !TEST_BIT (sup_igraph, w1->id * num_webs + w2->id)
2453 	  && !TEST_BIT (sup_igraph, w2->id * num_webs + w1->id)
2454 	  && w2->type != PRECOLORED
2455 	  && hard_regs_intersect_p (&w1->usable_regs, &w2->usable_regs))
2456 	  {
2457 	    if (w1->type != PRECOLORED
2458 		|| (w1->type == PRECOLORED && ok (w2, w1)))
2459 	      combine (w1, w2);
2460 	    else if (w1->type == PRECOLORED)
2461 	      SET_HARD_REG_BIT (w2->prefer_colors, w1->color);
2462 	  }
2463     }
2464   free (sorted);
2465 }
2466 
2467 /* Greedily coalesce all moves possible.  Begin with the web pair
2468    giving the most saving if coalesced.  */
2469 
2470 static void
2471 aggressive_coalesce (void)
2472 {
2473   struct dlist *d;
2474   struct move *m;
2475   init_web_pairs ();
2476   while ((d = pop_list (&mv_worklist)) != NULL)
2477     if ((m = DLIST_MOVE (d)))
2478       {
2479 	struct web *s = alias (m->source_web);
2480 	struct web *t = alias (m->target_web);
2481 	if (t->type == PRECOLORED)
2482 	  {
2483 	    struct web *h = s;
2484 	    s = t;
2485 	    t = h;
2486 	  }
2487 	if (s != t
2488 	    && t->type != PRECOLORED
2489 	    && !TEST_BIT (sup_igraph, s->id * num_webs + t->id)
2490 	    && !TEST_BIT (sup_igraph, t->id * num_webs + s->id))
2491 	  {
2492 	    if ((s->type == PRECOLORED && ok (t, s))
2493 		|| s->type != PRECOLORED)
2494 	      {
2495 	        put_move (m, MV_COALESCED);
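		/* The saving from removing this move is approximated by
		   the execution frequency of the block containing it.  */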
2496 		add_web_pair_cost (s, t, BLOCK_FOR_INSN (m->insn)->frequency,
2497 				   0);
2498 	      }
2499 	    else if (s->type == PRECOLORED)
2500 	      /* It is !ok(t, s).  But later when coloring the graph it might
2501 		 be possible to take that color.  So we remember the preferred
2502 		 color to try that first.  */
2503 	      {
2504 		put_move (m, CONSTRAINED);
2505 		SET_HARD_REG_BIT (t->prefer_colors, s->color);
2506 	      }
2507 	  }
2508 	else
2509 	  {
2510 	    put_move (m, CONSTRAINED);
2511 	  }
2512       }
2513   sort_and_combine_web_pairs (1);
2514 }
2515 
2516 /* This is the difference between optimistic coalescing and
2517    optimistic coalescing+.  Extended coalescing also tries to coalesce
2518    non-conflicting nodes that are not related by a move.  The criterion
2519    here is that one web must be a source and the other a destination of
2520    the same insn.  This actually makes sense, as (because they are in the
2521    same insn) they share many of their neighbors, and if they are coalesced,
2522    they reduce the number of conflicts of those neighbors by one.  For this
2523    we sort the candidate pairs again according to savings (and this time
2524    also the conflict number).
2525 
2526    This is also a comparatively slow operation, as we need to go through
2527    all insns, and for each insn, through all defs and uses.  */
2528 
2529 static void
2530 extended_coalesce_2 (void)
2531 {
2532   rtx insn;
2533   struct ra_insn_info info;
2534   unsigned int n;
2535   init_web_pairs ();
2536   for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
2537     if (INSN_P (insn) && (info = insn_df[INSN_UID (insn)]).num_defs)
2538       for (n = 0; n < info.num_defs; n++)
2539 	{
2540 	  struct web *dest = def2web[DF_REF_ID (info.defs[n])];
2541 	  dest = alias (find_web_for_subweb (dest));
2542 	  if (dest->type != PRECOLORED && dest->regno < max_normal_pseudo)
2543 	    {
2544 	      unsigned int n2;
2545 	      for (n2 = 0; n2 < info.num_uses; n2++)
2546 		{
2547 		  struct web *source = use2web[DF_REF_ID (info.uses[n2])];
2548 		  source = alias (find_web_for_subweb (source));
2549 		  if (source->type != PRECOLORED
2550 		      && source != dest
2551 		      && source->regno < max_normal_pseudo
2552 		      /* Coalesced webs end up using the same REG rtx in
2553 			 emit_colors().  So we can only coalesce something
2554 			 of equal modes.  */
2555 		      && GET_MODE (source->orig_x) == GET_MODE (dest->orig_x)
2556 		      && !TEST_BIT (sup_igraph,
2557 				    dest->id * num_webs + source->id)
2558 		      && !TEST_BIT (sup_igraph,
2559 				    source->id * num_webs + dest->id)
2560 		      && hard_regs_intersect_p (&source->usable_regs,
2561 						&dest->usable_regs))
2562 		    add_web_pair_cost (dest, source,
2563 				       BLOCK_FOR_INSN (insn)->frequency,
2564 				       dest->num_conflicts
2565 				       + source->num_conflicts);
2566 		}
2567 	    }
2568 	}
2569   sort_and_combine_web_pairs (0);
2570 }
2571 
2572 /* Check if we forgot to coalesce some moves.  */
2573 
2574 static void
2575 check_uncoalesced_moves (void)
2576 {
2577   struct move_list *ml;
2578   struct move *m;
2579   for (ml = wl_moves; ml; ml = ml->next)
2580     if ((m = ml->move))
2581       {
2582 	struct web *s = alias (m->source_web);
2583 	struct web *t = alias (m->target_web);
2584 	if (t->type == PRECOLORED)
2585 	  {
2586 	    struct web *h = s;
2587 	    s = t;
2588 	    t = h;
2589 	  }
2590 	if (s != t
2591 	    && m->type != CONSTRAINED
2592 	    /* The following can happen when a move was coalesced, but later
2593 	       broken up again.  Then s != t, but m is still MV_COALESCED.  */
2594 	    && m->type != MV_COALESCED
2595 	    && t->type != PRECOLORED
2596 	    && ((s->type == PRECOLORED && ok (t, s))
2597 		|| s->type != PRECOLORED)
2598 	    && !TEST_BIT (sup_igraph, s->id * num_webs + t->id)
2599 	    && !TEST_BIT (sup_igraph, t->id * num_webs + s->id))
2600 	  abort ();
2601       }
2602 }
2603 
2604 /* The toplevel function in this file.  The precondition is that
2605    the interference graph has been built completely by ra-build.c.
2606    This produces a list of spilled, colored and coalesced nodes.  */
2607 
2608 void
2609 ra_colorize_graph (struct df *df)
2610 {
2611   if (rtl_dump_file)
2612     dump_igraph (df);
2613   build_worklists (df);
2614 
2615   /* With optimistic coalescing we coalesce everything we can.  */
2616   if (flag_ra_optimistic_coalescing)
2617     {
2618       aggressive_coalesce ();
2619       extended_coalesce_2 ();
2620     }
2621 
2622   /* Now build the select stack.  */
2623   do
2624     {
2625       simplify ();
2626       if (mv_worklist)
2627 	coalesce ();
2628       else if (WEBS(FREEZE))
2629 	freeze ();
2630       else if (WEBS(SPILL))
2631 	select_spill ();
2632     }
2633   while (WEBS(SIMPLIFY) || WEBS(SIMPLIFY_FAT) || WEBS(SIMPLIFY_SPILL)
2634 	 || mv_worklist || WEBS(FREEZE) || WEBS(SPILL));
2635   if (flag_ra_optimistic_coalescing)
2636     check_uncoalesced_moves ();
2637 
2638   /* Actually colorize the webs from the select stack.  */
2639   assign_colors ();
2640   check_colors ();
2641   dump_graph_cost (DUMP_COSTS, "initially");
2642   if (flag_ra_break_aliases)
2643     break_coalesced_spills ();
2644   check_colors ();
2645 
2646   /* And try to improve the cost by recoloring spilled webs.  */
2647   recolor_spills ();
2648   dump_graph_cost (DUMP_COSTS, "after spill-recolor");
2649   check_colors ();
2650 }
2651 
2652 /* Initialize this module.  */
2653 
2654 void ra_colorize_init (void)
2655 {
2656   /* FIXME: Choose spill heuristic for platform if we have one */
2657   spill_heuristic = default_spill_heuristic;
2658 }
2659 
2660 /* Free all memory.  (Note that we don't need to free any per-pass
2661    memory.)  */
2662 
2663 void
2664 ra_colorize_free_all (void)
2665 {
2666   struct dlist *d;
2667   while ((d = pop_list (&WEBS(FREE))) != NULL)
2668     put_web (DLIST_WEB (d), INITIAL);
2669   while ((d = pop_list (&WEBS(INITIAL))) != NULL)
2670     {
2671       struct web *web = DLIST_WEB (d);
2672       struct web *wnext;
2673       web->orig_conflict_list = NULL;
2674       web->conflict_list = NULL;
2675       for (web = web->subreg_next; web; web = wnext)
2676 	{
2677 	  wnext = web->subreg_next;
2678 	  free (web);
2679 	}
2680       free (DLIST_WEB (d));
2681     }
2682 }
2683 
2684 /*
2685 vim:cinoptions={.5s,g0,p5,t0,(0,^-0.5s,n-0.5s:tw=78:cindent:sw=4:
2686 */
2687