xref: /dragonfly/contrib/gcc-4.7/gcc/ira-build.c (revision 8af44722)
1 /* Building internal representation for IRA.
2    Copyright (C) 2006, 2007, 2008, 2009, 2010
3    Free Software Foundation, Inc.
4    Contributed by Vladimir Makarov <vmakarov@redhat.com>.
5 
6 This file is part of GCC.
7 
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12 
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
16 for more details.
17 
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3.  If not see
20 <http://www.gnu.org/licenses/>.  */
21 
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "tm_p.h"
28 #include "target.h"
29 #include "regs.h"
30 #include "flags.h"
31 #include "hard-reg-set.h"
32 #include "basic-block.h"
33 #include "insn-config.h"
34 #include "recog.h"
35 #include "diagnostic-core.h"
36 #include "params.h"
37 #include "df.h"
38 #include "output.h"
39 #include "reload.h"
40 #include "sparseset.h"
41 #include "ira-int.h"
42 #include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
43 
/* Forward declaration; defined later in this file.  */
static ira_copy_t find_allocno_copy (ira_allocno_t, ira_allocno_t, rtx,
				     ira_loop_tree_node_t);

/* The root of the loop tree, corresponding to the whole function.  */
ira_loop_tree_node_t ira_loop_tree_root;

/* Height of the loop tree.  */
int ira_loop_tree_height;

/* All nodes representing basic blocks are referred through the
   following array.  We can not use basic block member `aux' for this
   because it is used for insertion of insns on edges.  */
ira_loop_tree_node_t ira_bb_nodes;

/* All nodes representing loops are referred through the following
   array.  */
ira_loop_tree_node_t ira_loop_nodes;

/* Map regno -> allocnos with given regno (see comments for
   allocno member `next_regno_allocno').  */
ira_allocno_t *ira_regno_allocno_map;

/* Array of references to all allocnos.  The order number of the
   allocno corresponds to the index in the array.  Removed allocnos
   have NULL element value.  */
ira_allocno_t *ira_allocnos;

/* Size of the previous array.  */
int ira_allocnos_num;

/* Count of conflict record structures we've created, used when creating
   a new conflict id.  */
int ira_objects_num;

/* Map a conflict id to its conflict record.  */
ira_object_t *ira_object_id_map;

/* Array of references to all copies.  The order number of the copy
   corresponds to the index in the array.  Removed copies have NULL
   element value.  */
ira_copy_t *ira_copies;

/* Size of the previous array.  */
int ira_copies_num;



/* LAST_BASIC_BLOCK before generating additional insns because of live
   range splitting.  Emitting insns on a critical edge creates a new
   basic block.  */
static int last_basic_block_before_change;
95 
96 /* Initialize some members in loop tree node NODE.  Use LOOP_NUM for
97    the member loop_num.  */
98 static void
99 init_loop_tree_node (struct ira_loop_tree_node *node, int loop_num)
100 {
101   int max_regno = max_reg_num ();
102 
103   node->regno_allocno_map
104     = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t) * max_regno);
105   memset (node->regno_allocno_map, 0, sizeof (ira_allocno_t) * max_regno);
106   memset (node->reg_pressure, 0, sizeof (node->reg_pressure));
107   node->all_allocnos = ira_allocate_bitmap ();
108   node->modified_regnos = ira_allocate_bitmap ();
109   node->border_allocnos = ira_allocate_bitmap ();
110   node->local_copies = ira_allocate_bitmap ();
111   node->loop_num = loop_num;
112   node->children = NULL;
113   node->subloops = NULL;
114 }
115 
116 
/* The following function allocates the loop tree nodes.  If
   CURRENT_LOOPS is NULL, the nodes corresponding to the loops (except
   the root which corresponds to the whole function) will not be
   allocated but nodes will still be allocated for basic blocks.  */
static void
create_loop_tree_nodes (void)
{
  unsigned int i, j;
  bool skip_p;
  edge_iterator ei;
  edge e;
  VEC (edge, heap) *edges;
  loop_p loop;

  /* One node per basic block.  Members are only NULLed here; they are
     filled in later (see form_loop_tree).  */
  ira_bb_nodes
    = ((struct ira_loop_tree_node *)
       ira_allocate (sizeof (struct ira_loop_tree_node) * last_basic_block));
  last_basic_block_before_change = last_basic_block;
  for (i = 0; i < (unsigned int) last_basic_block; i++)
    {
      ira_bb_nodes[i].regno_allocno_map = NULL;
      memset (ira_bb_nodes[i].reg_pressure, 0,
	      sizeof (ira_bb_nodes[i].reg_pressure));
      ira_bb_nodes[i].all_allocnos = NULL;
      ira_bb_nodes[i].modified_regnos = NULL;
      ira_bb_nodes[i].border_allocnos = NULL;
      ira_bb_nodes[i].local_copies = NULL;
    }
  if (current_loops == NULL)
    {
      /* No CFG loop info: just one region covering the whole
	 function.  */
      ira_loop_nodes = ((struct ira_loop_tree_node *)
			ira_allocate (sizeof (struct ira_loop_tree_node)));
      init_loop_tree_node (ira_loop_nodes, 0);
      return;
    }
  ira_loop_nodes = ((struct ira_loop_tree_node *)
		    ira_allocate (sizeof (struct ira_loop_tree_node)
				  * VEC_length (loop_p, ira_loops.larray)));
  FOR_EACH_VEC_ELT (loop_p, ira_loops.larray, i, loop)
    {
      if (loop != ira_loops.tree_root)
	{
	  ira_loop_nodes[i].regno_allocno_map = NULL;
	  /* Skip loops entered (other than from the latch) or exited
	     through an abnormal critical edge: emitting insns on such
	     an edge would require creating a new basic block there
	     (see the comment above last_basic_block_before_change),
	     which is not possible for abnormal edges.  Such loops get
	     no node (regno_allocno_map stays NULL).  */
	  skip_p = false;
	  FOR_EACH_EDGE (e, ei, loop->header->preds)
	    if (e->src != loop->latch
		&& (e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
	      {
		skip_p = true;
		break;
	      }
	  if (skip_p)
	    continue;
	  edges = get_loop_exit_edges (loop);
	  FOR_EACH_VEC_ELT (edge, edges, j, e)
	    if ((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e))
	      {
		skip_p = true;
		break;
	      }
	  VEC_free (edge, heap, edges);
	  if (skip_p)
	    continue;
	}
      init_loop_tree_node (&ira_loop_nodes[i], loop->num);
    }
}
184 
185 /* The function returns TRUE if there are more one allocation
186    region.  */
187 static bool
188 more_one_region_p (void)
189 {
190   unsigned int i;
191   loop_p loop;
192 
193   if (current_loops != NULL)
194     FOR_EACH_VEC_ELT (loop_p, ira_loops.larray, i, loop)
195       if (ira_loop_nodes[i].regno_allocno_map != NULL
196 	  && ira_loop_tree_root != &ira_loop_nodes[i])
197 	return true;
198   return false;
199 }
200 
201 /* Free the loop tree node of a loop.  */
202 static void
203 finish_loop_tree_node (ira_loop_tree_node_t loop)
204 {
205   if (loop->regno_allocno_map != NULL)
206     {
207       ira_assert (loop->bb == NULL);
208       ira_free_bitmap (loop->local_copies);
209       ira_free_bitmap (loop->border_allocnos);
210       ira_free_bitmap (loop->modified_regnos);
211       ira_free_bitmap (loop->all_allocnos);
212       ira_free (loop->regno_allocno_map);
213       loop->regno_allocno_map = NULL;
214     }
215 }
216 
/* Free the loop tree nodes and the basic block nodes.  */
static void
finish_loop_tree_nodes (void)
{
  unsigned int i;
  loop_p loop;

  if (current_loops == NULL)
    finish_loop_tree_node (&ira_loop_nodes[0]);
  else
    FOR_EACH_VEC_ELT (loop_p, ira_loops.larray, i, loop)
      finish_loop_tree_node (&ira_loop_nodes[i]);
  ira_free (ira_loop_nodes);
  /* Only blocks that existed before live range splitting have IRA
     nodes; iterate up to the recorded count, not last_basic_block.  */
  for (i = 0; i < (unsigned int) last_basic_block_before_change; i++)
    {
      if (ira_bb_nodes[i].local_copies != NULL)
	ira_free_bitmap (ira_bb_nodes[i].local_copies);
      if (ira_bb_nodes[i].border_allocnos != NULL)
	ira_free_bitmap (ira_bb_nodes[i].border_allocnos);
      if (ira_bb_nodes[i].modified_regnos != NULL)
	ira_free_bitmap (ira_bb_nodes[i].modified_regnos);
      if (ira_bb_nodes[i].all_allocnos != NULL)
	ira_free_bitmap (ira_bb_nodes[i].all_allocnos);
      if (ira_bb_nodes[i].regno_allocno_map != NULL)
	ira_free (ira_bb_nodes[i].regno_allocno_map);
    }
  ira_free (ira_bb_nodes);
}
245 
246 
247 
/* The following recursive function adds LOOP to the loop tree
   hierarchy.  LOOP is added only once.  If LOOP is NULL we are adding
   the loop designating the whole function when CFG loops are not
   built.  */
static void
add_loop_to_tree (struct loop *loop)
{
  int loop_num;
  struct loop *parent;
  ira_loop_tree_node_t loop_node, parent_node;

  /* We can not use loop node access macros here because of potential
     checking and because the nodes are not initialized enough
     yet.  */
  if (loop != NULL && loop_outer (loop) != NULL)
    add_loop_to_tree (loop_outer (loop));
  loop_num = loop != NULL ? loop->num : 0;
  if (ira_loop_nodes[loop_num].regno_allocno_map != NULL
      && ira_loop_nodes[loop_num].children == NULL)
    {
      /* We have not added loop node to the tree yet.  */
      loop_node = &ira_loop_nodes[loop_num];
      loop_node->loop = loop;
      loop_node->bb = NULL;
      if (loop == NULL)
	parent = NULL;
      else
	{
	  /* Find the nearest enclosing loop that actually has a node;
	     loops rejected by create_loop_tree_nodes have a NULL
	     regno_allocno_map and are transparent in the tree.  */
	  for (parent = loop_outer (loop);
	       parent != NULL;
	       parent = loop_outer (parent))
	    if (ira_loop_nodes[parent->num].regno_allocno_map != NULL)
	      break;
	}
      if (parent == NULL)
	{
	  /* This is the root node of the tree.  */
	  loop_node->next = NULL;
	  loop_node->subloop_next = NULL;
	  loop_node->parent = NULL;
	}
      else
	{
	  /* Push onto the parent's children and subloops lists.  */
	  parent_node = &ira_loop_nodes[parent->num];
	  loop_node->next = parent_node->children;
	  parent_node->children = loop_node;
	  loop_node->subloop_next = parent_node->subloops;
	  parent_node->subloops = loop_node;
	  loop_node->parent = parent_node;
	}
    }
}
299 
300 /* The following recursive function sets up levels of nodes of the
301    tree given its root LOOP_NODE.  The enumeration starts with LEVEL.
302    The function returns maximal value of level in the tree + 1.  */
303 static int
304 setup_loop_tree_level (ira_loop_tree_node_t loop_node, int level)
305 {
306   int height, max_height;
307   ira_loop_tree_node_t subloop_node;
308 
309   ira_assert (loop_node->bb == NULL);
310   loop_node->level = level;
311   max_height = level + 1;
312   for (subloop_node = loop_node->subloops;
313        subloop_node != NULL;
314        subloop_node = subloop_node->subloop_next)
315     {
316       ira_assert (subloop_node->bb == NULL);
317       height = setup_loop_tree_level (subloop_node, level + 1);
318       if (height > max_height)
319 	max_height = height;
320     }
321   return max_height;
322 }
323 
/* Create the loop tree.  The algorithm is designed to provide correct
   order of loops (they are ordered by their last loop BB) and basic
   blocks in the chain formed by member next.  */
static void
form_loop_tree (void)
{
  basic_block bb;
  struct loop *parent;
  ira_loop_tree_node_t bb_node, loop_node;

  /* We can not use loop/bb node access macros because of potential
     checking and because the nodes are not initialized enough
     yet.  */
  FOR_EACH_BB (bb)
    {
      bb_node = &ira_bb_nodes[bb->index];
      bb_node->bb = bb;
      bb_node->loop = NULL;
      bb_node->subloops = NULL;
      bb_node->children = NULL;
      bb_node->subloop_next = NULL;
      bb_node->next = NULL;
      if (current_loops == NULL)
	parent = NULL;
      else
	{
	  /* Find the innermost enclosing loop that has a tree node
	     (loops rejected by create_loop_tree_nodes have none).  */
	  for (parent = bb->loop_father;
	       parent != NULL;
	       parent = loop_outer (parent))
	    if (ira_loop_nodes[parent->num].regno_allocno_map != NULL)
	      break;
	}
      /* Ensure the enclosing loop (or the function root when parent
	 is NULL) is linked into the tree, then chain this block onto
	 its children list.  */
      add_loop_to_tree (parent);
      loop_node = &ira_loop_nodes[parent == NULL ? 0 : parent->num];
      bb_node->next = loop_node->children;
      bb_node->parent = loop_node;
      loop_node->children = bb_node;
    }
  ira_loop_tree_root = IRA_LOOP_NODE_BY_INDEX (0);
  ira_loop_tree_height = setup_loop_tree_level (ira_loop_tree_root, 0);
  ira_assert (ira_loop_tree_root->regno_allocno_map != NULL);
}
366 
367 
368 
/* Rebuild IRA_REGNO_ALLOCNO_MAP and REGNO_ALLOCNO_MAPs of the loop
   tree nodes, sized for the current max_reg_num (), and repopulate
   them from the existing allocnos.  */
static void
rebuild_regno_allocno_maps (void)
{
  unsigned int l;
  int max_regno, regno;
  ira_allocno_t a;
  ira_loop_tree_node_t loop_tree_node;
  loop_p loop;
  ira_allocno_iterator ai;

  ira_assert (current_loops != NULL);
  max_regno = max_reg_num ();
  /* Reallocate each live per-loop map at the new size.  */
  FOR_EACH_VEC_ELT (loop_p, ira_loops.larray, l, loop)
    if (ira_loop_nodes[l].regno_allocno_map != NULL)
      {
	ira_free (ira_loop_nodes[l].regno_allocno_map);
	ira_loop_nodes[l].regno_allocno_map
	  = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
					    * max_regno);
	memset (ira_loop_nodes[l].regno_allocno_map, 0,
		sizeof (ira_allocno_t) * max_regno);
      }
  ira_free (ira_regno_allocno_map);
  ira_regno_allocno_map
    = (ira_allocno_t *) ira_allocate (max_regno * sizeof (ira_allocno_t));
  memset (ira_regno_allocno_map, 0, max_regno * sizeof (ira_allocno_t));
  /* Re-chain every non-cap allocno into the global per-regno list and
     record the first one seen per regno in its region's map.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      if (ALLOCNO_CAP_MEMBER (a) != NULL)
	/* Caps are not in the regno allocno maps.  */
	continue;
      regno = ALLOCNO_REGNO (a);
      loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
      ALLOCNO_NEXT_REGNO_ALLOCNO (a) = ira_regno_allocno_map[regno];
      ira_regno_allocno_map[regno] = a;
      if (loop_tree_node->regno_allocno_map[regno] == NULL)
	/* Remember that we can create temporary allocnos to break
	   cycles in register shuffle.  */
	loop_tree_node->regno_allocno_map[regno] = a;
    }
}
412 
413 
/* Pools for allocnos, allocno live ranges and objects.  */
static alloc_pool allocno_pool, live_range_pool, object_pool;

/* Vec containing references to all created allocnos.  It is the
   container behind the array ira_allocnos.  */
static VEC(ira_allocno_t,heap) *allocno_vec;

/* Vec containing references to all created ira_objects.  It is the
   container behind ira_object_id_map.  */
static VEC(ira_object_t,heap) *ira_object_id_map_vec;
424 
/* Initialize data concerning allocnos.  */
static void
initiate_allocnos (void)
{
  live_range_pool
    = create_alloc_pool ("live ranges",
			 sizeof (struct live_range), 100);
  allocno_pool
    = create_alloc_pool ("allocnos", sizeof (struct ira_allocno), 100);
  object_pool
    = create_alloc_pool ("objects", sizeof (struct ira_object), 100);
  /* Reserve room for roughly two allocnos/objects per pseudo.  */
  allocno_vec = VEC_alloc (ira_allocno_t, heap, max_reg_num () * 2);
  ira_allocnos = NULL;
  ira_allocnos_num = 0;
  ira_objects_num = 0;
  ira_object_id_map_vec
    = VEC_alloc (ira_object_t, heap, max_reg_num () * 2);
  ira_object_id_map = NULL;
  ira_regno_allocno_map
    = (ira_allocno_t *) ira_allocate (max_reg_num ()
				      * sizeof (ira_allocno_t));
  memset (ira_regno_allocno_map, 0, max_reg_num () * sizeof (ira_allocno_t));
}
448 
/* Create and return an object corresponding to a new allocno A.
   SUBWORD is the object's word index within the allocno (nonzero only
   for multi-object allocnos, see ira_create_allocno_objects).  */
static ira_object_t
ira_create_object (ira_allocno_t a, int subword)
{
  enum reg_class aclass = ALLOCNO_CLASS (a);
  ira_object_t obj = (ira_object_t) pool_alloc (object_pool);

  OBJECT_ALLOCNO (obj) = a;
  OBJECT_SUBWORD (obj) = subword;
  OBJECT_CONFLICT_ID (obj) = ira_objects_num;
  OBJECT_CONFLICT_VEC_P (obj) = false;
  OBJECT_CONFLICT_ARRAY (obj) = NULL;
  OBJECT_NUM_CONFLICTS (obj) = 0;
  /* Start the conflict sets with the unallocatable registers plus
     every hard register outside the allocno's class.  */
  COPY_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj), ira_no_alloc_regs);
  COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), ira_no_alloc_regs);
  IOR_COMPL_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
			  reg_class_contents[aclass]);
  IOR_COMPL_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
			  reg_class_contents[aclass]);
  /* MIN > MAX denotes an empty conflict id range until real ids are
     recorded (see ira_conflict_vector_profitable_p).  */
  OBJECT_MIN (obj) = INT_MAX;
  OBJECT_MAX (obj) = -1;
  OBJECT_LIVE_RANGES (obj) = NULL;

  /* Register the object; its conflict id equals its map index.  */
  VEC_safe_push (ira_object_t, heap, ira_object_id_map_vec, obj);
  ira_object_id_map
    = VEC_address (ira_object_t, ira_object_id_map_vec);
  ira_objects_num = VEC_length (ira_object_t, ira_object_id_map_vec);

  return obj;
}
479 
/* Create and return the allocno corresponding to REGNO in
   LOOP_TREE_NODE.  Add the allocno to the list of allocnos with the
   same regno if CAP_P is FALSE.  */
ira_allocno_t
ira_create_allocno (int regno, bool cap_p,
		    ira_loop_tree_node_t loop_tree_node)
{
  ira_allocno_t a;

  a = (ira_allocno_t) pool_alloc (allocno_pool);
  ALLOCNO_REGNO (a) = regno;
  ALLOCNO_LOOP_TREE_NODE (a) = loop_tree_node;
  if (! cap_p)
    {
      /* Link into the global per-regno chain and the per-region
	 map.  */
      ALLOCNO_NEXT_REGNO_ALLOCNO (a) = ira_regno_allocno_map[regno];
      ira_regno_allocno_map[regno] = a;
      if (loop_tree_node->regno_allocno_map[regno] == NULL)
	/* Remember that we can create temporary allocnos to break
	   cycles in register shuffle on region borders (see
	   ira-emit.c).  */
	loop_tree_node->regno_allocno_map[regno] = a;
    }
  ALLOCNO_CAP (a) = NULL;
  ALLOCNO_CAP_MEMBER (a) = NULL;
  ALLOCNO_NUM (a) = ira_allocnos_num;
  bitmap_set_bit (loop_tree_node->all_allocnos, ALLOCNO_NUM (a));
  /* Default-initialize the remaining members.  */
  ALLOCNO_NREFS (a) = 0;
  ALLOCNO_FREQ (a) = 0;
  ALLOCNO_HARD_REGNO (a) = -1;
  ALLOCNO_CALL_FREQ (a) = 0;
  ALLOCNO_CALLS_CROSSED_NUM (a) = 0;
#ifdef STACK_REGS
  ALLOCNO_NO_STACK_REG_P (a) = false;
  ALLOCNO_TOTAL_NO_STACK_REG_P (a) = false;
#endif
  ALLOCNO_DONT_REASSIGN_P (a) = false;
  ALLOCNO_BAD_SPILL_P (a) = false;
  ALLOCNO_ASSIGNED_P (a) = false;
  ALLOCNO_MODE (a) = (regno < 0 ? VOIDmode : PSEUDO_REGNO_MODE (regno));
  ALLOCNO_COPIES (a) = NULL;
  ALLOCNO_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_CONFLICT_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
  ALLOCNO_CLASS (a) = NO_REGS;
  ALLOCNO_UPDATED_CLASS_COST (a) = 0;
  ALLOCNO_CLASS_COST (a) = 0;
  ALLOCNO_MEMORY_COST (a) = 0;
  ALLOCNO_UPDATED_MEMORY_COST (a) = 0;
  ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a) = 0;
  ALLOCNO_NUM_OBJECTS (a) = 0;

  ALLOCNO_ADD_DATA (a) = NULL;
  /* Register in the global allocno array; index == ALLOCNO_NUM.  */
  VEC_safe_push (ira_allocno_t, heap, allocno_vec, a);
  ira_allocnos = VEC_address (ira_allocno_t, allocno_vec);
  ira_allocnos_num = VEC_length (ira_allocno_t, allocno_vec);

  return a;
}
539 
/* Set up register class ACLASS for A and update the conflict hard
   registers of all A's objects: every hard register outside ACLASS
   becomes a conflict.  */
void
ira_set_allocno_class (ira_allocno_t a, enum reg_class aclass)
{
  ira_allocno_object_iterator oi;
  ira_object_t obj;

  ALLOCNO_CLASS (a) = aclass;
  FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
    {
      IOR_COMPL_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
			      reg_class_contents[aclass]);
      IOR_COMPL_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
			      reg_class_contents[aclass]);
    }
}
557 
558 /* Determine the number of objects we should associate with allocno A
559    and allocate them.  */
560 void
561 ira_create_allocno_objects (ira_allocno_t a)
562 {
563   enum machine_mode mode = ALLOCNO_MODE (a);
564   enum reg_class aclass = ALLOCNO_CLASS (a);
565   int n = ira_reg_class_max_nregs[aclass][mode];
566   int i;
567 
568   if (GET_MODE_SIZE (mode) != 2 * UNITS_PER_WORD || n != 2)
569     n = 1;
570 
571   ALLOCNO_NUM_OBJECTS (a) = n;
572   for (i = 0; i < n; i++)
573     ALLOCNO_OBJECT (a, i) = ira_create_object (a, i);
574 }
575 
/* For each allocno, set ALLOCNO_NUM_OBJECTS and create the
   ALLOCNO_OBJECT structures.  This must be called after the allocno
   classes are known (ira_create_allocno_objects reads them).  */
static void
create_allocno_objects (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;

  FOR_EACH_ALLOCNO (a, ai)
    ira_create_allocno_objects (a);
}
588 
/* Merge hard register conflict information of all objects associated
   with allocno FROM into the corresponding objects associated with
   TO.  (The code IORs FROM's sets into TO's.)  If TOTAL_ONLY is true,
   we only merge OBJECT_TOTAL_CONFLICT_HARD_REGS.  */
static void
merge_hard_reg_conflicts (ira_allocno_t from, ira_allocno_t to,
			  bool total_only)
{
  int i;
  /* Both allocnos must consist of the same number of objects.  */
  gcc_assert (ALLOCNO_NUM_OBJECTS (to) == ALLOCNO_NUM_OBJECTS (from));
  for (i = 0; i < ALLOCNO_NUM_OBJECTS (to); i++)
    {
      ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
      ira_object_t to_obj = ALLOCNO_OBJECT (to, i);

      if (!total_only)
	IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (to_obj),
			  OBJECT_CONFLICT_HARD_REGS (from_obj));
      IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (to_obj),
			OBJECT_TOTAL_CONFLICT_HARD_REGS (from_obj));
    }
#ifdef STACK_REGS
  /* Propagate the no-stack-register flags the same way.  */
  if (!total_only && ALLOCNO_NO_STACK_REG_P (from))
    ALLOCNO_NO_STACK_REG_P (to) = true;
  if (ALLOCNO_TOTAL_NO_STACK_REG_P (from))
    ALLOCNO_TOTAL_NO_STACK_REG_P (to) = true;
#endif
}
616 
/* Update hard register conflict information for all objects associated
   with A to include the regs in SET (both the build-time and the total
   conflict sets).  */
void
ior_hard_reg_conflicts (ira_allocno_t a, HARD_REG_SET *set)
{
  ira_allocno_object_iterator i;
  ira_object_t obj;

  FOR_EACH_ALLOCNO_OBJECT (a, obj, i)
    {
      IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj), *set);
      IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), *set);
    }
}
631 
632 /* Return TRUE if a conflict vector with NUM elements is more
633    profitable than a conflict bit vector for OBJ.  */
634 bool
635 ira_conflict_vector_profitable_p (ira_object_t obj, int num)
636 {
637   int nw;
638   int max = OBJECT_MAX (obj);
639   int min = OBJECT_MIN (obj);
640 
641   if (max < min)
642     /* We prefer a bit vector in such case because it does not result
643        in allocation.  */
644     return false;
645 
646   nw = (max - min + IRA_INT_BITS) / IRA_INT_BITS;
647   return (2 * sizeof (ira_object_t) * (num + 1)
648 	  < 3 * nw * sizeof (IRA_INT_TYPE));
649 }
650 
651 /* Allocates and initialize the conflict vector of OBJ for NUM
652    conflicting objects.  */
653 void
654 ira_allocate_conflict_vec (ira_object_t obj, int num)
655 {
656   int size;
657   ira_object_t *vec;
658 
659   ira_assert (OBJECT_CONFLICT_ARRAY (obj) == NULL);
660   num++; /* for NULL end marker  */
661   size = sizeof (ira_object_t) * num;
662   OBJECT_CONFLICT_ARRAY (obj) = ira_allocate (size);
663   vec = (ira_object_t *) OBJECT_CONFLICT_ARRAY (obj);
664   vec[0] = NULL;
665   OBJECT_NUM_CONFLICTS (obj) = 0;
666   OBJECT_CONFLICT_ARRAY_SIZE (obj) = size;
667   OBJECT_CONFLICT_VEC_P (obj) = true;
668 }
669 
670 /* Allocate and initialize the conflict bit vector of OBJ.  */
671 static void
672 allocate_conflict_bit_vec (ira_object_t obj)
673 {
674   unsigned int size;
675 
676   ira_assert (OBJECT_CONFLICT_ARRAY (obj) == NULL);
677   size = ((OBJECT_MAX (obj) - OBJECT_MIN (obj) + IRA_INT_BITS)
678 	  / IRA_INT_BITS * sizeof (IRA_INT_TYPE));
679   OBJECT_CONFLICT_ARRAY (obj) = ira_allocate (size);
680   memset (OBJECT_CONFLICT_ARRAY (obj), 0, size);
681   OBJECT_CONFLICT_ARRAY_SIZE (obj) = size;
682   OBJECT_CONFLICT_VEC_P (obj) = false;
683 }
684 
685 /* Allocate and initialize the conflict vector or conflict bit vector
686    of OBJ for NUM conflicting allocnos whatever is more profitable.  */
687 void
688 ira_allocate_object_conflicts (ira_object_t obj, int num)
689 {
690   if (ira_conflict_vector_profitable_p (obj, num))
691     ira_allocate_conflict_vec (obj, num);
692   else
693     allocate_conflict_bit_vec (obj);
694 }
695 
696 /* Add OBJ2 to the conflicts of OBJ1.  */
697 static void
698 add_to_conflicts (ira_object_t obj1, ira_object_t obj2)
699 {
700   int num;
701   unsigned int size;
702 
703   if (OBJECT_CONFLICT_VEC_P (obj1))
704     {
705       ira_object_t *vec = OBJECT_CONFLICT_VEC (obj1);
706       int curr_num = OBJECT_NUM_CONFLICTS (obj1);
707       num = curr_num + 2;
708       if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) < num * sizeof (ira_object_t))
709 	{
710 	  ira_object_t *newvec;
711 	  size = (3 * num / 2 + 1) * sizeof (ira_allocno_t);
712 	  newvec = (ira_object_t *) ira_allocate (size);
713 	  memcpy (newvec, vec, curr_num * sizeof (ira_object_t));
714 	  ira_free (vec);
715 	  vec = newvec;
716 	  OBJECT_CONFLICT_ARRAY (obj1) = vec;
717 	  OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
718 	}
719       vec[num - 2] = obj2;
720       vec[num - 1] = NULL;
721       OBJECT_NUM_CONFLICTS (obj1)++;
722     }
723   else
724     {
725       int nw, added_head_nw, id;
726       IRA_INT_TYPE *vec = OBJECT_CONFLICT_BITVEC (obj1);
727 
728       id = OBJECT_CONFLICT_ID (obj2);
729       if (OBJECT_MIN (obj1) > id)
730 	{
731 	  /* Expand head of the bit vector.  */
732 	  added_head_nw = (OBJECT_MIN (obj1) - id - 1) / IRA_INT_BITS + 1;
733 	  nw = (OBJECT_MAX (obj1) - OBJECT_MIN (obj1)) / IRA_INT_BITS + 1;
734 	  size = (nw + added_head_nw) * sizeof (IRA_INT_TYPE);
735 	  if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) >= size)
736 	    {
737 	      memmove ((char *) vec + added_head_nw * sizeof (IRA_INT_TYPE),
738 		       vec, nw * sizeof (IRA_INT_TYPE));
739 	      memset (vec, 0, added_head_nw * sizeof (IRA_INT_TYPE));
740 	    }
741 	  else
742 	    {
743 	      size
744 		= (3 * (nw + added_head_nw) / 2 + 1) * sizeof (IRA_INT_TYPE);
745 	      vec = (IRA_INT_TYPE *) ira_allocate (size);
746 	      memcpy ((char *) vec + added_head_nw * sizeof (IRA_INT_TYPE),
747 		      OBJECT_CONFLICT_ARRAY (obj1), nw * sizeof (IRA_INT_TYPE));
748 	      memset (vec, 0, added_head_nw * sizeof (IRA_INT_TYPE));
749 	      memset ((char *) vec
750 		      + (nw + added_head_nw) * sizeof (IRA_INT_TYPE),
751 		      0, size - (nw + added_head_nw) * sizeof (IRA_INT_TYPE));
752 	      ira_free (OBJECT_CONFLICT_ARRAY (obj1));
753 	      OBJECT_CONFLICT_ARRAY (obj1) = vec;
754 	      OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
755 	    }
756 	  OBJECT_MIN (obj1) -= added_head_nw * IRA_INT_BITS;
757 	}
758       else if (OBJECT_MAX (obj1) < id)
759 	{
760 	  nw = (id - OBJECT_MIN (obj1)) / IRA_INT_BITS + 1;
761 	  size = nw * sizeof (IRA_INT_TYPE);
762 	  if (OBJECT_CONFLICT_ARRAY_SIZE (obj1) < size)
763 	    {
764 	      /* Expand tail of the bit vector.  */
765 	      size = (3 * nw / 2 + 1) * sizeof (IRA_INT_TYPE);
766 	      vec = (IRA_INT_TYPE *) ira_allocate (size);
767 	      memcpy (vec, OBJECT_CONFLICT_ARRAY (obj1), OBJECT_CONFLICT_ARRAY_SIZE (obj1));
768 	      memset ((char *) vec + OBJECT_CONFLICT_ARRAY_SIZE (obj1),
769 		      0, size - OBJECT_CONFLICT_ARRAY_SIZE (obj1));
770 	      ira_free (OBJECT_CONFLICT_ARRAY (obj1));
771 	      OBJECT_CONFLICT_ARRAY (obj1) = vec;
772 	      OBJECT_CONFLICT_ARRAY_SIZE (obj1) = size;
773 	    }
774 	  OBJECT_MAX (obj1) = id;
775 	}
776       SET_MINMAX_SET_BIT (vec, id, OBJECT_MIN (obj1), OBJECT_MAX (obj1));
777     }
778 }
779 
780 /* Add OBJ1 to the conflicts of OBJ2 and vice versa.  */
781 static void
782 ira_add_conflict (ira_object_t obj1, ira_object_t obj2)
783 {
784   add_to_conflicts (obj1, obj2);
785   add_to_conflicts (obj2, obj1);
786 }
787 
788 /* Clear all conflicts of OBJ.  */
789 static void
790 clear_conflicts (ira_object_t obj)
791 {
792   if (OBJECT_CONFLICT_VEC_P (obj))
793     {
794       OBJECT_NUM_CONFLICTS (obj) = 0;
795       OBJECT_CONFLICT_VEC (obj)[0] = NULL;
796     }
797   else if (OBJECT_CONFLICT_ARRAY_SIZE (obj) != 0)
798     {
799       int nw;
800 
801       nw = (OBJECT_MAX (obj) - OBJECT_MIN (obj)) / IRA_INT_BITS + 1;
802       memset (OBJECT_CONFLICT_BITVEC (obj), 0, nw * sizeof (IRA_INT_TYPE));
803     }
804 }
805 
/* The array used to find duplications in conflict vectors of
   allocnos; indexed by conflict id.  */
static int *conflict_check;

/* The tick value used to mark presence in the conflict vector of the
   current allocno; bumping it invalidates all previous marks at
   once.  */
static int curr_conflict_check_tick;
813 
814 /* Remove duplications in conflict vector of OBJ.  */
815 static void
816 compress_conflict_vec (ira_object_t obj)
817 {
818   ira_object_t *vec, conflict_obj;
819   int i, j;
820 
821   ira_assert (OBJECT_CONFLICT_VEC_P (obj));
822   vec = OBJECT_CONFLICT_VEC (obj);
823   curr_conflict_check_tick++;
824   for (i = j = 0; (conflict_obj = vec[i]) != NULL; i++)
825     {
826       int id = OBJECT_CONFLICT_ID (conflict_obj);
827       if (conflict_check[id] != curr_conflict_check_tick)
828 	{
829 	  conflict_check[id] = curr_conflict_check_tick;
830 	  vec[j++] = conflict_obj;
831 	}
832     }
833   OBJECT_NUM_CONFLICTS (obj) = j;
834   vec[j] = NULL;
835 }
836 
837 /* Remove duplications in conflict vectors of all allocnos.  */
838 static void
839 compress_conflict_vecs (void)
840 {
841   ira_object_t obj;
842   ira_object_iterator oi;
843 
844   conflict_check = (int *) ira_allocate (sizeof (int) * ira_objects_num);
845   memset (conflict_check, 0, sizeof (int) * ira_objects_num);
846   curr_conflict_check_tick = 0;
847   FOR_EACH_OBJECT (obj, oi)
848     {
849       if (OBJECT_CONFLICT_VEC_P (obj))
850 	compress_conflict_vec (obj);
851     }
852   ira_free (conflict_check);
853 }
854 
/* This recursive function outputs allocno A and if it is a cap the
   function outputs its members.  */
void
ira_print_expanded_allocno (ira_allocno_t a)
{
  basic_block bb;

  fprintf (ira_dump_file, " a%d(r%d", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
  /* Print the allocno's region: bNN for a basic block node, lNN for a
     loop node.  */
  if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
    fprintf (ira_dump_file, ",b%d", bb->index);
  else
    fprintf (ira_dump_file, ",l%d", ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
  if (ALLOCNO_CAP_MEMBER (a) != NULL)
    {
      /* Recurse into the cap's member allocno.  */
      fprintf (ira_dump_file, ":");
      ira_print_expanded_allocno (ALLOCNO_CAP_MEMBER (a));
    }
  fprintf (ira_dump_file, ")");
}
874 
/* Create and return the cap representing allocno A in the parent
   loop.  The cap inherits A's mode, class, costs, frequencies and
   hard register conflicts.  */
static ira_allocno_t
create_cap_allocno (ira_allocno_t a)
{
  ira_allocno_t cap;
  ira_loop_tree_node_t parent;
  enum reg_class aclass;

  parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
  /* CAP_P is TRUE: caps are not entered into the regno->allocno
     maps.  */
  cap = ira_create_allocno (ALLOCNO_REGNO (a), true, parent);
  ALLOCNO_MODE (cap) = ALLOCNO_MODE (a);
  aclass = ALLOCNO_CLASS (a);
  ira_set_allocno_class (cap, aclass);
  ira_create_allocno_objects (cap);
  /* Link cap and member both ways.  */
  ALLOCNO_CAP_MEMBER (cap) = a;
  ALLOCNO_CAP (a) = cap;
  /* Copy the cost and frequency information from the member.  */
  ALLOCNO_CLASS_COST (cap) = ALLOCNO_CLASS_COST (a);
  ALLOCNO_MEMORY_COST (cap) = ALLOCNO_MEMORY_COST (a);
  ira_allocate_and_copy_costs
    (&ALLOCNO_HARD_REG_COSTS (cap), aclass, ALLOCNO_HARD_REG_COSTS (a));
  ira_allocate_and_copy_costs
    (&ALLOCNO_CONFLICT_HARD_REG_COSTS (cap), aclass,
     ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
  ALLOCNO_BAD_SPILL_P (cap) = ALLOCNO_BAD_SPILL_P (a);
  ALLOCNO_NREFS (cap) = ALLOCNO_NREFS (a);
  ALLOCNO_FREQ (cap) = ALLOCNO_FREQ (a);
  ALLOCNO_CALL_FREQ (cap) = ALLOCNO_CALL_FREQ (a);

  merge_hard_reg_conflicts (a, cap, false);

  ALLOCNO_CALLS_CROSSED_NUM (cap) = ALLOCNO_CALLS_CROSSED_NUM (a);
  if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
    {
      fprintf (ira_dump_file, "    Creating cap ");
      ira_print_expanded_allocno (cap);
      fprintf (ira_dump_file, "\n");
    }
  return cap;
}
915 
916 /* Create and return a live range for OBJECT with given attributes.  */
917 live_range_t
918 ira_create_live_range (ira_object_t obj, int start, int finish,
919 		       live_range_t next)
920 {
921   live_range_t p;
922 
923   p = (live_range_t) pool_alloc (live_range_pool);
924   p->object = obj;
925   p->start = start;
926   p->finish = finish;
927   p->next = next;
928   return p;
929 }
930 
931 /* Create a new live range for OBJECT and queue it at the head of its
932    live range list.  */
933 void
934 ira_add_live_range_to_object (ira_object_t object, int start, int finish)
935 {
936   live_range_t p;
937   p = ira_create_live_range (object, start, finish,
938 			     OBJECT_LIVE_RANGES (object));
939   OBJECT_LIVE_RANGES (object) = p;
940 }
941 
942 /* Copy allocno live range R and return the result.  */
943 static live_range_t
944 copy_live_range (live_range_t r)
945 {
946   live_range_t p;
947 
948   p = (live_range_t) pool_alloc (live_range_pool);
949   *p = *r;
950   return p;
951 }
952 
953 /* Copy allocno live range list given by its head R and return the
954    result.  */
955 live_range_t
956 ira_copy_live_range_list (live_range_t r)
957 {
958   live_range_t p, first, last;
959 
960   if (r == NULL)
961     return NULL;
962   for (first = last = NULL; r != NULL; r = r->next)
963     {
964       p = copy_live_range (r);
965       if (first == NULL)
966 	first = p;
967       else
968 	last->next = p;
969       last = p;
970     }
971   return first;
972 }
973 
/* Merge ranges R1 and R2 and returns the result.  The function
   maintains the order of ranges and tries to minimize number of the
   result ranges.  The merge is destructive: nodes absorbed into
   another range are returned to the live range pool, and the
   surviving nodes are relinked in place.  */
live_range_t
ira_merge_live_ranges (live_range_t r1, live_range_t r2)
{
  live_range_t first, last, temp;

  if (r1 == NULL)
    return r2;
  if (r2 == NULL)
    return r1;
  for (first = last = NULL; r1 != NULL && r2 != NULL;)
    {
      /* Keep the invariant that R1 points at the range with the
	 greater (or equal) start point.  */
      if (r1->start < r2->start)
	{
	  temp = r1;
	  r1 = r2;
	  r2 = temp;
	}
      /* "+ 1" also merges ranges that merely touch, so [1,2] and
	 [3,4] become a single range [1,4].  */
      if (r1->start <= r2->finish + 1)
	{
	  /* Intersected ranges: merge r1 and r2 into r1.  */
	  r1->start = r2->start;
	  if (r1->finish < r2->finish)
	    r1->finish = r2->finish;
	  temp = r2;
	  r2 = r2->next;
	  ira_finish_live_range (temp);
	  if (r2 == NULL)
	    {
	      /* To try to merge with subsequent ranges in r1.  */
	      r2 = r1->next;
	      r1->next = NULL;
	    }
	}
      else
	{
	  /* Add r1 to the result.  */
	  if (first == NULL)
	    first = last = r1;
	  else
	    {
	      last->next = r1;
	      last = r1;
	    }
	  r1 = r1->next;
	  if (r1 == NULL)
	    {
	      /* To try to merge with subsequent ranges in r2.  */
	      r1 = r2->next;
	      r2->next = NULL;
	    }
	}
    }
  /* Append the leftover single range, if any; the swapping above
     guarantees at most one unprocessed range remains in each list.  */
  if (r1 != NULL)
    {
      if (first == NULL)
	first = r1;
      else
	last->next = r1;
      ira_assert (r1->next == NULL);
    }
  else if (r2 != NULL)
    {
      if (first == NULL)
	first = r2;
      else
	last->next = r2;
      ira_assert (r2->next == NULL);
    }
  else
    {
      ira_assert (last->next == NULL);
    }
  return first;
}
1051 
1052 /* Return TRUE if live ranges R1 and R2 intersect.  */
1053 bool
1054 ira_live_ranges_intersect_p (live_range_t r1, live_range_t r2)
1055 {
1056   /* Remember the live ranges are always kept ordered.  */
1057   while (r1 != NULL && r2 != NULL)
1058     {
1059       if (r1->start > r2->finish)
1060 	r1 = r1->next;
1061       else if (r2->start > r1->finish)
1062 	r2 = r2->next;
1063       else
1064 	return true;
1065     }
1066   return false;
1067 }
1068 
/* Return allocno live range R to the live range allocation pool.  */
void
ira_finish_live_range (live_range_t r)
{
  pool_free (live_range_pool, r);
}
1075 
1076 /* Free list of allocno live ranges starting with R.  */
1077 void
1078 ira_finish_live_range_list (live_range_t r)
1079 {
1080   live_range_t next_r;
1081 
1082   for (; r != NULL; r = next_r)
1083     {
1084       next_r = r->next;
1085       ira_finish_live_range (r);
1086     }
1087 }
1088 
1089 /* Free updated register costs of allocno A.  */
1090 void
1091 ira_free_allocno_updated_costs (ira_allocno_t a)
1092 {
1093   enum reg_class aclass;
1094 
1095   aclass = ALLOCNO_CLASS (a);
1096   if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) != NULL)
1097     ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), aclass);
1098   ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
1099   if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) != NULL)
1100     ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
1101 			  aclass);
1102   ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
1103 }
1104 
1105 /* Free and nullify all cost vectors allocated earlier for allocno
1106    A.  */
1107 static void
1108 ira_free_allocno_costs (ira_allocno_t a)
1109 {
1110   enum reg_class aclass = ALLOCNO_CLASS (a);
1111   ira_object_t obj;
1112   ira_allocno_object_iterator oi;
1113 
1114   FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
1115     {
1116       ira_finish_live_range_list (OBJECT_LIVE_RANGES (obj));
1117       ira_object_id_map[OBJECT_CONFLICT_ID (obj)] = NULL;
1118       if (OBJECT_CONFLICT_ARRAY (obj) != NULL)
1119 	ira_free (OBJECT_CONFLICT_ARRAY (obj));
1120       pool_free (object_pool, obj);
1121     }
1122 
1123   ira_allocnos[ALLOCNO_NUM (a)] = NULL;
1124   if (ALLOCNO_HARD_REG_COSTS (a) != NULL)
1125     ira_free_cost_vector (ALLOCNO_HARD_REG_COSTS (a), aclass);
1126   if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) != NULL)
1127     ira_free_cost_vector (ALLOCNO_CONFLICT_HARD_REG_COSTS (a), aclass);
1128   if (ALLOCNO_UPDATED_HARD_REG_COSTS (a) != NULL)
1129     ira_free_cost_vector (ALLOCNO_UPDATED_HARD_REG_COSTS (a), aclass);
1130   if (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) != NULL)
1131     ira_free_cost_vector (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a),
1132 			  aclass);
1133   ALLOCNO_HARD_REG_COSTS (a) = NULL;
1134   ALLOCNO_CONFLICT_HARD_REG_COSTS (a) = NULL;
1135   ALLOCNO_UPDATED_HARD_REG_COSTS (a) = NULL;
1136   ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) = NULL;
1137 }
1138 
/* Free the memory allocated for allocno A: first its attached data
   (objects, live ranges, cost vectors), then the allocno itself.  */
static void
finish_allocno (ira_allocno_t a)
{
  ira_free_allocno_costs (a);
  pool_free (allocno_pool, a);
}
1146 
/* Free the memory allocated for all allocnos, the regno->allocno map,
   the container vectors, and the allocation pools.  */
static void
finish_allocnos (void)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;

  FOR_EACH_ALLOCNO (a, ai)
    finish_allocno (a);
  ira_free (ira_regno_allocno_map);
  VEC_free (ira_object_t, heap, ira_object_id_map_vec);
  VEC_free (ira_allocno_t, heap, allocno_vec);
  free_alloc_pool (allocno_pool);
  free_alloc_pool (object_pool);
  free_alloc_pool (live_range_pool);
}
1163 
1164 
1165 
/* Pool for allocno copies.  */
static alloc_pool copy_pool;

/* Vec containing references to all created copies.  It is a
   container of array ira_copies.  */
static VEC(ira_copy_t,heap) *copy_vec;

/* The function initializes data concerning allocno copies: the copy
   allocation pool, the copy vector, and the exported ira_copies
   array and counter.  */
static void
initiate_copies (void)
{
  copy_pool
    = create_alloc_pool ("copies", sizeof (struct ira_allocno_copy), 100);
  copy_vec = VEC_alloc (ira_copy_t, heap, get_max_uid ());
  ira_copies = NULL;
  ira_copies_num = 0;
}
1183 
1184 /* Return copy connecting A1 and A2 and originated from INSN of
1185    LOOP_TREE_NODE if any.  */
1186 static ira_copy_t
1187 find_allocno_copy (ira_allocno_t a1, ira_allocno_t a2, rtx insn,
1188 		   ira_loop_tree_node_t loop_tree_node)
1189 {
1190   ira_copy_t cp, next_cp;
1191   ira_allocno_t another_a;
1192 
1193   for (cp = ALLOCNO_COPIES (a1); cp != NULL; cp = next_cp)
1194     {
1195       if (cp->first == a1)
1196 	{
1197 	  next_cp = cp->next_first_allocno_copy;
1198 	  another_a = cp->second;
1199 	}
1200       else if (cp->second == a1)
1201 	{
1202 	  next_cp = cp->next_second_allocno_copy;
1203 	  another_a = cp->first;
1204 	}
1205       else
1206 	gcc_unreachable ();
1207       if (another_a == a2 && cp->insn == insn
1208 	  && cp->loop_tree_node == loop_tree_node)
1209 	return cp;
1210     }
1211   return NULL;
1212 }
1213 
1214 /* Create and return copy with given attributes LOOP_TREE_NODE, FIRST,
1215    SECOND, FREQ, CONSTRAINT_P, and INSN.  */
1216 ira_copy_t
1217 ira_create_copy (ira_allocno_t first, ira_allocno_t second, int freq,
1218 		 bool constraint_p, rtx insn,
1219 		 ira_loop_tree_node_t loop_tree_node)
1220 {
1221   ira_copy_t cp;
1222 
1223   cp = (ira_copy_t) pool_alloc (copy_pool);
1224   cp->num = ira_copies_num;
1225   cp->first = first;
1226   cp->second = second;
1227   cp->freq = freq;
1228   cp->constraint_p = constraint_p;
1229   cp->insn = insn;
1230   cp->loop_tree_node = loop_tree_node;
1231   VEC_safe_push (ira_copy_t, heap, copy_vec, cp);
1232   ira_copies = VEC_address (ira_copy_t, copy_vec);
1233   ira_copies_num = VEC_length (ira_copy_t, copy_vec);
1234   return cp;
1235 }
1236 
/* Attach a copy CP to allocnos involved into the copy.  Every copy is
   threaded onto two doubly-linked lists, one per endpoint allocno;
   which prev/next fields of the old head must be updated depends on
   whether that head participates in the list as its own first or
   second allocno.  */
void
ira_add_allocno_copy_to_list (ira_copy_t cp)
{
  ira_allocno_t first = cp->first, second = cp->second;

  cp->prev_first_allocno_copy = NULL;
  cp->prev_second_allocno_copy = NULL;
  /* Push CP onto the head of FIRST's copy list.  */
  cp->next_first_allocno_copy = ALLOCNO_COPIES (first);
  if (cp->next_first_allocno_copy != NULL)
    {
      if (cp->next_first_allocno_copy->first == first)
	cp->next_first_allocno_copy->prev_first_allocno_copy = cp;
      else
	cp->next_first_allocno_copy->prev_second_allocno_copy = cp;
    }
  /* Push CP onto the head of SECOND's copy list.  */
  cp->next_second_allocno_copy = ALLOCNO_COPIES (second);
  if (cp->next_second_allocno_copy != NULL)
    {
      if (cp->next_second_allocno_copy->second == second)
	cp->next_second_allocno_copy->prev_second_allocno_copy = cp;
      else
	cp->next_second_allocno_copy->prev_first_allocno_copy = cp;
    }
  ALLOCNO_COPIES (first) = cp;
  ALLOCNO_COPIES (second) = cp;
}
1264 
1265 /* Make a copy CP a canonical copy where number of the
1266    first allocno is less than the second one.  */
1267 void
1268 ira_swap_allocno_copy_ends_if_necessary (ira_copy_t cp)
1269 {
1270   ira_allocno_t temp;
1271   ira_copy_t temp_cp;
1272 
1273   if (ALLOCNO_NUM (cp->first) <= ALLOCNO_NUM (cp->second))
1274     return;
1275 
1276   temp = cp->first;
1277   cp->first = cp->second;
1278   cp->second = temp;
1279 
1280   temp_cp = cp->prev_first_allocno_copy;
1281   cp->prev_first_allocno_copy = cp->prev_second_allocno_copy;
1282   cp->prev_second_allocno_copy = temp_cp;
1283 
1284   temp_cp = cp->next_first_allocno_copy;
1285   cp->next_first_allocno_copy = cp->next_second_allocno_copy;
1286   cp->next_second_allocno_copy = temp_cp;
1287 }
1288 
1289 /* Create (or update frequency if the copy already exists) and return
1290    the copy of allocnos FIRST and SECOND with frequency FREQ
1291    corresponding to move insn INSN (if any) and originated from
1292    LOOP_TREE_NODE.  */
1293 ira_copy_t
1294 ira_add_allocno_copy (ira_allocno_t first, ira_allocno_t second, int freq,
1295 		      bool constraint_p, rtx insn,
1296 		      ira_loop_tree_node_t loop_tree_node)
1297 {
1298   ira_copy_t cp;
1299 
1300   if ((cp = find_allocno_copy (first, second, insn, loop_tree_node)) != NULL)
1301     {
1302       cp->freq += freq;
1303       return cp;
1304     }
1305   cp = ira_create_copy (first, second, freq, constraint_p, insn,
1306 			loop_tree_node);
1307   ira_assert (first != NULL && second != NULL);
1308   ira_add_allocno_copy_to_list (cp);
1309   ira_swap_allocno_copy_ends_if_necessary (cp);
1310   return cp;
1311 }
1312 
1313 /* Print info about copy CP into file F.  */
1314 static void
1315 print_copy (FILE *f, ira_copy_t cp)
1316 {
1317   fprintf (f, "  cp%d:a%d(r%d)<->a%d(r%d)@%d:%s\n", cp->num,
1318 	   ALLOCNO_NUM (cp->first), ALLOCNO_REGNO (cp->first),
1319 	   ALLOCNO_NUM (cp->second), ALLOCNO_REGNO (cp->second), cp->freq,
1320 	   cp->insn != NULL
1321 	   ? "move" : cp->constraint_p ? "constraint" : "shuffle");
1322 }
1323 
/* Print info about copy CP into stderr.  Intended for use from a
   debugger.  */
void
ira_debug_copy (ira_copy_t cp)
{
  print_copy (stderr, cp);
}
1330 
/* Print info about all copies into file F, one line per copy.  */
static void
print_copies (FILE *f)
{
  ira_copy_t cp;
  ira_copy_iterator ci;

  FOR_EACH_COPY (cp, ci)
    print_copy (f, cp);
}
1341 
/* Print info about all copies into stderr.  Intended for use from a
   debugger.  */
void
ira_debug_copies (void)
{
  print_copies (stderr);
}
1348 
1349 /* Print info about copies involving allocno A into file F.  */
1350 static void
1351 print_allocno_copies (FILE *f, ira_allocno_t a)
1352 {
1353   ira_allocno_t another_a;
1354   ira_copy_t cp, next_cp;
1355 
1356   fprintf (f, " a%d(r%d):", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
1357   for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
1358     {
1359       if (cp->first == a)
1360 	{
1361 	  next_cp = cp->next_first_allocno_copy;
1362 	  another_a = cp->second;
1363 	}
1364       else if (cp->second == a)
1365 	{
1366 	  next_cp = cp->next_second_allocno_copy;
1367 	  another_a = cp->first;
1368 	}
1369       else
1370 	gcc_unreachable ();
1371       fprintf (f, " cp%d:a%d(r%d)@%d", cp->num,
1372 	       ALLOCNO_NUM (another_a), ALLOCNO_REGNO (another_a), cp->freq);
1373     }
1374   fprintf (f, "\n");
1375 }
1376 
/* Print info about copies involving allocno A into stderr.  Intended
   for use from a debugger.  */
void
ira_debug_allocno_copies (ira_allocno_t a)
{
  print_allocno_copies (stderr, a);
}
1383 
/* The function returns copy CP to the copy allocation pool.  */
static void
finish_copy (ira_copy_t cp)
{
  pool_free (copy_pool, cp);
}
1390 
1391 
/* Free memory allocated for all copies, the copy vector, and the
   copy allocation pool.  */
static void
finish_copies (void)
{
  ira_copy_t cp;
  ira_copy_iterator ci;

  FOR_EACH_COPY (cp, ci)
    finish_copy (cp);
  VEC_free (ira_copy_t, heap, copy_vec);
  free_alloc_pool (copy_pool);
}
1404 
1405 
1406 
/* Pools for cost vectors.  Only the entries indexed by allocno
   classes are initialized (see initiate_cost_vectors).  */
static alloc_pool cost_vector_pool[N_REG_CLASSES];
1409 
1410 /* The function initiates work with hard register cost vectors.  It
1411    creates allocation pool for each allocno class.  */
1412 static void
1413 initiate_cost_vectors (void)
1414 {
1415   int i;
1416   enum reg_class aclass;
1417 
1418   for (i = 0; i < ira_allocno_classes_num; i++)
1419     {
1420       aclass = ira_allocno_classes[i];
1421       cost_vector_pool[aclass]
1422 	= create_alloc_pool ("cost vectors",
1423 			     sizeof (int) * ira_class_hard_regs_num[aclass],
1424 			     100);
1425     }
1426 }
1427 
/* Allocate and return a cost vector for ACLASS from the class's
   pool.  */
int *
ira_allocate_cost_vector (reg_class_t aclass)
{
  return (int *) pool_alloc (cost_vector_pool[(int) aclass]);
}
1434 
/* Return cost vector VEC of class ACLASS to its pool.  VEC must not
   be NULL.  */
void
ira_free_cost_vector (int *vec, reg_class_t aclass)
{
  ira_assert (vec != NULL);
  pool_free (cost_vector_pool[(int) aclass], vec);
}
1442 
1443 /* Finish work with hard register cost vectors.  Release allocation
1444    pool for each allocno class.  */
1445 static void
1446 finish_cost_vectors (void)
1447 {
1448   int i;
1449   enum reg_class aclass;
1450 
1451   for (i = 0; i < ira_allocno_classes_num; i++)
1452     {
1453       aclass = ira_allocno_classes[i];
1454       free_alloc_pool (cost_vector_pool[aclass]);
1455     }
1456 }
1457 
1458 
1459 
/* The current loop tree node and its regno allocno map.  Both are
   kept up to date by ira_traverse_loop_tree for use by the traversal
   callbacks.  */
ira_loop_tree_node_t ira_curr_loop_tree_node;
ira_allocno_t *ira_curr_regno_allocno_map;
1463 
/* This recursive function traverses loop tree with root LOOP_NODE
   calling non-null functions PREORDER_FUNC and POSTORDER_FUNC
   correspondingly in preorder and postorder.  The function sets up
   IRA_CURR_LOOP_TREE_NODE and IRA_CURR_REGNO_ALLOCNO_MAP.  If BB_P,
   basic block nodes of LOOP_NODE are also processed (before its
   subloop nodes).  */
void
ira_traverse_loop_tree (bool bb_p, ira_loop_tree_node_t loop_node,
			void (*preorder_func) (ira_loop_tree_node_t),
			void (*postorder_func) (ira_loop_tree_node_t))
{
  ira_loop_tree_node_t subloop_node;

  ira_assert (loop_node->bb == NULL);
  ira_curr_loop_tree_node = loop_node;
  ira_curr_regno_allocno_map = ira_curr_loop_tree_node->regno_allocno_map;

  if (preorder_func != NULL)
    (*preorder_func) (loop_node);

  if (bb_p)
    /* BB nodes are leaves: both callbacks are invoked here, before
       any subloop is visited.  */
    for (subloop_node = loop_node->children;
	 subloop_node != NULL;
	 subloop_node = subloop_node->next)
      if (subloop_node->bb != NULL)
	{
	  if (preorder_func != NULL)
	    (*preorder_func) (subloop_node);

	  if (postorder_func != NULL)
	    (*postorder_func) (subloop_node);
	}

  for (subloop_node = loop_node->subloops;
       subloop_node != NULL;
       subloop_node = subloop_node->subloop_next)
    {
      ira_assert (subloop_node->bb == NULL);
      ira_traverse_loop_tree (bb_p, subloop_node,
			      preorder_func, postorder_func);
    }

  /* The recursion clobbered the current node/map; restore them for
     the postorder callback.  */
  ira_curr_loop_tree_node = loop_node;
  ira_curr_regno_allocno_map = ira_curr_loop_tree_node->regno_allocno_map;

  if (postorder_func != NULL)
    (*postorder_func) (loop_node);
}
1512 
1513 
1514 
/* The basic block currently being processed; set by
   create_bb_allocnos and read by create_insn_allocnos.  */
static basic_block curr_bb;
1517 
1518 /* This recursive function creates allocnos corresponding to
1519    pseudo-registers containing in X.  True OUTPUT_P means that X is
1520    a lvalue.  */
1521 static void
1522 create_insn_allocnos (rtx x, bool output_p)
1523 {
1524   int i, j;
1525   const char *fmt;
1526   enum rtx_code code = GET_CODE (x);
1527 
1528   if (code == REG)
1529     {
1530       int regno;
1531 
1532       if ((regno = REGNO (x)) >= FIRST_PSEUDO_REGISTER)
1533 	{
1534 	  ira_allocno_t a;
1535 
1536 	  if ((a = ira_curr_regno_allocno_map[regno]) == NULL)
1537 	    a = ira_create_allocno (regno, false, ira_curr_loop_tree_node);
1538 
1539 	  ALLOCNO_NREFS (a)++;
1540 	  ALLOCNO_FREQ (a) += REG_FREQ_FROM_BB (curr_bb);
1541 	  if (output_p)
1542 	    bitmap_set_bit (ira_curr_loop_tree_node->modified_regnos, regno);
1543 	}
1544       return;
1545     }
1546   else if (code == SET)
1547     {
1548       create_insn_allocnos (SET_DEST (x), true);
1549       create_insn_allocnos (SET_SRC (x), false);
1550       return;
1551     }
1552   else if (code == CLOBBER)
1553     {
1554       create_insn_allocnos (XEXP (x, 0), true);
1555       return;
1556     }
1557   else if (code == MEM)
1558     {
1559       create_insn_allocnos (XEXP (x, 0), false);
1560       return;
1561     }
1562   else if (code == PRE_DEC || code == POST_DEC || code == PRE_INC ||
1563 	   code == POST_INC || code == POST_MODIFY || code == PRE_MODIFY)
1564     {
1565       create_insn_allocnos (XEXP (x, 0), true);
1566       create_insn_allocnos (XEXP (x, 0), false);
1567       return;
1568     }
1569 
1570   fmt = GET_RTX_FORMAT (code);
1571   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1572     {
1573       if (fmt[i] == 'e')
1574 	create_insn_allocnos (XEXP (x, i), output_p);
1575       else if (fmt[i] == 'E')
1576 	for (j = 0; j < XVECLEN (x, i); j++)
1577 	  create_insn_allocnos (XVECEXP (x, i, j), output_p);
1578     }
1579 }
1580 
/* Create allocnos corresponding to pseudo-registers living in the
   basic block represented by the corresponding loop tree node
   BB_NODE.  */
static void
create_bb_allocnos (ira_loop_tree_node_t bb_node)
{
  basic_block bb;
  rtx insn;
  unsigned int i;
  bitmap_iterator bi;

  curr_bb = bb = bb_node->bb;
  ira_assert (bb != NULL);
  FOR_BB_INSNS_REVERSE (bb, insn)
    if (NONDEBUG_INSN_P (insn))
      create_insn_allocnos (PATTERN (insn), false);
  /* It might be an allocno living through from one subloop to
     another: create allocnos for pseudos live on entry that were not
     referenced by any insn above.  */
  EXECUTE_IF_SET_IN_REG_SET (DF_LR_IN (bb), FIRST_PSEUDO_REGISTER, i, bi)
    if (ira_curr_regno_allocno_map[i] == NULL)
      ira_create_allocno (i, false, ira_curr_loop_tree_node);
}
1603 
/* Create allocnos corresponding to pseudo-registers living on edge E
   (a loop entry or exit).  Also mark the allocnos as living on the
   loop border.  A pseudo is considered live on the edge when it is
   live both at the end of E->src and at the start of E->dest.  */
static void
create_loop_allocnos (edge e)
{
  unsigned int i;
  bitmap live_in_regs, border_allocnos;
  bitmap_iterator bi;
  ira_loop_tree_node_t parent;

  live_in_regs = DF_LR_IN (e->dest);
  border_allocnos = ira_curr_loop_tree_node->border_allocnos;
  EXECUTE_IF_SET_IN_REG_SET (DF_LR_OUT (e->src),
			     FIRST_PSEUDO_REGISTER, i, bi)
    if (bitmap_bit_p (live_in_regs, i))
      {
	if (ira_curr_regno_allocno_map[i] == NULL)
	  {
	    /* The order of creations is important for right
	       ira_regno_allocno_map: the parent's allocno must be
	       created before the current node's one.  */
	    if ((parent = ira_curr_loop_tree_node->parent) != NULL
		&& parent->regno_allocno_map[i] == NULL)
	      ira_create_allocno (i, false, parent);
	    ira_create_allocno (i, false, ira_curr_loop_tree_node);
	  }
	bitmap_set_bit (border_allocnos,
			ALLOCNO_NUM (ira_curr_regno_allocno_map[i]));
      }
}
1634 
/* Create allocnos corresponding to pseudo-registers living in loop
   represented by the corresponding loop tree node LOOP_NODE.  This
   function is called by ira_traverse_loop_tree.  For a BB node it
   processes the block's insns; for a non-root loop node it processes
   the loop's entry and exit edges.  */
static void
create_loop_tree_node_allocnos (ira_loop_tree_node_t loop_node)
{
  if (loop_node->bb != NULL)
    create_bb_allocnos (loop_node);
  else if (loop_node != ira_loop_tree_root)
    {
      int i;
      edge_iterator ei;
      edge e;
      VEC (edge, heap) *edges;

      ira_assert (current_loops != NULL);
      /* Entry edges: every header predecessor except the latch.  */
      FOR_EACH_EDGE (e, ei, loop_node->loop->header->preds)
	if (e->src != loop_node->loop->latch)
	  create_loop_allocnos (e);

      edges = get_loop_exit_edges (loop_node->loop);
      FOR_EACH_VEC_ELT (edge, edges, i, e)
	create_loop_allocnos (e);
      VEC_free (edge, heap, edges);
    }
}
1661 
/* Propagate information about allocnos modified inside the loop given
   by its LOOP_TREE_NODE to its parent.  */
static void
propagate_modified_regnos (ira_loop_tree_node_t loop_tree_node)
{
  if (loop_tree_node == ira_loop_tree_root)
    return;
  ira_assert (loop_tree_node->bb == NULL);
  /* A regno modified anywhere in this loop is also modified in the
     enclosing loop.  */
  bitmap_ior_into (loop_tree_node->parent->modified_regnos,
		   loop_tree_node->modified_regnos);
}
1673 
/* Propagate new info about allocno A (see comments about accumulated
   info in allocno definition) to the corresponding allocno on upper
   loop tree level.  So allocnos on upper levels accumulate
   information about the corresponding allocnos in nested regions.
   The new info means allocno info finally calculated in this
   file.  */
static void
propagate_allocno_info (void)
{
  int i;
  ira_allocno_t a, parent_a;
  ira_loop_tree_node_t parent;
  enum reg_class aclass;

  /* Only regional allocation needs the propagation.  */
  if (flag_ira_region != IRA_REGION_ALL
      && flag_ira_region != IRA_REGION_MIXED)
    return;
  for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    for (a = ira_regno_allocno_map[i];
	 a != NULL;
	 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
      if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) != NULL
	  && (parent_a = parent->regno_allocno_map[i]) != NULL
	  /* There are no caps yet at this point.  So use
	     border_allocnos to find allocnos for the propagation.  */
	  && bitmap_bit_p (ALLOCNO_LOOP_TREE_NODE (a)->border_allocnos,
			   ALLOCNO_NUM (a)))
	{
	  /* Spilling is bad in the parent if it is bad in any
	     subregion.  */
	  if (! ALLOCNO_BAD_SPILL_P (a))
	    ALLOCNO_BAD_SPILL_P (parent_a) = false;
	  /* Accumulate counters, conflicts, and costs upward.  */
	  ALLOCNO_NREFS (parent_a) += ALLOCNO_NREFS (a);
	  ALLOCNO_FREQ (parent_a) += ALLOCNO_FREQ (a);
	  ALLOCNO_CALL_FREQ (parent_a) += ALLOCNO_CALL_FREQ (a);
	  merge_hard_reg_conflicts (a, parent_a, true);
	  ALLOCNO_CALLS_CROSSED_NUM (parent_a)
	    += ALLOCNO_CALLS_CROSSED_NUM (a);
	  ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
	    += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
	  aclass = ALLOCNO_CLASS (a);
	  ira_assert (aclass == ALLOCNO_CLASS (parent_a));
	  ira_allocate_and_accumulate_costs
	    (&ALLOCNO_HARD_REG_COSTS (parent_a), aclass,
	     ALLOCNO_HARD_REG_COSTS (a));
	  ira_allocate_and_accumulate_costs
	    (&ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a),
	     aclass,
	     ALLOCNO_CONFLICT_HARD_REG_COSTS (a));
	  ALLOCNO_CLASS_COST (parent_a)
	    += ALLOCNO_CLASS_COST (a);
	  ALLOCNO_MEMORY_COST (parent_a) += ALLOCNO_MEMORY_COST (a);
	}
}
1726 
/* Create allocnos corresponding to pseudo-registers in the current
   function.  Traverse the loop tree for this.  */
static void
create_allocnos (void)
{
  /* We need to process BB first to correctly link allocnos by member
     next_regno_allocno.  */
  ira_traverse_loop_tree (true, ira_loop_tree_root,
			  create_loop_tree_node_allocnos, NULL);
  if (optimize)
    ira_traverse_loop_tree (false, ira_loop_tree_root, NULL,
			    propagate_modified_regnos);
}
1740 
1741 
1742 
1743 /* The page contains function to remove some regions from a separate
1744    register allocation.  We remove regions whose separate allocation
1745    will hardly improve the result.  As a result we speed up regional
1746    register allocation.  */
1747 
1748 /* The function changes the object in range list given by R to OBJ.  */
1749 static void
1750 change_object_in_range_list (live_range_t r, ira_object_t obj)
1751 {
1752   for (; r != NULL; r = r->next)
1753     r->object = obj;
1754 }
1755 
/* Move all live ranges associated with allocno FROM to allocno TO.
   The ranges are merged into TO's existing lists object by object,
   and FROM is left with no live ranges.  */
static void
move_allocno_live_ranges (ira_allocno_t from, ira_allocno_t to)
{
  int i;
  int n = ALLOCNO_NUM_OBJECTS (from);

  gcc_assert (n == ALLOCNO_NUM_OBJECTS (to));

  for (i = 0; i < n; i++)
    {
      ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
      ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
      live_range_t lr = OBJECT_LIVE_RANGES (from_obj);

      if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
	{
	  fprintf (ira_dump_file,
		   "      Moving ranges of a%dr%d to a%dr%d: ",
		   ALLOCNO_NUM (from), ALLOCNO_REGNO (from),
		   ALLOCNO_NUM (to), ALLOCNO_REGNO (to));
	  ira_print_live_range_list (ira_dump_file, lr);
	}
      /* The ranges now belong to TO's object.  */
      change_object_in_range_list (lr, to_obj);
      OBJECT_LIVE_RANGES (to_obj)
	= ira_merge_live_ranges (lr, OBJECT_LIVE_RANGES (to_obj));
      OBJECT_LIVE_RANGES (from_obj) = NULL;
    }
}
1785 
/* Copy all live ranges associated with allocno FROM to allocno TO,
   merging the copies into TO's existing lists object by object.
   FROM's own ranges are left untouched.  */
static void
copy_allocno_live_ranges (ira_allocno_t from, ira_allocno_t to)
{
  int i;
  int n = ALLOCNO_NUM_OBJECTS (from);

  gcc_assert (n == ALLOCNO_NUM_OBJECTS (to));

  for (i = 0; i < n; i++)
    {
      ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
      ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
      live_range_t lr = OBJECT_LIVE_RANGES (from_obj);

      if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
	{
	  fprintf (ira_dump_file, "      Copying ranges of a%dr%d to a%dr%d: ",
		   ALLOCNO_NUM (from), ALLOCNO_REGNO (from),
		   ALLOCNO_NUM (to), ALLOCNO_REGNO (to));
	  ira_print_live_range_list (ira_dump_file, lr);
	}
      /* Work on a duplicate so FROM keeps its ranges.  */
      lr = ira_copy_live_range_list (lr);
      change_object_in_range_list (lr, to_obj);
      OBJECT_LIVE_RANGES (to_obj)
	= ira_merge_live_ranges (lr, OBJECT_LIVE_RANGES (to_obj));
    }
}
1813 
1814 /* Return TRUE if NODE represents a loop with low register
1815    pressure.  */
1816 static bool
1817 low_pressure_loop_node_p (ira_loop_tree_node_t node)
1818 {
1819   int i;
1820   enum reg_class pclass;
1821 
1822   if (node->bb != NULL)
1823     return false;
1824 
1825   for (i = 0; i < ira_pressure_classes_num; i++)
1826     {
1827       pclass = ira_pressure_classes[i];
1828       if (node->reg_pressure[pclass] > ira_available_class_regs[pclass]
1829 	  && ira_available_class_regs[pclass] > 1)
1830 	return false;
1831     }
1832   return true;
1833 }
1834 
#ifdef STACK_REGS
/* Return TRUE if LOOP has a complex enter or exit edge.  We don't
   form a region from such loop if the target use stack register
   because reg-stack.c can not deal with such edges.  */
static bool
loop_with_complex_edge_p (struct loop *loop)
{
  int i;
  edge_iterator ei;
  edge e;
  VEC (edge, heap) *edges;
  bool res;

  /* An EH edge into the loop header makes the loop complex.  */
  FOR_EACH_EDGE (e, ei, loop->header->preds)
    if (e->flags & EDGE_EH)
      return true;
  /* So does any complex exit edge.  */
  edges = get_loop_exit_edges (loop);
  res = false;
  FOR_EACH_VEC_ELT (edge, edges, i, e)
    if (e->flags & EDGE_COMPLEX)
      {
	res = true;
	break;
      }
  VEC_free (edge, heap, edges);
  return res;
}
#endif
1863 
1864 /* Sort loops for marking them for removal.  We put already marked
1865    loops first, then less frequent loops next, and then outer loops
1866    next.  */
1867 static int
1868 loop_compare_func (const void *v1p, const void *v2p)
1869 {
1870   int diff;
1871   ira_loop_tree_node_t l1 = *(const ira_loop_tree_node_t *) v1p;
1872   ira_loop_tree_node_t l2 = *(const ira_loop_tree_node_t *) v2p;
1873 
1874   ira_assert (l1->parent != NULL && l2->parent != NULL);
1875   if (l1->to_remove_p && ! l2->to_remove_p)
1876     return -1;
1877   if (! l1->to_remove_p && l2->to_remove_p)
1878     return 1;
1879   if ((diff = l1->loop->header->frequency - l2->loop->header->frequency) != 0)
1880     return diff;
1881   if ((diff = (int) loop_depth (l1->loop) - (int) loop_depth (l2->loop)) != 0)
1882     return diff;
1883   /* Make sorting stable.  */
1884   return l1->loop_num - l2->loop_num;
1885 }
1886 
/* Mark loops which should be removed from regional allocation.  We
   remove a loop with low register pressure inside another loop with
   register pressure.  In this case a separate allocation of the loop
   hardly helps (for irregular register file architectures it could
   help by choosing a better hard register in the loop, but we prefer
   faster allocation even in this case).  We also remove cheap loops
   if there are more than IRA_MAX_LOOPS_NUM of them.  Loops with EH
   exit or enter edges are removed too because the allocation might
   require putting pseudo moves on the EH edges (we could still do
   this for pseudos with caller saved hard registers in some cases
   but it is impossible to say here or during the top-down allocation
   pass what hard register the pseudos get finally).  */
static void
mark_loops_for_removal (void)
{
  int i, n;
  ira_loop_tree_node_t *sorted_loops;
  loop_p loop;

  ira_assert (current_loops != NULL);
  /* Worst case: every loop of the function ends up in the sort
     array.  */
  sorted_loops
    = (ira_loop_tree_node_t *) ira_allocate (sizeof (ira_loop_tree_node_t)
					     * VEC_length (loop_p,
							   ira_loops.larray));
  /* Collect all non-root loop tree nodes and compute their initial
     removal mark.  */
  for (n = i = 0; VEC_iterate (loop_p, ira_loops.larray, i, loop); i++)
    if (ira_loop_nodes[i].regno_allocno_map != NULL)
      {
	if (ira_loop_nodes[i].parent == NULL)
	  {
	    /* Don't remove the root.  */
	    ira_loop_nodes[i].to_remove_p = false;
	    continue;
	  }
	sorted_loops[n++] = &ira_loop_nodes[i];
	/* A loop is removable when both it and its parent have low
	   register pressure (a separate allocation would hardly
	   help), or -- with stack regs -- when it has a complex
	   (e.g. EH) entry or exit edge.  */
	ira_loop_nodes[i].to_remove_p
	  = ((low_pressure_loop_node_p (ira_loop_nodes[i].parent)
	      && low_pressure_loop_node_p (&ira_loop_nodes[i]))
#ifdef STACK_REGS
	     || loop_with_complex_edge_p (ira_loop_nodes[i].loop)
#endif
	     );
      }
  /* Sort already-marked and cheap loops first, then mark additional
     loops until the remaining loop count plus the root ("+ 1") no
     longer exceeds IRA_MAX_LOOPS_NUM.  */
  qsort (sorted_loops, n, sizeof (ira_loop_tree_node_t), loop_compare_func);
  for (i = 0; n - i + 1 > IRA_MAX_LOOPS_NUM; i++)
    {
      sorted_loops[i]->to_remove_p = true;
      if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
	fprintf
	  (ira_dump_file,
	   "  Mark loop %d (header %d, freq %d, depth %d) for removal (%s)\n",
	   sorted_loops[i]->loop_num, sorted_loops[i]->loop->header->index,
	   sorted_loops[i]->loop->header->frequency,
	   loop_depth (sorted_loops[i]->loop),
	   low_pressure_loop_node_p (sorted_loops[i]->parent)
	   && low_pressure_loop_node_p (sorted_loops[i])
	   ? "low pressure" : "cheap loop");
    }
  ira_free (sorted_loops);
}
1946 
/* Mark all loops but the root for removal.  */
1948 static void
1949 mark_all_loops_for_removal (void)
1950 {
1951   int i;
1952   loop_p loop;
1953 
1954   ira_assert (current_loops != NULL);
1955   FOR_EACH_VEC_ELT (loop_p, ira_loops.larray, i, loop)
1956     if (ira_loop_nodes[i].regno_allocno_map != NULL)
1957       {
1958 	if (ira_loop_nodes[i].parent == NULL)
1959 	  {
1960 	    /* Don't remove the root.  */
1961 	    ira_loop_nodes[i].to_remove_p = false;
1962 	    continue;
1963 	  }
1964 	ira_loop_nodes[i].to_remove_p = true;
1965 	if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
1966 	  fprintf
1967 	    (ira_dump_file,
1968 	     "  Mark loop %d (header %d, freq %d, depth %d) for removal\n",
1969 	     ira_loop_nodes[i].loop_num,
1970 	     ira_loop_nodes[i].loop->header->index,
1971 	     ira_loop_nodes[i].loop->header->frequency,
1972 	     loop_depth (ira_loop_nodes[i].loop));
1973       }
1974 }
1975 
1976 /* Definition of vector of loop tree nodes.  */
1977 DEF_VEC_P(ira_loop_tree_node_t);
1978 DEF_VEC_ALLOC_P(ira_loop_tree_node_t, heap);
1979 
1980 /* Vec containing references to all removed loop tree nodes.  */
1981 static VEC(ira_loop_tree_node_t,heap) *removed_loop_vec;
1982 
1983 /* Vec containing references to all children of loop tree nodes.  */
1984 static VEC(ira_loop_tree_node_t,heap) *children_vec;
1985 
/* Remove subregions of NODE if their separate allocation will not
   improve the result.  Removed nodes are pushed onto
   REMOVED_LOOP_VEC for later destruction; their surviving children
   are re-linked under the nearest surviving ancestor via the
   CHILDREN_VEC stack.  */
static void
remove_uneccesary_loop_nodes_from_loop_tree (ira_loop_tree_node_t node)
{
  unsigned int start;
  bool remove_p;
  ira_loop_tree_node_t subnode;

  remove_p = node->to_remove_p;
  if (! remove_p)
    VEC_safe_push (ira_loop_tree_node_t, heap, children_vec, node);
  /* Remember where this node's surviving descendants start on the
     stack.  */
  start = VEC_length (ira_loop_tree_node_t, children_vec);
  for (subnode = node->children; subnode != NULL; subnode = subnode->next)
    if (subnode->bb == NULL)
      /* Loop node: recurse; its kept descendants accumulate on
	 children_vec.  */
      remove_uneccesary_loop_nodes_from_loop_tree (subnode);
    else
      /* Basic-block nodes are always kept.  */
      VEC_safe_push (ira_loop_tree_node_t, heap, children_vec, subnode);
  node->children = node->subloops = NULL;
  if (remove_p)
    {
      /* NODE goes away; whatever was pushed above START is left on
	 the stack for the nearest kept ancestor to adopt.  */
      VEC_safe_push (ira_loop_tree_node_t, heap, removed_loop_vec, node);
      return;
    }
  /* Adopt everything pushed since START as children of NODE; popping
     prepends, so the push order is restored in the child list.  */
  while (VEC_length (ira_loop_tree_node_t, children_vec) > start)
    {
      subnode = VEC_pop (ira_loop_tree_node_t, children_vec);
      subnode->parent = node;
      subnode->next = node->children;
      node->children = subnode;
      if (subnode->bb == NULL)
	{
	  subnode->subloop_next = node->subloops;
	  node->subloops = subnode;
	}
    }
}
2023 
2024 /* Return TRUE if NODE is inside PARENT.  */
2025 static bool
2026 loop_is_inside_p (ira_loop_tree_node_t node, ira_loop_tree_node_t parent)
2027 {
2028   for (node = node->parent; node != NULL; node = node->parent)
2029     if (node == parent)
2030       return true;
2031   return false;
2032 }
2033 
2034 /* Sort allocnos according to their order in regno allocno list.  */
2035 static int
2036 regno_allocno_order_compare_func (const void *v1p, const void *v2p)
2037 {
2038   ira_allocno_t a1 = *(const ira_allocno_t *) v1p;
2039   ira_allocno_t a2 = *(const ira_allocno_t *) v2p;
2040   ira_loop_tree_node_t n1 = ALLOCNO_LOOP_TREE_NODE (a1);
2041   ira_loop_tree_node_t n2 = ALLOCNO_LOOP_TREE_NODE (a2);
2042 
2043   if (loop_is_inside_p (n1, n2))
2044     return -1;
2045   else if (loop_is_inside_p (n2, n1))
2046     return 1;
2047   /* If allocnos are equally good, sort by allocno numbers, so that
2048      the results of qsort leave nothing to chance.  We put allocnos
2049      with higher number first in the list because it is the original
2050      order for allocnos from loops on the same levels.  */
2051   return ALLOCNO_NUM (a2) - ALLOCNO_NUM (a1);
2052 }
2053 
2054 /* This array is used to sort allocnos to restore allocno order in
2055    the regno allocno list.  */
2056 static ira_allocno_t *regno_allocnos;
2057 
2058 /* Restore allocno order for REGNO in the regno allocno list.  */
2059 static void
2060 ira_rebuild_regno_allocno_list (int regno)
2061 {
2062   int i, n;
2063   ira_allocno_t a;
2064 
2065   for (n = 0, a = ira_regno_allocno_map[regno];
2066        a != NULL;
2067        a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
2068     regno_allocnos[n++] = a;
2069   ira_assert (n > 0);
2070   qsort (regno_allocnos, n, sizeof (ira_allocno_t),
2071 	 regno_allocno_order_compare_func);
2072   for (i = 1; i < n; i++)
2073     ALLOCNO_NEXT_REGNO_ALLOCNO (regno_allocnos[i - 1]) = regno_allocnos[i];
2074   ALLOCNO_NEXT_REGNO_ALLOCNO (regno_allocnos[n - 1]) = NULL;
2075   ira_regno_allocno_map[regno] = regno_allocnos[0];
2076   if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
2077     fprintf (ira_dump_file, " Rebuilding regno allocno list for %d\n", regno);
2078 }
2079 
2080 /* Propagate info from allocno FROM_A to allocno A.  */
2081 static void
2082 propagate_some_info_from_allocno (ira_allocno_t a, ira_allocno_t from_a)
2083 {
2084   enum reg_class aclass;
2085 
2086   merge_hard_reg_conflicts (from_a, a, false);
2087   ALLOCNO_NREFS (a) += ALLOCNO_NREFS (from_a);
2088   ALLOCNO_FREQ (a) += ALLOCNO_FREQ (from_a);
2089   ALLOCNO_CALL_FREQ (a) += ALLOCNO_CALL_FREQ (from_a);
2090   ALLOCNO_CALLS_CROSSED_NUM (a) += ALLOCNO_CALLS_CROSSED_NUM (from_a);
2091   ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a)
2092     += ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (from_a);
2093   if (! ALLOCNO_BAD_SPILL_P (from_a))
2094     ALLOCNO_BAD_SPILL_P (a) = false;
2095   aclass = ALLOCNO_CLASS (from_a);
2096   ira_assert (aclass == ALLOCNO_CLASS (a));
2097   ira_allocate_and_accumulate_costs (&ALLOCNO_HARD_REG_COSTS (a), aclass,
2098 				     ALLOCNO_HARD_REG_COSTS (from_a));
2099   ira_allocate_and_accumulate_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
2100 				     aclass,
2101 				     ALLOCNO_CONFLICT_HARD_REG_COSTS (from_a));
2102   ALLOCNO_CLASS_COST (a) += ALLOCNO_CLASS_COST (from_a);
2103   ALLOCNO_MEMORY_COST (a) += ALLOCNO_MEMORY_COST (from_a);
2104 }
2105 
/* Remove allocnos from loops removed from the allocation
   consideration.  Each such allocno is either migrated to the
   nearest surviving ancestor region or merged into the ancestor's
   existing allocno for the same regno.  */
static void
remove_unnecessary_allocnos (void)
{
  int regno;
  bool merged_p, rebuild_p;
  ira_allocno_t a, prev_a, next_a, parent_a;
  ira_loop_tree_node_t a_node, parent;

  merged_p = false;
  regno_allocnos = NULL;	/* Scratch array, allocated lazily below.  */
  for (regno = max_reg_num () - 1; regno >= FIRST_PSEUDO_REGISTER; regno--)
    {
      rebuild_p = false;
      for (prev_a = NULL, a = ira_regno_allocno_map[regno];
	   a != NULL;
	   a = next_a)
	{
	  next_a = ALLOCNO_NEXT_REGNO_ALLOCNO (a);
	  a_node = ALLOCNO_LOOP_TREE_NODE (a);
	  if (! a_node->to_remove_p)
	    prev_a = a;
	  else
	    {
	      /* Walk up to the nearest ancestor which either is kept
		 or already has an allocno for this regno.  */
	      for (parent = a_node->parent;
		   (parent_a = parent->regno_allocno_map[regno]) == NULL
		     && parent->to_remove_p;
		   parent = parent->parent)
		;
	      if (parent_a == NULL)
		{
		  /* There are no allocnos with the same regno in
		     upper region -- just move the allocno to the
		     upper region.  */
		  prev_a = a;
		  ALLOCNO_LOOP_TREE_NODE (a) = parent;
		  parent->regno_allocno_map[regno] = a;
		  bitmap_set_bit (parent->all_allocnos, ALLOCNO_NUM (a));
		  /* The regno allocno list order may now be wrong;
		     remember to rebuild it after this regno is
		     processed.  */
		  rebuild_p = true;
		}
	      else
		{
		  /* Remove the allocno and update info of allocno in
		     the upper region.  */
		  if (prev_a == NULL)
		    ira_regno_allocno_map[regno] = next_a;
		  else
		    ALLOCNO_NEXT_REGNO_ALLOCNO (prev_a) = next_a;
		  move_allocno_live_ranges (a, parent_a);
		  merged_p = true;
		  propagate_some_info_from_allocno (parent_a, a);
		  /* Remove it from the corresponding regno allocno
		     map to avoid info propagation of subsequent
		     allocno into this already removed allocno.  */
		  a_node->regno_allocno_map[regno] = NULL;
		  finish_allocno (a);
		}
	    }
	}
      if (rebuild_p)
	/* We need to restore the order in regno allocno list.  */
	{
	  if (regno_allocnos == NULL)
	    regno_allocnos
	      = (ira_allocno_t *) ira_allocate (sizeof (ira_allocno_t)
						* ira_allocnos_num);
	  ira_rebuild_regno_allocno_list (regno);
	}
    }
  if (merged_p)
    /* Live ranges were moved between allocnos; the global
       start/finish chains must be rebuilt.  */
    ira_rebuild_start_finish_chains ();
  if (regno_allocnos != NULL)
    ira_free (regno_allocnos);
}
2181 
/* Remove allocnos from all loops but the root.  Every regno keeps a
   single top-level allocno attached to the root node; other allocnos
   have their live ranges (and, once per regno chain, their info)
   merged into it and are then destroyed.  */
static void
remove_low_level_allocnos (void)
{
  int regno;
  bool merged_p, propagate_p;
  ira_allocno_t a, top_a;
  ira_loop_tree_node_t a_node, parent;
  ira_allocno_iterator ai;

  merged_p = false;
  FOR_EACH_ALLOCNO (a, ai)
    {
      a_node = ALLOCNO_LOOP_TREE_NODE (a);
      if (a_node == ira_loop_tree_root || ALLOCNO_CAP_MEMBER (a) != NULL)
	continue;
      regno = ALLOCNO_REGNO (a);
      if ((top_a = ira_loop_tree_root->regno_allocno_map[regno]) == NULL)
	{
	  /* No allocno for this regno at the root yet -- promote A
	     to the root.  */
	  ALLOCNO_LOOP_TREE_NODE (a) = ira_loop_tree_root;
	  ira_loop_tree_root->regno_allocno_map[regno] = a;
	  continue;
	}
      /* Propagate info only when the parent has no map entry for
	 this regno -- presumably so the info reaches the top-level
	 allocno exactly once per chain (TODO: confirm).  */
      propagate_p = a_node->parent->regno_allocno_map[regno] == NULL;
      /* Remove the allocno and update info of allocno in the upper
	 region.  */
      move_allocno_live_ranges (a, top_a);
      merged_p = true;
      if (propagate_p)
	propagate_some_info_from_allocno (top_a, a);
    }
  /* Consistency check: each non-root allocno is either a cap member
     (and must have a cap) or reachable through its parent's map.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      a_node = ALLOCNO_LOOP_TREE_NODE (a);
      if (a_node == ira_loop_tree_root)
	continue;
      parent = a_node->parent;
      regno = ALLOCNO_REGNO (a);
      if (ALLOCNO_CAP_MEMBER (a) != NULL)
	ira_assert (ALLOCNO_CAP (a) != NULL);
      else if (ALLOCNO_CAP (a) == NULL)
 	ira_assert (parent->regno_allocno_map[regno] != NULL);
    }
  FOR_EACH_ALLOCNO (a, ai)
    {
      regno = ALLOCNO_REGNO (a);
      if (ira_loop_tree_root->regno_allocno_map[regno] == a)
	{
	  ira_object_t obj;
	  ira_allocno_object_iterator oi;

	  /* A survives as the single top-level allocno for REGNO.  */
	  ira_regno_allocno_map[regno] = a;
	  ALLOCNO_NEXT_REGNO_ALLOCNO (a) = NULL;
	  ALLOCNO_CAP_MEMBER (a) = NULL;
	  /* With one region, the accumulated (total) conflicts become
	     the conflicts proper.  */
	  FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
	    COPY_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
			       OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
#ifdef STACK_REGS
	  if (ALLOCNO_TOTAL_NO_STACK_REG_P (a))
	    ALLOCNO_NO_STACK_REG_P (a) = true;
#endif
	}
      else
	finish_allocno (a);
    }
  if (merged_p)
    ira_rebuild_start_finish_chains ();
}
2250 
2251 /* Remove loops from consideration.  We remove all loops except for
2252    root if ALL_P or loops for which a separate allocation will not
2253    improve the result.  We have to do this after allocno creation and
2254    their costs and allocno class evaluation because only after that
2255    the register pressure can be known and is calculated.  */
2256 static void
2257 remove_unnecessary_regions (bool all_p)
2258 {
2259   if (current_loops == NULL)
2260     return;
2261   if (all_p)
2262     mark_all_loops_for_removal ();
2263   else
2264     mark_loops_for_removal ();
2265   children_vec
2266     = VEC_alloc (ira_loop_tree_node_t, heap,
2267 		 last_basic_block + VEC_length (loop_p, ira_loops.larray));
2268   removed_loop_vec
2269     = VEC_alloc (ira_loop_tree_node_t, heap,
2270 		 last_basic_block + VEC_length (loop_p, ira_loops.larray));
2271   remove_uneccesary_loop_nodes_from_loop_tree (ira_loop_tree_root) ;
2272   VEC_free (ira_loop_tree_node_t, heap, children_vec);
2273   if (all_p)
2274     remove_low_level_allocnos ();
2275   else
2276     remove_unnecessary_allocnos ();
2277   while (VEC_length (ira_loop_tree_node_t, removed_loop_vec) > 0)
2278     finish_loop_tree_node (VEC_pop (ira_loop_tree_node_t, removed_loop_vec));
2279   VEC_free (ira_loop_tree_node_t, heap, removed_loop_vec);
2280 }
2281 
2282 
2283 
/* At this point a true value of the allocno attribute bad_spill_p
   means that there is an insn where the allocno occurs and where the
   allocno can not be used as memory.  The function updates the
   attribute so that afterwards it can be true only for allocnos
   which can not be used as memory in an insn AND in whose live
   ranges there are no other allocno deaths.  Spilling allocnos with
   a true value will not improve the code because it will not make
   other allocnos colorable, and additional reloads for the
   corresponding pseudo will be generated in the reload pass for each
   insn it occurs in.

   This is a trick mentioned in one classic article of Chaitin etc
   which is frequently omitted in other implementations of RA based
   on graph coloring.  */
static void
update_bad_spill_attribute (void)
{
  int i;
  ira_allocno_t a;
  ira_allocno_iterator ai;
  ira_allocno_object_iterator aoi;
  ira_object_t obj;
  live_range_t r;
  enum reg_class aclass;
  bitmap_head dead_points[N_REG_CLASSES];

  /* First pass: per allocno class, collect the program points where
     some object of that class dies (its live range finishes).  */
  for (i = 0; i < ira_allocno_classes_num; i++)
    {
      aclass = ira_allocno_classes[i];
      bitmap_initialize (&dead_points[aclass], &reg_obstack);
    }
  FOR_EACH_ALLOCNO (a, ai)
    {
      aclass = ALLOCNO_CLASS (a);
      if (aclass == NO_REGS)
	continue;
      FOR_EACH_ALLOCNO_OBJECT (a, obj, aoi)
	for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
	  bitmap_set_bit (&dead_points[aclass], r->finish);
    }
  /* Second pass: clear bad_spill_p for any allocno in whose live
     ranges a same-class death occurs strictly inside a range --
     spilling it could then free a register for another allocno.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      aclass = ALLOCNO_CLASS (a);
      if (aclass == NO_REGS)
	continue;
      if (! ALLOCNO_BAD_SPILL_P (a))
	continue;
      FOR_EACH_ALLOCNO_OBJECT (a, obj, aoi)
	{
	  for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
	    {
	      /* Look for a death at an interior point of the range.  */
	      for (i = r->start + 1; i < r->finish; i++)
		if (bitmap_bit_p (&dead_points[aclass], i))
		  break;
	      if (i < r->finish)
		break;
	    }
	  if (r != NULL)
	    {
	      ALLOCNO_BAD_SPILL_P (a) = false;
	      break;
	    }
	}
    }
  for (i = 0; i < ira_allocno_classes_num; i++)
    {
      aclass = ira_allocno_classes[i];
      bitmap_clear (&dead_points[aclass]);
    }
}
2353 
2354 
2355 
/* Set up minimal (OBJECT_MIN) and maximal (OBJECT_MAX) live range
   points for all allocno objects, and propagate them upward through
   caps and parent allocnos.  */
static void
setup_min_max_allocno_live_range_point (void)
{
  int i;
  ira_allocno_t a, parent_a, cap;
  ira_allocno_iterator ai;
#ifdef ENABLE_IRA_CHECKING
  ira_object_iterator oi;
  ira_object_t obj;
#endif
  live_range_t r;
  ira_loop_tree_node_t parent;

  /* Initialize each object's min/max from its own live range list.
     The list is ordered so the first range carries the largest
     finish; the start of the last range is the minimum.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      int n = ALLOCNO_NUM_OBJECTS (a);

      for (i = 0; i < n; i++)
	{
	  ira_object_t obj = ALLOCNO_OBJECT (a, i);
	  r = OBJECT_LIVE_RANGES (obj);
	  if (r == NULL)
	    continue;
	  OBJECT_MAX (obj) = r->finish;
	  /* Walk to the last range to find the minimal start.  */
	  for (; r->next != NULL; r = r->next)
	    ;
	  OBJECT_MIN (obj) = r->start;
	}
    }
  /* Accumulate min/max into caps and parent allocnos.  The regno
     allocno lists put inner-loop allocnos first, so each child's
     range is settled before it is merged upward.  */
  for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    for (a = ira_regno_allocno_map[i];
	 a != NULL;
	 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
      {
	int j;
	int n = ALLOCNO_NUM_OBJECTS (a);

	for (j = 0; j < n; j++)
	  {
	    ira_object_t obj = ALLOCNO_OBJECT (a, j);
	    ira_object_t parent_obj;

	    if (OBJECT_MAX (obj) < 0)
	      continue;
	    ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
	    /* Accumulation of range info.  */
	    if (ALLOCNO_CAP (a) != NULL)
	      {
		/* Widen every cap in the chain above A.  */
		for (cap = ALLOCNO_CAP (a); cap != NULL; cap = ALLOCNO_CAP (cap))
		  {
		    ira_object_t cap_obj = ALLOCNO_OBJECT (cap, j);
		    if (OBJECT_MAX (cap_obj) < OBJECT_MAX (obj))
		      OBJECT_MAX (cap_obj) = OBJECT_MAX (obj);
		    if (OBJECT_MIN (cap_obj) > OBJECT_MIN (obj))
		      OBJECT_MIN (cap_obj) = OBJECT_MIN (obj);
		  }
		continue;
	      }
	    if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) == NULL)
	      continue;
	    /* Widen the parent region's allocno for the same regno.  */
	    parent_a = parent->regno_allocno_map[i];
	    parent_obj = ALLOCNO_OBJECT (parent_a, j);
	    if (OBJECT_MAX (parent_obj) < OBJECT_MAX (obj))
	      OBJECT_MAX (parent_obj) = OBJECT_MAX (obj);
	    if (OBJECT_MIN (parent_obj) > OBJECT_MIN (obj))
	      OBJECT_MIN (parent_obj) = OBJECT_MIN (obj);
	  }
      }
#ifdef ENABLE_IRA_CHECKING
  /* Every object must end up with min/max inside [0, ira_max_point].  */
  FOR_EACH_OBJECT (obj, oi)
    {
      if ((0 <= OBJECT_MIN (obj) && OBJECT_MIN (obj) <= ira_max_point)
	  && (0 <= OBJECT_MAX (obj) && OBJECT_MAX (obj) <= ira_max_point))
	continue;
      gcc_unreachable ();
    }
#endif
}
2435 
2436 /* Sort allocnos according to their live ranges.  Allocnos with
2437    smaller allocno class are put first unless we use priority
2438    coloring.  Allocnos with the same class are ordered according
2439    their start (min).  Allocnos with the same start are ordered
2440    according their finish (max).  */
2441 static int
2442 object_range_compare_func (const void *v1p, const void *v2p)
2443 {
2444   int diff;
2445   ira_object_t obj1 = *(const ira_object_t *) v1p;
2446   ira_object_t obj2 = *(const ira_object_t *) v2p;
2447   ira_allocno_t a1 = OBJECT_ALLOCNO (obj1);
2448   ira_allocno_t a2 = OBJECT_ALLOCNO (obj2);
2449 
2450   if ((diff = OBJECT_MIN (obj1) - OBJECT_MIN (obj2)) != 0)
2451     return diff;
2452   if ((diff = OBJECT_MAX (obj1) - OBJECT_MAX (obj2)) != 0)
2453      return diff;
2454   return ALLOCNO_NUM (a1) - ALLOCNO_NUM (a2);
2455 }
2456 
2457 /* Sort ira_object_id_map and set up conflict id of allocnos.  */
2458 static void
2459 sort_conflict_id_map (void)
2460 {
2461   int i, num;
2462   ira_allocno_t a;
2463   ira_allocno_iterator ai;
2464 
2465   num = 0;
2466   FOR_EACH_ALLOCNO (a, ai)
2467     {
2468       ira_allocno_object_iterator oi;
2469       ira_object_t obj;
2470 
2471       FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
2472 	ira_object_id_map[num++] = obj;
2473     }
2474   qsort (ira_object_id_map, num, sizeof (ira_object_t),
2475 	 object_range_compare_func);
2476   for (i = 0; i < num; i++)
2477     {
2478       ira_object_t obj = ira_object_id_map[i];
2479 
2480       gcc_assert (obj != NULL);
2481       OBJECT_CONFLICT_ID (obj) = i;
2482     }
2483   for (i = num; i < ira_objects_num; i++)
2484     ira_object_id_map[i] = NULL;
2485 }
2486 
/* Set up minimal and maximal conflict ids with which a given object
   can conflict.  After this pass OBJECT_MIN/OBJECT_MAX hold conflict
   id bounds instead of live range points, allowing quick rejection
   of non-conflicting object pairs.  */
static void
setup_min_max_conflict_allocno_ids (void)
{
  int aclass;
  int i, j, min, max, start, finish, first_not_finished, filled_area_start;
  int *live_range_min, *last_lived;
  int word0_min, word0_max;
  ira_allocno_t a;
  ira_allocno_iterator ai;

  live_range_min = (int *) ira_allocate (sizeof (int) * ira_objects_num);
  aclass = -1;
  first_not_finished = -1;
  /* Forward sweep over the id-sorted objects: compute the minimal
     conflict id (the smallest id whose range has not finished before
     this object's range starts).  The original range minimum is
     saved in LIVE_RANGE_MIN for the backward sweep.  */
  for (i = 0; i < ira_objects_num; i++)
    {
      ira_object_t obj = ira_object_id_map[i];

      if (obj == NULL)
	continue;

      a = OBJECT_ALLOCNO (obj);

      if (aclass < 0)
	{
	  aclass = ALLOCNO_CLASS (a);
	  min = i;
	  first_not_finished = i;
	}
      else
	{
	  start = OBJECT_MIN (obj);
	  /* If we skip an allocno, the allocno with smaller ids will
	     be also skipped because of the secondary sorting the
	     range finishes (see function
	     object_range_compare_func).  */
	  while (first_not_finished < i
		 && start > OBJECT_MAX (ira_object_id_map
					[first_not_finished]))
	    first_not_finished++;
	  min = first_not_finished;
	}
      if (min == i)
	/* We could increase min further in this case but it is good
	   enough.  */
	min++;
      live_range_min[i] = OBJECT_MIN (obj);
      OBJECT_MIN (obj) = min;
    }
  last_lived = (int *) ira_allocate (sizeof (int) * ira_max_point);
  aclass = -1;
  filled_area_start = -1;
  /* Backward sweep: compute the maximal conflict id.  LAST_LIVED
     maps a program point to the smallest-index object (seen so far
     from the top) still live at that point.  */
  for (i = ira_objects_num - 1; i >= 0; i--)
    {
      ira_object_t obj = ira_object_id_map[i];

      if (obj == NULL)
	continue;

      a = OBJECT_ALLOCNO (obj);
      if (aclass < 0)
	{
	  aclass = ALLOCNO_CLASS (a);
	  for (j = 0; j < ira_max_point; j++)
	    last_lived[j] = -1;
	  filled_area_start = ira_max_point;
	}
      min = live_range_min[i];
      finish = OBJECT_MAX (obj);
      max = last_lived[finish];
      if (max < 0)
	/* We could decrease max further in this case but it is good
	   enough.  */
	max = OBJECT_CONFLICT_ID (obj) - 1;
      OBJECT_MAX (obj) = max;
      /* When filling LAST_LIVED we may go beyond this object's range
	 finish to recognize intersections quickly: if a subsequently
	 processed object (which has a smaller conflict id) finishes
	 inside this object's range they definitely intersect,
	 because objects with bigger conflict ids have range starts
	 not smaller than those of objects with smaller ids.  */
      for (j = min; j < filled_area_start; j++)
	last_lived[j] = i;
      filled_area_start = min;
    }
  ira_free (last_lived);
  ira_free (live_range_min);

  /* For allocnos with more than one object, we may later record extra conflicts in
     subobject 0 that we cannot really know about here.
     For now, simply widen the min/max range of these subobjects.  */

  word0_min = INT_MAX;
  word0_max = INT_MIN;

  /* First find the global id range covered by all word-0 objects.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      int n = ALLOCNO_NUM_OBJECTS (a);
      ira_object_t obj0;

      if (n < 2)
	continue;
      obj0 = ALLOCNO_OBJECT (a, 0);
      if (OBJECT_CONFLICT_ID (obj0) < word0_min)
	word0_min = OBJECT_CONFLICT_ID (obj0);
      if (OBJECT_CONFLICT_ID (obj0) > word0_max)
	word0_max = OBJECT_CONFLICT_ID (obj0);
    }
  /* Then widen each word-0 object's min/max to that range.  */
  FOR_EACH_ALLOCNO (a, ai)
    {
      int n = ALLOCNO_NUM_OBJECTS (a);
      ira_object_t obj0;

      if (n < 2)
	continue;
      obj0 = ALLOCNO_OBJECT (a, 0);
      if (OBJECT_MIN (obj0) > word0_min)
	OBJECT_MIN (obj0) = word0_min;
      if (OBJECT_MAX (obj0) < word0_max)
	OBJECT_MAX (obj0) = word0_max;
    }
}
2611 
2612 
2613 
2614 static void
2615 create_caps (void)
2616 {
2617   ira_allocno_t a;
2618   ira_allocno_iterator ai;
2619   ira_loop_tree_node_t loop_tree_node;
2620 
2621   FOR_EACH_ALLOCNO (a, ai)
2622     {
2623       if (ALLOCNO_LOOP_TREE_NODE (a) == ira_loop_tree_root)
2624 	continue;
2625       if (ALLOCNO_CAP_MEMBER (a) != NULL)
2626 	create_cap_allocno (a);
2627       else if (ALLOCNO_CAP (a) == NULL)
2628 	{
2629 	  loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
2630 	  if (!bitmap_bit_p (loop_tree_node->border_allocnos, ALLOCNO_NUM (a)))
2631 	    create_cap_allocno (a);
2632 	}
2633     }
2634 }
2635 
2636 
2637 
/* This page contains code transforming the internal representation
   (IR) with more than one region into a one-region IR, which is
   necessary for reload.  This transformation is called IR
   flattening.  We might just rebuild the IR for one region, but we
   don't do it because that takes a lot of time.  */
2643 
2644 /* Map: regno -> allocnos which will finally represent the regno for
2645    IR with one region.  */
2646 static ira_allocno_t *regno_top_level_allocno_map;
2647 
2648 /* Find the allocno that corresponds to A at a level one higher up in the
2649    loop tree.  Returns NULL if A is a cap, or if it has no parent.  */
2650 ira_allocno_t
2651 ira_parent_allocno (ira_allocno_t a)
2652 {
2653   ira_loop_tree_node_t parent;
2654 
2655   if (ALLOCNO_CAP (a) != NULL)
2656     return NULL;
2657 
2658   parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
2659   if (parent == NULL)
2660     return NULL;
2661 
2662   return parent->regno_allocno_map[ALLOCNO_REGNO (a)];
2663 }
2664 
2665 /* Find the allocno that corresponds to A at a level one higher up in the
2666    loop tree.  If ALLOCNO_CAP is set for A, return that.  */
2667 ira_allocno_t
2668 ira_parent_or_cap_allocno (ira_allocno_t a)
2669 {
2670   if (ALLOCNO_CAP (a) != NULL)
2671     return ALLOCNO_CAP (a);
2672 
2673   return ira_parent_allocno (a);
2674 }
2675 
/* Process all allocnos originated from pseudo REGNO and copy live
   ranges, hard reg conflicts, and allocno stack reg attributes from
   low level allocnos to final allocnos which are destinations of
   removed stores at a loop exit.  Return true if we copied live
   ranges.  */
static bool
copy_info_to_removed_store_destinations (int regno)
{
  ira_allocno_t a;
  ira_allocno_t parent_a = NULL;
  ira_loop_tree_node_t parent;
  bool merged_p;

  merged_p = false;
  for (a = ira_regno_allocno_map[regno];
       a != NULL;
       a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
    {
      if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))])
	/* This allocno will be removed.  */
	continue;

      /* Caps will be removed.  */
      ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
      /* Find the closest surviving ancestor allocno that was the
	 destination of a memory-optimized (removed) store; stop the
	 search as soon as a regno map entry that does not qualify is
	 found.  */
      for (parent = ALLOCNO_LOOP_TREE_NODE (a)->parent;
	   parent != NULL;
	   parent = parent->parent)
	if ((parent_a = parent->regno_allocno_map[regno]) == NULL
	    || (parent_a
		== regno_top_level_allocno_map[REGNO
					       (allocno_emit_reg (parent_a))]
		&& ALLOCNO_EMIT_DATA (parent_a)->mem_optimized_dest_p))
	  break;
      if (parent == NULL || parent_a == NULL)
	continue;

      /* Accumulate A's live ranges, conflicts and call statistics
	 into the surviving destination allocno.  */
      copy_allocno_live_ranges (a, parent_a);
      merge_hard_reg_conflicts (a, parent_a, true);

      ALLOCNO_CALL_FREQ (parent_a) += ALLOCNO_CALL_FREQ (a);
      ALLOCNO_CALLS_CROSSED_NUM (parent_a)
	+= ALLOCNO_CALLS_CROSSED_NUM (a);
      ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
	+= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
      merged_p = true;
    }
  return merged_p;
}
2724 
2725 /* Flatten the IR.  In other words, this function transforms IR as if
2726    it were built with one region (without loops).  We could make it
2727    much simpler by rebuilding IR with one region, but unfortunately it
2728    takes a lot of time.  MAX_REGNO_BEFORE_EMIT and
2729    IRA_MAX_POINT_BEFORE_EMIT are correspondingly MAX_REG_NUM () and
2730    IRA_MAX_POINT before emitting insns on the loop borders.  */
2731 void
2732 ira_flattening (int max_regno_before_emit, int ira_max_point_before_emit)
2733 {
2734   int i, j;
2735   bool keep_p;
2736   int hard_regs_num;
2737   bool new_pseudos_p, merged_p, mem_dest_p;
2738   unsigned int n;
2739   enum reg_class aclass;
2740   ira_allocno_t a, parent_a, first, second, node_first, node_second;
2741   ira_copy_t cp;
2742   ira_loop_tree_node_t node;
2743   live_range_t r;
2744   ira_allocno_iterator ai;
2745   ira_copy_iterator ci;
2746 
2747   regno_top_level_allocno_map
2748     = (ira_allocno_t *) ira_allocate (max_reg_num ()
2749 				      * sizeof (ira_allocno_t));
2750   memset (regno_top_level_allocno_map, 0,
2751 	  max_reg_num () * sizeof (ira_allocno_t));
2752   new_pseudos_p = merged_p = false;
2753   FOR_EACH_ALLOCNO (a, ai)
2754     {
2755       ira_allocno_object_iterator oi;
2756       ira_object_t obj;
2757 
2758       if (ALLOCNO_CAP_MEMBER (a) != NULL)
2759 	/* Caps are not in the regno allocno maps and they are never
2760 	   will be transformed into allocnos existing after IR
2761 	   flattening.  */
2762 	continue;
2763       FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
2764 	COPY_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
2765 			   OBJECT_CONFLICT_HARD_REGS (obj));
2766 #ifdef STACK_REGS
2767       ALLOCNO_TOTAL_NO_STACK_REG_P (a) = ALLOCNO_NO_STACK_REG_P (a);
2768 #endif
2769     }
2770   /* Fix final allocno attributes.  */
2771   for (i = max_regno_before_emit - 1; i >= FIRST_PSEUDO_REGISTER; i--)
2772     {
2773       mem_dest_p = false;
2774       for (a = ira_regno_allocno_map[i];
2775 	   a != NULL;
2776 	   a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
2777 	{
2778 	  ira_emit_data_t parent_data, data = ALLOCNO_EMIT_DATA (a);
2779 
2780 	  ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
2781 	  if (data->somewhere_renamed_p)
2782 	    new_pseudos_p = true;
2783 	  parent_a = ira_parent_allocno (a);
2784 	  if (parent_a == NULL)
2785 	    {
2786 	      ALLOCNO_COPIES (a) = NULL;
2787 	      regno_top_level_allocno_map[REGNO (data->reg)] = a;
2788 	      continue;
2789 	    }
2790 	  ira_assert (ALLOCNO_CAP_MEMBER (parent_a) == NULL);
2791 
2792 	  if (data->mem_optimized_dest != NULL)
2793 	    mem_dest_p = true;
2794 	  parent_data = ALLOCNO_EMIT_DATA (parent_a);
2795 	  if (REGNO (data->reg) == REGNO (parent_data->reg))
2796 	    {
2797 	      merge_hard_reg_conflicts (a, parent_a, true);
2798 	      move_allocno_live_ranges (a, parent_a);
2799 	      merged_p = true;
2800 	      parent_data->mem_optimized_dest_p
2801 		= (parent_data->mem_optimized_dest_p
2802 		   || data->mem_optimized_dest_p);
2803 	      continue;
2804 	    }
2805 	  new_pseudos_p = true;
2806 	  for (;;)
2807 	    {
2808 	      ALLOCNO_NREFS (parent_a) -= ALLOCNO_NREFS (a);
2809 	      ALLOCNO_FREQ (parent_a) -= ALLOCNO_FREQ (a);
2810 	      ALLOCNO_CALL_FREQ (parent_a) -= ALLOCNO_CALL_FREQ (a);
2811 	      ALLOCNO_CALLS_CROSSED_NUM (parent_a)
2812 		-= ALLOCNO_CALLS_CROSSED_NUM (a);
2813 	      ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (parent_a)
2814 		-= ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a);
2815 	      ira_assert (ALLOCNO_CALLS_CROSSED_NUM (parent_a) >= 0
2816 			  && ALLOCNO_NREFS (parent_a) >= 0
2817 			  && ALLOCNO_FREQ (parent_a) >= 0);
2818 	      aclass = ALLOCNO_CLASS (parent_a);
2819 	      hard_regs_num = ira_class_hard_regs_num[aclass];
2820 	      if (ALLOCNO_HARD_REG_COSTS (a) != NULL
2821 		  && ALLOCNO_HARD_REG_COSTS (parent_a) != NULL)
2822 		for (j = 0; j < hard_regs_num; j++)
2823 		  ALLOCNO_HARD_REG_COSTS (parent_a)[j]
2824 		    -= ALLOCNO_HARD_REG_COSTS (a)[j];
2825 	      if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) != NULL
2826 		  && ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a) != NULL)
2827 		for (j = 0; j < hard_regs_num; j++)
2828 		  ALLOCNO_CONFLICT_HARD_REG_COSTS (parent_a)[j]
2829 		    -= ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[j];
2830 	      ALLOCNO_CLASS_COST (parent_a)
2831 		-= ALLOCNO_CLASS_COST (a);
2832 	      ALLOCNO_MEMORY_COST (parent_a) -= ALLOCNO_MEMORY_COST (a);
2833 	      parent_a = ira_parent_allocno (parent_a);
2834 	      if (parent_a == NULL)
2835 		break;
2836 	    }
2837 	  ALLOCNO_COPIES (a) = NULL;
2838 	  regno_top_level_allocno_map[REGNO (data->reg)] = a;
2839 	}
2840       if (mem_dest_p && copy_info_to_removed_store_destinations (i))
2841 	merged_p = true;
2842     }
2843   ira_assert (new_pseudos_p || ira_max_point_before_emit == ira_max_point);
2844   if (merged_p || ira_max_point_before_emit != ira_max_point)
2845     ira_rebuild_start_finish_chains ();
2846   if (new_pseudos_p)
2847     {
2848       sparseset objects_live;
2849 
2850       /* Rebuild conflicts.  */
2851       FOR_EACH_ALLOCNO (a, ai)
2852 	{
2853 	  ira_allocno_object_iterator oi;
2854 	  ira_object_t obj;
2855 
2856 	  if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
2857 	      || ALLOCNO_CAP_MEMBER (a) != NULL)
2858 	    continue;
2859 	  FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
2860 	    {
2861 	      for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
2862 		ira_assert (r->object == obj);
2863 	      clear_conflicts (obj);
2864 	    }
2865 	}
2866       objects_live = sparseset_alloc (ira_objects_num);
2867       for (i = 0; i < ira_max_point; i++)
2868 	{
2869 	  for (r = ira_start_point_ranges[i]; r != NULL; r = r->start_next)
2870 	    {
2871 	      ira_object_t obj = r->object;
2872 
2873 	      a = OBJECT_ALLOCNO (obj);
2874 	      if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
2875 		  || ALLOCNO_CAP_MEMBER (a) != NULL)
2876 		continue;
2877 
2878 	      aclass = ALLOCNO_CLASS (a);
2879 	      sparseset_set_bit (objects_live, OBJECT_CONFLICT_ID (obj));
2880 	      EXECUTE_IF_SET_IN_SPARSESET (objects_live, n)
2881 		{
2882 		  ira_object_t live_obj = ira_object_id_map[n];
2883 		  ira_allocno_t live_a = OBJECT_ALLOCNO (live_obj);
2884 		  enum reg_class live_aclass = ALLOCNO_CLASS (live_a);
2885 
2886 		  if (ira_reg_classes_intersect_p[aclass][live_aclass]
2887 		      /* Don't set up conflict for the allocno with itself.  */
2888 		      && live_a != a)
2889 		    ira_add_conflict (obj, live_obj);
2890 		}
2891 	    }
2892 
2893 	  for (r = ira_finish_point_ranges[i]; r != NULL; r = r->finish_next)
2894 	    sparseset_clear_bit (objects_live, OBJECT_CONFLICT_ID (r->object));
2895 	}
2896       sparseset_free (objects_live);
2897       compress_conflict_vecs ();
2898     }
2899   /* Mark some copies for removing and change allocnos in the rest
2900      copies.  */
2901   FOR_EACH_COPY (cp, ci)
2902     {
2903       if (ALLOCNO_CAP_MEMBER (cp->first) != NULL
2904 	  || ALLOCNO_CAP_MEMBER (cp->second) != NULL)
2905 	{
2906 	  if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
2907 	    fprintf
2908 	      (ira_dump_file, "      Remove cp%d:%c%dr%d-%c%dr%d\n",
2909 	       cp->num, ALLOCNO_CAP_MEMBER (cp->first) != NULL ? 'c' : 'a',
2910 	       ALLOCNO_NUM (cp->first),
2911 	       REGNO (allocno_emit_reg (cp->first)),
2912 	       ALLOCNO_CAP_MEMBER (cp->second) != NULL ? 'c' : 'a',
2913 	       ALLOCNO_NUM (cp->second),
2914 	       REGNO (allocno_emit_reg (cp->second)));
2915 	  cp->loop_tree_node = NULL;
2916 	  continue;
2917 	}
2918       first
2919 	= regno_top_level_allocno_map[REGNO (allocno_emit_reg (cp->first))];
2920       second
2921 	= regno_top_level_allocno_map[REGNO (allocno_emit_reg (cp->second))];
2922       node = cp->loop_tree_node;
2923       if (node == NULL)
2924 	keep_p = true; /* It copy generated in ira-emit.c.  */
2925       else
2926 	{
2927 	  /* Check that the copy was not propagated from level on
2928 	     which we will have different pseudos.  */
2929 	  node_first = node->regno_allocno_map[ALLOCNO_REGNO (cp->first)];
2930 	  node_second = node->regno_allocno_map[ALLOCNO_REGNO (cp->second)];
2931 	  keep_p = ((REGNO (allocno_emit_reg (first))
2932 		     == REGNO (allocno_emit_reg (node_first)))
2933 		     && (REGNO (allocno_emit_reg (second))
2934 			 == REGNO (allocno_emit_reg (node_second))));
2935 	}
2936       if (keep_p)
2937 	{
2938 	  cp->loop_tree_node = ira_loop_tree_root;
2939 	  cp->first = first;
2940 	  cp->second = second;
2941 	}
2942       else
2943 	{
2944 	  cp->loop_tree_node = NULL;
2945 	  if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
2946 	    fprintf (ira_dump_file, "      Remove cp%d:a%dr%d-a%dr%d\n",
2947 		     cp->num, ALLOCNO_NUM (cp->first),
2948 		     REGNO (allocno_emit_reg (cp->first)),
2949 		     ALLOCNO_NUM (cp->second),
2950 		     REGNO (allocno_emit_reg (cp->second)));
2951 	}
2952     }
2953   /* Remove unnecessary allocnos on lower levels of the loop tree.  */
2954   FOR_EACH_ALLOCNO (a, ai)
2955     {
2956       if (a != regno_top_level_allocno_map[REGNO (allocno_emit_reg (a))]
2957 	  || ALLOCNO_CAP_MEMBER (a) != NULL)
2958 	{
2959 	  if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL)
2960 	    fprintf (ira_dump_file, "      Remove a%dr%d\n",
2961 		     ALLOCNO_NUM (a), REGNO (allocno_emit_reg (a)));
2962 	  finish_allocno (a);
2963 	  continue;
2964 	}
2965       ALLOCNO_LOOP_TREE_NODE (a) = ira_loop_tree_root;
2966       ALLOCNO_REGNO (a) = REGNO (allocno_emit_reg (a));
2967       ALLOCNO_CAP (a) = NULL;
2968       /* Restore updated costs for assignments from reload.  */
2969       ALLOCNO_UPDATED_MEMORY_COST (a) = ALLOCNO_MEMORY_COST (a);
2970       ALLOCNO_UPDATED_CLASS_COST (a) = ALLOCNO_CLASS_COST (a);
2971       if (! ALLOCNO_ASSIGNED_P (a))
2972 	ira_free_allocno_updated_costs (a);
2973       ira_assert (ALLOCNO_UPDATED_HARD_REG_COSTS (a) == NULL);
2974       ira_assert (ALLOCNO_UPDATED_CONFLICT_HARD_REG_COSTS (a) == NULL);
2975     }
2976   /* Remove unnecessary copies.  */
2977   FOR_EACH_COPY (cp, ci)
2978     {
2979       if (cp->loop_tree_node == NULL)
2980 	{
2981 	  ira_copies[cp->num] = NULL;
2982 	  finish_copy (cp);
2983 	  continue;
2984 	}
2985       ira_assert
2986 	(ALLOCNO_LOOP_TREE_NODE (cp->first) == ira_loop_tree_root
2987 	 && ALLOCNO_LOOP_TREE_NODE (cp->second) == ira_loop_tree_root);
2988       ira_add_allocno_copy_to_list (cp);
2989       ira_swap_allocno_copy_ends_if_necessary (cp);
2990     }
2991   rebuild_regno_allocno_maps ();
2992   if (ira_max_point != ira_max_point_before_emit)
2993     ira_compress_allocno_live_ranges ();
2994   ira_free (regno_top_level_allocno_map);
2995 }
2996 
2997 
2998 
2999 #ifdef ENABLE_IRA_CHECKING
3000 /* Check creation of all allocnos.  Allocnos on lower levels should
3001    have allocnos or caps on all upper levels.  */
3002 static void
3003 check_allocno_creation (void)
3004 {
3005   ira_allocno_t a;
3006   ira_allocno_iterator ai;
3007   ira_loop_tree_node_t loop_tree_node;
3008 
3009   FOR_EACH_ALLOCNO (a, ai)
3010     {
3011       loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
3012       ira_assert (bitmap_bit_p (loop_tree_node->all_allocnos,
3013 				ALLOCNO_NUM (a)));
3014       if (loop_tree_node == ira_loop_tree_root)
3015 	continue;
3016       if (ALLOCNO_CAP_MEMBER (a) != NULL)
3017 	ira_assert (ALLOCNO_CAP (a) != NULL);
3018       else if (ALLOCNO_CAP (a) == NULL)
3019 	ira_assert (loop_tree_node->parent
3020 		    ->regno_allocno_map[ALLOCNO_REGNO (a)] != NULL
3021 		    && bitmap_bit_p (loop_tree_node->border_allocnos,
3022 				     ALLOCNO_NUM (a)));
3023     }
3024 }
3025 #endif
3026 
3027 /* Identify allocnos which prefer a register class with a single hard register.
3028    Adjust ALLOCNO_CONFLICT_HARD_REG_COSTS so that conflicting allocnos are
3029    less likely to use the preferred singleton register.  */
3030 static void
3031 update_conflict_hard_reg_costs (void)
3032 {
3033   ira_allocno_t a;
3034   ira_allocno_iterator ai;
3035   int i, index, min;
3036 
3037   FOR_EACH_ALLOCNO (a, ai)
3038     {
3039       reg_class_t aclass = ALLOCNO_CLASS (a);
3040       reg_class_t pref = reg_preferred_class (ALLOCNO_REGNO (a));
3041 
3042       if (reg_class_size[(int) pref] != 1)
3043 	continue;
3044       index = ira_class_hard_reg_index[(int) aclass]
3045 				      [ira_class_hard_regs[(int) pref][0]];
3046       if (index < 0)
3047 	continue;
3048       if (ALLOCNO_CONFLICT_HARD_REG_COSTS (a) == NULL
3049 	  || ALLOCNO_HARD_REG_COSTS (a) == NULL)
3050 	continue;
3051       min = INT_MAX;
3052       for (i = ira_class_hard_regs_num[(int) aclass] - 1; i >= 0; i--)
3053 	if (ALLOCNO_HARD_REG_COSTS (a)[i] > ALLOCNO_CLASS_COST (a)
3054 	    && min > ALLOCNO_HARD_REG_COSTS (a)[i])
3055 	  min = ALLOCNO_HARD_REG_COSTS (a)[i];
3056       if (min == INT_MAX)
3057 	continue;
3058       ira_allocate_and_set_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
3059 				  aclass, 0);
3060       ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[index]
3061 	-= min - ALLOCNO_CLASS_COST (a);
3062     }
3063 }
3064 
/* Create an internal representation (IR) for IRA (allocnos, copies,
   loop tree nodes).  The function returns TRUE if we generate a loop
   structure (besides nodes representing the whole function and the
   basic blocks) for regional allocation.  A true return means that
   we really need to flatten the IR before reload.  */
bool
ira_build (void)
{
  bool loops_p;

  df_analyze ();
  /* Build the basic IR: cost vectors, allocnos, copies, and the loop
     tree with allocnos created for pseudos in each region.  */
  initiate_cost_vectors ();
  initiate_allocnos ();
  initiate_copies ();
  create_loop_tree_nodes ();
  form_loop_tree ();
  create_allocnos ();
  ira_costs ();
  create_allocno_objects ();
  /* Compute live ranges and drop regions that are not worth
     allocating separately.  */
  ira_create_allocno_live_ranges ();
  remove_unnecessary_regions (false);
  ira_compress_allocno_live_ranges ();
  update_bad_spill_attribute ();
  loops_p = more_one_region_p ();
  if (loops_p)
    {
      /* For regional allocation: push allocno info from inner regions
	 upward and create caps for allocnos living in subloops.  */
      propagate_allocno_info ();
      create_caps ();
    }
  ira_tune_allocno_costs ();
#ifdef ENABLE_IRA_CHECKING
  check_allocno_creation ();
#endif
  /* Build the conflict graph over the computed live ranges.  */
  setup_min_max_allocno_live_range_point ();
  sort_conflict_id_map ();
  setup_min_max_conflict_allocno_ids ();
  ira_build_conflicts ();
  update_conflict_hard_reg_costs ();
  if (! ira_conflicts_p)
    {
      ira_allocno_t a;
      ira_allocno_iterator ai;

      /* Fast (non-conflict-based) allocation: remove all regions but
	 the root one.  */
      if (loops_p)
	{
	  remove_unnecessary_regions (true);
	  loops_p = false;
	}
      /* We don't save hard registers around calls for fast allocation
	 -- add caller clobbered registers as conflicting ones to
	 allocno crossing calls.  */
      FOR_EACH_ALLOCNO (a, ai)
	if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
	  ior_hard_reg_conflicts (a, &call_used_reg_set);
    }
  if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
    print_copies (ira_dump_file);
  if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
    {
      /* Dump summary statistics: total conflicts (n), total live
	 ranges (nr), and the number of multi-object allocnos
	 (nr_big, e.g. multi-word pseudos).  */
      int n, nr, nr_big;
      ira_allocno_t a;
      live_range_t r;
      ira_allocno_iterator ai;

      n = 0;
      nr = 0;
      nr_big = 0;
      FOR_EACH_ALLOCNO (a, ai)
	{
	  int j, nobj = ALLOCNO_NUM_OBJECTS (a);

	  if (nobj > 1)
	    nr_big++;
	  for (j = 0; j < nobj; j++)
	    {
	      ira_object_t obj = ALLOCNO_OBJECT (a, j);
	      n += OBJECT_NUM_CONFLICTS (obj);
	      for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
		nr++;
	    }
	}
      fprintf (ira_dump_file, "  regions=%d, blocks=%d, points=%d\n",
	       current_loops == NULL ? 1 : VEC_length (loop_p, ira_loops.larray),
	       n_basic_blocks, ira_max_point);
      fprintf (ira_dump_file,
	       "    allocnos=%d (big %d), copies=%d, conflicts=%d, ranges=%d\n",
	       ira_allocnos_num, nr_big, ira_copies_num, n, nr);
    }
  return loops_p;
}
3156 
/* Release all the data created by function ira_build.  Called once
   allocation results have been consumed.  NOTE(review): the finish_*
   calls likely tear down structures in dependency order -- preserve
   this ordering when modifying.  */
void
ira_destroy (void)
{
  finish_loop_tree_nodes ();
  finish_copies ();
  finish_allocnos ();
  finish_cost_vectors ();
  ira_finish_allocno_live_ranges ();
}
3167