1 /* Integrated Register Allocator (IRA) entry point.
2    Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012
3    Free Software Foundation, Inc.
4    Contributed by Vladimir Makarov <vmakarov@redhat.com>.
5 
6 This file is part of GCC.
7 
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12 
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
16 for more details.
17 
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3.  If not see
20 <http://www.gnu.org/licenses/>.  */
21 
22 /* The integrated register allocator (IRA) is a
23    regional register allocator performing graph coloring on a top-down
24    traversal of nested regions.  Graph coloring in a region is based
25    on Chaitin-Briggs algorithm.  It is called integrated because
26    register coalescing, register live range splitting, and choosing a
27    better hard register are done on-the-fly during coloring.  Register
28    coalescing and choosing a cheaper hard register are done by hard
29    register preferencing during hard register assignment.  The live
30    range splitting is a byproduct of the regional register allocation.
31 
32    Major IRA notions are:
33 
34      o *Region* is a part of CFG where graph coloring based on
35        Chaitin-Briggs algorithm is done.  IRA can work on any set of
36        nested CFG regions forming a tree.  Currently the regions are
37        the entire function for the root region and natural loops for
38        the other regions.  Therefore data structure representing a
39        region is called loop_tree_node.
40 
41      o *Allocno class* is a register class used for allocation of a
42        given allocno.  It means that only hard registers of the given
43        register class can be assigned to the given allocno.  In
44        reality, an even smaller subset of (*profitable*) hard
45        registers can be assigned.  In rare cases, the subset can be
46        even smaller because our modification of the Chaitin-Briggs
47        algorithm requires that the sets of hard registers that can be
48        assigned to allocnos form a forest, i.e. the sets can be
49        ordered in a way where any previous set either does not
50        intersect the given set or is a superset of it.
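
       A minimal sketch of this nesting (forest) property, using plain
       bit masks instead of the real HARD_REG_SET type (illustrative
       only, not code from this file):

         // For the sets to form a forest, any two profitable hard
         // register sets must either be disjoint or nested.
         static int
         nested_or_disjoint_p (unsigned set1, unsigned set2)
         {
           unsigned common = set1 & set2;

           return common == 0 || common == set1 || common == set2;
         }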
51 
52      o *Pressure class* is a register class belonging to a set of
53        register classes containing all of the hard-registers available
54        for register allocation.  The set of all pressure classes for a
55        target is defined in the corresponding machine-description file
56        according to some criteria.  Register pressure is calculated
57        only for pressure classes and it affects some IRA decisions,
58        such as forming allocation regions.
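
       A simplified sketch of how such pressure could be tallied at a
       single program point (illustrative only; hypothetical names):

         // Sum, for one pressure class, the register need of all
         // allocnos of that class that are live at the point.
         static int
         point_pressure (int pclass, int n_live,
                         const int *live_pclass, const int *live_nregs)
         {
           int i, pressure = 0;

           for (i = 0; i < n_live; i++)
             if (live_pclass[i] == pclass)
               pressure += live_nregs[i];
           return pressure;
         }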
59 
60      o *Allocno* represents the live range of a pseudo-register in a
61        region.  Besides the obvious attributes like the corresponding
62        pseudo-register number, allocno class, conflicting allocnos and
63        conflicting hard-registers, there are a few allocno attributes
64        which are important for understanding the allocation algorithm:
65 
66        - *Live ranges*.  This is a list of ranges of *program points*
67          where the allocno lives.  Program points represent places
68          where a pseudo can be born or become dead (there are
69          approximately two times more program points than the insns)
70          and they are represented by integers starting with 0.  The
71          live ranges are used to find conflicts between allocnos.
72          They also play a very important role in the transformation of
73          the IRA internal representation of several regions into a one
74          region representation.  The latter is used during the reload
75          pass because each allocno represents all of the
76          corresponding pseudo-registers.
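
         A hedged sketch (hypothetical type and helper; the real
         structures live in ira-int.h) of how ranges of program points
         can be tested for overlap to detect a conflict:

           // A live range is a chain of [start, finish] program points.
           struct range { int start, finish; struct range *next; };

           // Naive test: two allocnos conflict if any of their ranges
           // overlap (the real code can exploit the range ordering).
           static int
           ranges_overlap_p (const struct range *r1, const struct range *r2)
           {
             const struct range *r;

             for (; r1 != NULL; r1 = r1->next)
               for (r = r2; r != NULL; r = r->next)
                 if (r1->start <= r->finish && r->start <= r1->finish)
                   return 1;
             return 0;
           }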
77 
78        - *Hard-register costs*.  This is a vector of size equal to the
79          number of available hard-registers of the allocno class.  The
80          cost of a callee-clobbered hard-register for an allocno is
81          increased by the cost of save/restore code around the calls
82          through the given allocno's life.  If the allocno is a move
83          instruction operand and another operand is a hard-register of
84          the allocno class, the cost of the hard-register is decreased
85          by the move cost.
86 
87          When an allocno is assigned, the hard-register with minimal
88          full cost is used.  Initially, a hard-register's full cost is
89          the corresponding value from the hard-register's cost vector.
90          If the allocno is connected by a *copy* (see below) to
91          another allocno which has just received a hard-register, the
92          cost of the hard-register is decreased.  Before choosing a
93          hard-register for an allocno, the allocno's current costs of
94          the hard-registers are modified by the conflict hard-register
95          costs of all of the conflicting allocnos which are not
96          assigned yet.
97 
98        - *Conflict hard-register costs*.  This is a vector of the same
99          size as the hard-register costs vector.  To permit an
100          unassigned allocno to get a better hard-register, IRA uses
101          this vector to calculate the final full cost of the
102          available hard-registers.  The conflict hard-register costs
103          of an unassigned allocno are also updated when the allocno's
104          hard-register costs change because a copy involving it is
105          processed as described above.  This is done to show other
106          unassigned allocnos that a given allocno prefers some
107          hard-registers in order to remove the move instruction
108          corresponding to the copy (see the sketch below).
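
         A minimal sketch of how these two cost vectors can drive the
         final choice (hypothetical helper, not the actual IRA code):

           // Full cost of a hard register = its own cost plus the
           // conflict costs accumulated from still unassigned
           // conflicting allocnos; the cheapest one wins.
           static int
           cheapest_hard_reg (int n, const int *cost, const int *conflict_cost)
           {
             int i, best = -1;

             for (i = 0; i < n; i++)
               if (best < 0
                   || (cost[i] + conflict_cost[i]
                       < cost[best] + conflict_cost[best]))
                 best = i;
             return best;  // a caller could treat -1 as "use memory"
           }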
109 
110      o *Cap*.  If a pseudo-register does not live in a region but
111        lives in a nested region, IRA creates a special allocno called
112        a cap in the outer region.  A region cap is also created for a
113        subregion cap.
114 
115      o *Copy*.  Allocnos can be connected by copies.  Copies are used
116        to modify hard-register costs for allocnos during coloring.
117        Such modifications reflect a preference to use the same
118        hard-register for the allocnos connected by copies.  Usually
119        copies are created for move insns (in this case it results in
120        register coalescing).  But IRA also creates copies for operands
121        of an insn which should be assigned to the same hard-register
122        due to constraints in the machine description (it usually
123        results in removing a move generated in reload to satisfy
124        the constraints) and copies referring to the allocno which is
125        the output operand of an instruction and the allocno which is
126        an input operand dying in the instruction (creation of such
127        copies results in less register shuffling).  IRA *does not*
128        create copies between the same register allocnos from different
129        regions because we use another technique for propagating
130        hard-register preference on the borders of regions.
131 
132    Allocnos (including caps) for the upper region in the region tree
133    *accumulate* information important for coloring from allocnos with
134    the same pseudo-register from nested regions.  This includes
135    hard-register and memory costs, conflicts with hard-registers,
136    allocno conflicts, allocno copies and more.  *Thus, attributes for
137    allocnos in a region have the same values as if the region had no
138    subregions*.  It means that attributes for allocnos in the
139    outermost region corresponding to the function have the same values
140    as though the allocation used only one region which is the entire
141    function.  It also means that we can look at IRA work as if the
142    first IRA did allocation for all function then it improved the
143    allocation for loops then their subloops and so on.
144 
145    IRA major passes are:
146 
147      o Building IRA internal representation which consists of the
148        following subpasses:
149 
150        * First, IRA builds regions and creates allocnos (file
151          ira-build.c) and initializes most of their attributes.
152 
153        * Then IRA finds an allocno class for each allocno and
154          calculates its initial (non-accumulated) cost of memory and
155          each hard-register of its allocno class (file ira-costs.c).
156 
157        * IRA creates live ranges for each allocno, calculates register
158          pressure for each pressure class in each region, sets up
159          conflict hard registers for each allocno and info about calls
160          the allocno lives through (file ira-lives.c).
161 
162        * IRA removes low register pressure loops from the regions
163          mostly to speed IRA up (file ira-build.c).
164 
165        * IRA propagates accumulated allocno info from lower region
166          allocnos to corresponding upper region allocnos (file
167          ira-build.c).
168 
169        * IRA creates all caps (file ira-build.c).
170 
171        * Having live-ranges of allocnos and their classes, IRA creates
172          conflicting allocnos for each allocno.  Conflicting allocnos
173          are stored as a bit vector or array of pointers to the
174          conflicting allocnos, whichever is more profitable (file
175          ira-conflicts.c).  At this point IRA creates allocno copies.
176 
177      o Coloring.  Now IRA has all necessary info to start graph coloring
178        process.  It is done in each region on a top-down traversal of
179        the region tree (file ira-color.c).  There are the following subpasses:
180 
181        * Finding profitable hard registers of corresponding allocno
182          class for each allocno.  For example, only callee-saved hard
183          registers are frequently profitable for allocnos living
184          through calls.  If the profitable hard register sets of the
185          allocnos do not form a tree based on the subset relation, we
186          use some approximation to form the tree.  This approximation
187          is used to figure out trivial colorability of allocnos.  The
188          approximation is needed only in pretty rare cases.
189 
190        * Putting allocnos onto the coloring stack.  IRA uses Briggs
191          optimistic coloring which is a major improvement over
192          Chaitin's coloring.  Therefore IRA does not spill allocnos at
193          this point.  There is some freedom in the order of putting
194          allocnos on the stack which can affect the final result of
195          the allocation.  IRA uses some heuristics to improve the
196          order.
197 
198 	 We also use a modification of Chaitin-Briggs algorithm which
199          works for intersected register classes of allocnos.  To
200          figure out trivial colorability of allocnos, the tree of
201          hard register sets mentioned above is used.  To get an idea
202          of how the algorithm works, consider an i386 example: an
203          allocno to which any general hard register can be assigned.
204          If the allocno conflicts with eight allocnos to which only the
205          EAX register can be assigned, the given allocno is still
206          trivially colorable because all conflicting allocnos can
207          occupy only EAX and all other general hard registers remain
208          free (a rough sketch of the criterion follows below).
209 
210 	 To get an idea of the used trivial colorability criterion, it
211 	 is also useful to read article "Graph-Coloring Register
212 	 Allocation for Irregular Architectures" by Michael D. Smith
213 	 and Glenn Holloway.  The major difference between the article's
214 	 approach and the approach used in IRA is that Smith's approach
215 	 takes register classes only from the machine description and IRA
216 	 calculates register classes from the intermediate code too
217 	 (e.g. an explicit usage of hard registers in RTL code for
218 	 parameter passing can result in creation of additional
219 	 register classes which contain or exclude the hard
220 	 registers).  That makes the IRA approach useful for improving
221 	 coloring even for architectures with regular register files
222 	 and in fact some benchmarking shows the improvement for
223 	 regular class architectures is even bigger than for irregular
224 	 ones.  Another difference is that Smith's approach chooses
225 	 the intersection of the classes of all insn operands in which a
226 	 given pseudo occurs.  IRA can use bigger classes if doing so is
227 	 still more profitable than using memory.
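
         A rough sketch of that trivial colorability criterion
         (hypothetical helper; the real test in ira-color.c works on the
         tree of hard register sets):

           // Conflicting allocnos are grouped by their profitable hard
           // register set.  A group whose set contains only SET_SIZE
           // registers can never block more than SET_SIZE registers of
           // the given allocno, however many members it has.
           static int
           trivially_colorable_p (int available, int need, int ngroups,
                                  const int *group_need,
                                  const int *group_set_size)
           {
             int i, blocked = 0;

             for (i = 0; i < ngroups; i++)
               blocked += (group_need[i] < group_set_size[i]
                           ? group_need[i] : group_set_size[i]);
             return need + blocked <= available;
           }

         In the EAX example above, available = 8 general registers,
         need = 1, and the single conflicting group contributes
         blocked = min (8, 1) = 1, so the allocno is trivially colorable.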
228 
229        * Popping the allocnos from the stack and assigning them hard
230          registers.  If IRA cannot assign a hard register to an
231          allocno and the allocno is coalesced, IRA undoes the
232          coalescing and puts the uncoalesced allocnos onto the stack in
233          the hope that some such allocnos will get a hard register
234          separately.  If IRA fails to assign a hard register or memory
235          is more profitable for it, IRA spills the allocno.  IRA
236          assigns the allocno the hard-register with minimal full
237          allocation cost which reflects the cost of usage of the
238          hard-register for the allocno and cost of usage of the
239          hard-register for allocnos conflicting with given allocno.
240 
241        * Chaitin-Briggs coloring assigns as many pseudos as possible
242          to hard registers.  After coloring, we try to improve the
243          allocation from the cost point of view.  We improve the
244          allocation by spilling some allocnos and assigning the freed
245          hard registers to other allocnos if it decreases the overall
246          allocation cost.
247 
248        * After allocno assignment in the region, IRA modifies the hard
249          register and memory costs for the corresponding allocnos in
250          the subregions to reflect the cost of possible loads, stores,
251          or moves on the border of the region and its subregions.
252          When the default regional allocation algorithm is used
253          (-fira-algorithm=mixed), IRA just propagates the assignment
254          for allocnos if the register pressure in the region for the
255          corresponding pressure class is less than the number of
256          available hard registers for the given pressure class.
257 
258      o Spill/restore code moving.  When IRA performs an allocation
259        by traversing regions in top-down order, it does not know what
260        happens below in the region tree.  Therefore, sometimes IRA
261        misses opportunities to perform a better allocation.  A simple
262        optimization tries to improve allocation in a region having
263        subregions and contained in another region.  If the
264        corresponding allocnos in the subregion are spilled, it spills
265        the region allocno if it is profitable.  The optimization
266        implements a simple iterative algorithm performing profitable
267        transformations while they are still possible.  It is fast in
268        practice, so there is no real need for a better time complexity
269        algorithm.
270 
271      o Code change.  After coloring, two allocnos representing the
272        same pseudo-register outside and inside a region respectively
273        may be assigned to different locations (hard-registers or
274        memory).  In this case IRA creates and uses a new
275        pseudo-register inside the region and adds code to move allocno
276        values on the region's borders.  This is done during top-down
277        traversal of the regions (file ira-emit.c).  In some
278        complicated cases IRA can create a new allocno to move allocno
279        values (e.g. when a swap of values stored in two hard-registers
280        is needed).  At this stage, the new allocno is marked as
281        spilled.  IRA still creates the pseudo-register and the moves
282        on the region borders even when both allocnos were assigned to
283        the same hard-register.  If the reload pass spills a
284        pseudo-register for some reason, the effect will be smaller
285        because another allocno will still be in the hard-register.  In
286        most cases, this is better than spilling both allocnos.  If
287        reload does not change the allocation for the two
288        pseudo-registers, the trivial move will be removed by
289        post-reload optimizations.  IRA does not generate moves for
290        allocnos assigned to the same hard register when the default
291        regional allocation algorithm is used and the register pressure
292        in the region for the corresponding pressure class is less
293        than the number of available hard registers for that class.
294        IRA also does some optimizations to remove redundant stores and
295        to reduce code duplication on the region borders.
296 
297      o Flattening internal representation.  After changing code, IRA
298        transforms its internal representation for several regions into
299        one region representation (file ira-build.c).  This process is
300    called IR flattening.  Such a process is more complicated than IR
301        rebuilding would be, but is much faster.
302 
303      o After IR flattening, IRA tries to assign hard registers to all
304    spilled allocnos.  This is implemented by a simple and fast
305        priority coloring algorithm (see function
306        ira_reassign_conflict_allocnos::ira-color.c).  Here new allocnos
307        created during the code change pass can be assigned to hard
308        registers.
309 
310      o At the end IRA calls the reload pass.  The reload pass
311        communicates with IRA through several functions in file
312        ira-color.c to improve its decisions in
313 
314        * sharing stack slots for the spilled pseudos based on IRA info
315          about pseudo-register conflicts.
316 
317        * reassigning hard-registers to all spilled pseudos at the end
318          of each reload iteration.
319 
320        * choosing a better hard-register to spill based on IRA info
321          about pseudo-register live ranges and the register pressure
322          in places where the pseudo-register lives.
323 
324    IRA uses a lot of data representing the target processors.  These
325    data are initialized in file ira.c.
326 
327    If the function has no loops (or the loops are ignored when
328    -fira-algorithm=CB is used), we have classic Chaitin-Briggs
329    coloring (only instead of separate pass of coalescing, we use hard
330    register preferencing).  In such case, IRA works much faster
331    because many things are not done (like IR flattening, the
332    spill/restore optimization, and the code change).
333 
334    Literature worth reading for a better understanding of the code:
335 
336    o Preston Briggs, Keith D. Cooper, Linda Torczon.  Improvements to
337      Graph Coloring Register Allocation.
338 
339    o David Callahan, Brian Koblenz.  Register allocation via
340      hierarchical graph coloring.
341 
342    o Keith Cooper, Anshuman Dasgupta, Jason Eckhardt. Revisiting Graph
343      Coloring Register Allocation: A Study of the Chaitin-Briggs and
344      Callahan-Koblenz Algorithms.
345 
346    o Guei-Yuan Lueh, Thomas Gross, and Ali-Reza Adl-Tabatabai. Global
347      Register Allocation Based on Graph Fusion.
348 
349    o Michael D. Smith and Glenn Holloway.  Graph-Coloring Register
350      Allocation for Irregular Architectures
351 
352    o Vladimir Makarov. The Integrated Register Allocator for GCC.
353 
354    o Vladimir Makarov.  The top-down register allocator for irregular
355      register file architectures.
356 
357 */
358 
359 
360 #include "config.h"
361 #include "system.h"
362 #include "coretypes.h"
363 #include "tm.h"
364 #include "regs.h"
365 #include "rtl.h"
366 #include "tm_p.h"
367 #include "target.h"
368 #include "flags.h"
369 #include "obstack.h"
370 #include "bitmap.h"
371 #include "hard-reg-set.h"
372 #include "basic-block.h"
373 #include "df.h"
374 #include "expr.h"
375 #include "recog.h"
376 #include "params.h"
377 #include "timevar.h"
378 #include "tree-pass.h"
379 #include "output.h"
380 #include "except.h"
381 #include "reload.h"
382 #include "diagnostic-core.h"
383 #include "integrate.h"
384 #include "ggc.h"
385 #include "ira-int.h"
386 #include "dce.h"
387 
388 
389 struct target_ira default_target_ira;
390 struct target_ira_int default_target_ira_int;
391 #if SWITCHABLE_TARGET
392 struct target_ira *this_target_ira = &default_target_ira;
393 struct target_ira_int *this_target_ira_int = &default_target_ira_int;
394 #endif
395 
396 /* A modified value of flag `-fira-verbose' used internally.  */
397 int internal_flag_ira_verbose;
398 
399 /* Dump file of the allocator if it is not NULL.  */
400 FILE *ira_dump_file;
401 
402 /* The number of elements in the following array.  */
403 int ira_spilled_reg_stack_slots_num;
404 
405 /* The following array contains info about spilled pseudo-registers
406    stack slots used in current function so far.  */
407 struct ira_spilled_reg_stack_slot *ira_spilled_reg_stack_slots;
408 
409 /* Correspondingly overall cost of the allocation, overall cost before
410    reload, cost of the allocnos assigned to hard-registers, cost of
411    the allocnos assigned to memory, cost of loads, stores and register
412    move insns generated for pseudo-register live range splitting (see
413    ira-emit.c).  */
414 int ira_overall_cost, overall_cost_before;
415 int ira_reg_cost, ira_mem_cost;
416 int ira_load_cost, ira_store_cost, ira_shuffle_cost;
417 int ira_move_loops_num, ira_additional_jumps_num;
418 
419 /* All registers that can be eliminated.  */
420 
421 HARD_REG_SET eliminable_regset;
422 
423 /* Temporary hard reg set used for a different calculation.  */
424 static HARD_REG_SET temp_hard_regset;
425 
426 
427 
428 /* The function sets up the map IRA_REG_MODE_HARD_REGSET.  */
429 static void
430 setup_reg_mode_hard_regset (void)
431 {
432   int i, m, hard_regno;
433 
434   for (m = 0; m < NUM_MACHINE_MODES; m++)
435     for (hard_regno = 0; hard_regno < FIRST_PSEUDO_REGISTER; hard_regno++)
436       {
437 	CLEAR_HARD_REG_SET (ira_reg_mode_hard_regset[hard_regno][m]);
438 	for (i = hard_regno_nregs[hard_regno][m] - 1; i >= 0; i--)
439 	  if (hard_regno + i < FIRST_PSEUDO_REGISTER)
440 	    SET_HARD_REG_BIT (ira_reg_mode_hard_regset[hard_regno][m],
441 			      hard_regno + i);
442       }
443 }
444 
445 
446 #define no_unit_alloc_regs \
447   (this_target_ira_int->x_no_unit_alloc_regs)
448 
449 /* The function sets up the three arrays declared above.  */
450 static void
451 setup_class_hard_regs (void)
452 {
453   int cl, i, hard_regno, n;
454   HARD_REG_SET processed_hard_reg_set;
455 
456   ira_assert (SHRT_MAX >= FIRST_PSEUDO_REGISTER);
457   for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
458     {
459       COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
460       AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
461       CLEAR_HARD_REG_SET (processed_hard_reg_set);
462       for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
463 	{
464 	  ira_non_ordered_class_hard_regs[cl][i] = -1;
465 	  ira_class_hard_reg_index[cl][i] = -1;
466 	}
467       for (n = 0, i = 0; i < FIRST_PSEUDO_REGISTER; i++)
468 	{
469 #ifdef REG_ALLOC_ORDER
470 	  hard_regno = reg_alloc_order[i];
471 #else
472 	  hard_regno = i;
473 #endif
474 	  if (TEST_HARD_REG_BIT (processed_hard_reg_set, hard_regno))
475 	    continue;
476 	  SET_HARD_REG_BIT (processed_hard_reg_set, hard_regno);
477       	  if (! TEST_HARD_REG_BIT (temp_hard_regset, hard_regno))
478 	    ira_class_hard_reg_index[cl][hard_regno] = -1;
479 	  else
480 	    {
481 	      ira_class_hard_reg_index[cl][hard_regno] = n;
482 	      ira_class_hard_regs[cl][n++] = hard_regno;
483 	    }
484 	}
485       ira_class_hard_regs_num[cl] = n;
486       for (n = 0, i = 0; i < FIRST_PSEUDO_REGISTER; i++)
487 	if (TEST_HARD_REG_BIT (temp_hard_regset, i))
488 	  ira_non_ordered_class_hard_regs[cl][n++] = i;
489       ira_assert (ira_class_hard_regs_num[cl] == n);
490     }
491 }
492 
493 /* Set up IRA_AVAILABLE_CLASS_REGS.  */
494 static void
495 setup_available_class_regs (void)
496 {
497   int i, j;
498 
499   memset (ira_available_class_regs, 0, sizeof (ira_available_class_regs));
500   for (i = 0; i < N_REG_CLASSES; i++)
501     {
502       COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[i]);
503       AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
504       for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
505 	if (TEST_HARD_REG_BIT (temp_hard_regset, j))
506 	  ira_available_class_regs[i]++;
507     }
508 }
509 
510 /* Set up global variables defining info about hard registers for the
511    allocation.  These depend on USE_HARD_FRAME_P whose TRUE value means
512    that we can use the hard frame pointer for the allocation.  */
513 static void
514 setup_alloc_regs (bool use_hard_frame_p)
515 {
516 #ifdef ADJUST_REG_ALLOC_ORDER
517   ADJUST_REG_ALLOC_ORDER;
518 #endif
519   COPY_HARD_REG_SET (no_unit_alloc_regs, fixed_reg_set);
520   if (! use_hard_frame_p)
521     SET_HARD_REG_BIT (no_unit_alloc_regs, HARD_FRAME_POINTER_REGNUM);
522   setup_class_hard_regs ();
523   setup_available_class_regs ();
524 }
525 
526 
527 
528 #define alloc_reg_class_subclasses \
529   (this_target_ira_int->x_alloc_reg_class_subclasses)
530 
531 /* Initialize the table of subclasses of each reg class.  */
532 static void
533 setup_reg_subclasses (void)
534 {
535   int i, j;
536   HARD_REG_SET temp_hard_regset2;
537 
538   for (i = 0; i < N_REG_CLASSES; i++)
539     for (j = 0; j < N_REG_CLASSES; j++)
540       alloc_reg_class_subclasses[i][j] = LIM_REG_CLASSES;
541 
542   for (i = 0; i < N_REG_CLASSES; i++)
543     {
544       if (i == (int) NO_REGS)
545 	continue;
546 
547       COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[i]);
548       AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
549       if (hard_reg_set_empty_p (temp_hard_regset))
550 	continue;
551       for (j = 0; j < N_REG_CLASSES; j++)
552 	if (i != j)
553 	  {
554 	    enum reg_class *p;
555 
556 	    COPY_HARD_REG_SET (temp_hard_regset2, reg_class_contents[j]);
557 	    AND_COMPL_HARD_REG_SET (temp_hard_regset2, no_unit_alloc_regs);
558 	    if (! hard_reg_set_subset_p (temp_hard_regset,
559 					 temp_hard_regset2))
560 	      continue;
561 	    p = &alloc_reg_class_subclasses[j][0];
562 	    while (*p != LIM_REG_CLASSES) p++;
563 	    *p = (enum reg_class) i;
564 	  }
565     }
566 }
567 
568 
569 
570 /* Set up IRA_MEMORY_MOVE_COST and IRA_MAX_MEMORY_MOVE_COST.  */
571 static void
572 setup_class_subset_and_memory_move_costs (void)
573 {
574   int cl, cl2, mode, cost;
575   HARD_REG_SET temp_hard_regset2;
576 
577   for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
578     ira_memory_move_cost[mode][NO_REGS][0]
579       = ira_memory_move_cost[mode][NO_REGS][1] = SHRT_MAX;
580   for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
581     {
582       if (cl != (int) NO_REGS)
583 	for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
584 	  {
585 	    ira_max_memory_move_cost[mode][cl][0]
586 	      = ira_memory_move_cost[mode][cl][0]
587 	      = memory_move_cost ((enum machine_mode) mode,
588 				  (reg_class_t) cl, false);
589 	    ira_max_memory_move_cost[mode][cl][1]
590 	      = ira_memory_move_cost[mode][cl][1]
591 	      = memory_move_cost ((enum machine_mode) mode,
592 				  (reg_class_t) cl, true);
593 	    /* Costs for NO_REGS are used in cost calculation on the
594 	       1st pass when the preferred register classes are not
595 	       known yet.  In this case we take the best scenario.  */
596 	    if (ira_memory_move_cost[mode][NO_REGS][0]
597 		> ira_memory_move_cost[mode][cl][0])
598 	      ira_max_memory_move_cost[mode][NO_REGS][0]
599 		= ira_memory_move_cost[mode][NO_REGS][0]
600 		= ira_memory_move_cost[mode][cl][0];
601 	    if (ira_memory_move_cost[mode][NO_REGS][1]
602 		> ira_memory_move_cost[mode][cl][1])
603 	      ira_max_memory_move_cost[mode][NO_REGS][1]
604 		= ira_memory_move_cost[mode][NO_REGS][1]
605 		= ira_memory_move_cost[mode][cl][1];
606 	  }
607     }
608   for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
609     for (cl2 = (int) N_REG_CLASSES - 1; cl2 >= 0; cl2--)
610       {
611 	COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
612 	AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
613 	COPY_HARD_REG_SET (temp_hard_regset2, reg_class_contents[cl2]);
614 	AND_COMPL_HARD_REG_SET (temp_hard_regset2, no_unit_alloc_regs);
615 	ira_class_subset_p[cl][cl2]
616 	  = hard_reg_set_subset_p (temp_hard_regset, temp_hard_regset2);
617 	if (! hard_reg_set_empty_p (temp_hard_regset2)
618 	    && hard_reg_set_subset_p (reg_class_contents[cl2],
619 				      reg_class_contents[cl]))
620 	  for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
621 	    {
622 	      cost = ira_memory_move_cost[mode][cl2][0];
623 	      if (cost > ira_max_memory_move_cost[mode][cl][0])
624 		ira_max_memory_move_cost[mode][cl][0] = cost;
625 	      cost = ira_memory_move_cost[mode][cl2][1];
626 	      if (cost > ira_max_memory_move_cost[mode][cl][1])
627 		ira_max_memory_move_cost[mode][cl][1] = cost;
628 	    }
629       }
630   for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
631     for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
632       {
633 	ira_memory_move_cost[mode][cl][0]
634 	  = ira_max_memory_move_cost[mode][cl][0];
635 	ira_memory_move_cost[mode][cl][1]
636 	  = ira_max_memory_move_cost[mode][cl][1];
637       }
638   setup_reg_subclasses ();
639 }
640 
641 
642 
643 /* Define the following macro if allocation through malloc is
644    preferable.  */
645 #define IRA_NO_OBSTACK
646 
647 #ifndef IRA_NO_OBSTACK
648 /* Obstack used for storing all dynamic data (except bitmaps) of the
649    IRA.  */
650 static struct obstack ira_obstack;
651 #endif
652 
653 /* Obstack used for storing all bitmaps of the IRA.  */
654 static struct bitmap_obstack ira_bitmap_obstack;
655 
656 /* Allocate memory of size LEN for IRA data.  */
657 void *
658 ira_allocate (size_t len)
659 {
660   void *res;
661 
662 #ifndef IRA_NO_OBSTACK
663   res = obstack_alloc (&ira_obstack, len);
664 #else
665   res = xmalloc (len);
666 #endif
667   return res;
668 }
669 
670 /* Free memory ADDR allocated for IRA data.  */
671 void
672 ira_free (void *addr ATTRIBUTE_UNUSED)
673 {
674 #ifndef IRA_NO_OBSTACK
675   /* do nothing */
676 #else
677   free (addr);
678 #endif
679 }
680 
681 
682 /* Allocate and return a bitmap for IRA.  */
683 bitmap
684 ira_allocate_bitmap (void)
685 {
686   return BITMAP_ALLOC (&ira_bitmap_obstack);
687 }
688 
689 /* Free bitmap B allocated for IRA.  */
690 void
691 ira_free_bitmap (bitmap b ATTRIBUTE_UNUSED)
692 {
693   /* do nothing */
694 }
695 
696 
697 
698 /* Output information about allocation of all allocnos (except for
699    caps) into file F.  */
700 void
701 ira_print_disposition (FILE *f)
702 {
703   int i, n, max_regno;
704   ira_allocno_t a;
705   basic_block bb;
706 
707   fprintf (f, "Disposition:");
708   max_regno = max_reg_num ();
709   for (n = 0, i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
710     for (a = ira_regno_allocno_map[i];
711 	 a != NULL;
712 	 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
713       {
714 	if (n % 4 == 0)
715 	  fprintf (f, "\n");
716 	n++;
717 	fprintf (f, " %4d:r%-4d", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
718 	if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
719 	  fprintf (f, "b%-3d", bb->index);
720 	else
721 	  fprintf (f, "l%-3d", ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
722 	if (ALLOCNO_HARD_REGNO (a) >= 0)
723 	  fprintf (f, " %3d", ALLOCNO_HARD_REGNO (a));
724 	else
725 	  fprintf (f, " mem");
726       }
727   fprintf (f, "\n");
728 }
729 
730 /* Outputs information about allocation of all allocnos into
731    stderr.  */
732 void
733 ira_debug_disposition (void)
734 {
735   ira_print_disposition (stderr);
736 }
737 
738 
739 
740 /* Set up ira_stack_reg_pressure_class which is the biggest pressure
741    register class containing stack registers or NO_REGS if there are
742    no stack registers.  To find this class, we iterate through all
743    register pressure classes and choose the first register pressure
744    class containing all the stack registers and having the biggest
745    size.  */
746 static void
747 setup_stack_reg_pressure_class (void)
748 {
749   ira_stack_reg_pressure_class = NO_REGS;
750 #ifdef STACK_REGS
751   {
752     int i, best, size;
753     enum reg_class cl;
754     HARD_REG_SET temp_hard_regset2;
755 
756     CLEAR_HARD_REG_SET (temp_hard_regset);
757     for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
758       SET_HARD_REG_BIT (temp_hard_regset, i);
759     best = 0;
760     for (i = 0; i < ira_pressure_classes_num; i++)
761       {
762 	cl = ira_pressure_classes[i];
763 	COPY_HARD_REG_SET (temp_hard_regset2, temp_hard_regset);
764 	AND_HARD_REG_SET (temp_hard_regset2, reg_class_contents[cl]);
765 	size = hard_reg_set_size (temp_hard_regset2);
766 	if (best < size)
767 	  {
768 	    best = size;
769 	    ira_stack_reg_pressure_class = cl;
770 	  }
771       }
772   }
773 #endif
774 }
775 
776 /* Find pressure classes which are register classes for which we
777    calculate register pressure in IRA, register pressure sensitive
778    insn scheduling, and register pressure sensitive loop invariant
779    motion.
780 
781    To make register pressure calculation easy, we always use
782    non-intersected register pressure classes.  A move of hard
783    registers from one register pressure class is not more expensive
784    than load and store of the hard registers.  Most likely an allocno
785    class will be a subset of a register pressure class and in many
786    cases a register pressure class.  That makes usage of register
787    pressure classes a good approximation to find a high register
788    pressure.  */
789 static void
790 setup_pressure_classes (void)
791 {
792   int cost, i, n, curr;
793   int cl, cl2;
794   enum reg_class pressure_classes[N_REG_CLASSES];
795   int m;
796   HARD_REG_SET temp_hard_regset2;
797   bool insert_p;
798 
799   n = 0;
800   for (cl = 0; cl < N_REG_CLASSES; cl++)
801     {
802       if (ira_available_class_regs[cl] == 0)
803 	continue;
804       if (ira_available_class_regs[cl] != 1
805 	  /* A register class without subclasses may contain a few
806 	     hard registers and movement between them is costly
807 	     (e.g. SPARC FPCC registers).  We still should consider it
808 	     as a candidate for a pressure class.  */
809 	  && alloc_reg_class_subclasses[cl][0] != LIM_REG_CLASSES)
810 	{
811 	  /* Check that the moves between any hard registers of the
812 	     current class are not more expensive for a legal mode
813 	     than load/store of the hard registers of the current
814 	     class.  Such class is a potential candidate to be a
815 	     register pressure class.  */
816 	  for (m = 0; m < NUM_MACHINE_MODES; m++)
817 	    {
818 	      COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
819 	      AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
820 	      AND_COMPL_HARD_REG_SET (temp_hard_regset,
821 				      ira_prohibited_class_mode_regs[cl][m]);
822 	      if (hard_reg_set_empty_p (temp_hard_regset))
823 		continue;
824 	      ira_init_register_move_cost_if_necessary ((enum machine_mode) m);
825 	      cost = ira_register_move_cost[m][cl][cl];
826 	      if (cost <= ira_max_memory_move_cost[m][cl][1]
827 		  || cost <= ira_max_memory_move_cost[m][cl][0])
828 		break;
829 	    }
830 	  if (m >= NUM_MACHINE_MODES)
831 	    continue;
832 	}
833       curr = 0;
834       insert_p = true;
835       COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
836       AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
837       /* Remove so far added pressure classes which are subset of the
838 	 current candidate class.  Prefer GENERAL_REGS as a pressure
839 	 register class to another class containing the same
840 	 allocatable hard registers.  We do this because machine
841 	 dependent cost hooks might give wrong costs for the latter
842 	 class but always give the right cost for the former class
843 	 (GENERAL_REGS).  */
844       for (i = 0; i < n; i++)
845 	{
846 	  cl2 = pressure_classes[i];
847 	  COPY_HARD_REG_SET (temp_hard_regset2, reg_class_contents[cl2]);
848 	  AND_COMPL_HARD_REG_SET (temp_hard_regset2, no_unit_alloc_regs);
849 	  if (hard_reg_set_subset_p (temp_hard_regset, temp_hard_regset2)
850 	      && (! hard_reg_set_equal_p (temp_hard_regset, temp_hard_regset2)
851 		  || cl2 == (int) GENERAL_REGS))
852 	    {
853 	      pressure_classes[curr++] = (enum reg_class) cl2;
854 	      insert_p = false;
855 	      continue;
856 	    }
857 	  if (hard_reg_set_subset_p (temp_hard_regset2, temp_hard_regset)
858 	      && (! hard_reg_set_equal_p (temp_hard_regset2, temp_hard_regset)
859 		  || cl == (int) GENERAL_REGS))
860 	    continue;
861 	  if (hard_reg_set_equal_p (temp_hard_regset2, temp_hard_regset))
862 	    insert_p = false;
863 	  pressure_classes[curr++] = (enum reg_class) cl2;
864 	}
865       /* If the current candidate is a subset of a so far added
866 	 pressure class, don't add it to the list of the pressure
867 	 classes.  */
868       if (insert_p)
869 	pressure_classes[curr++] = (enum reg_class) cl;
870       n = curr;
871     }
872 #ifdef ENABLE_IRA_CHECKING
873   {
874     HARD_REG_SET ignore_hard_regs;
875 
876     /* Check pressure classes correctness: here we check that hard
877        registers from all register pressure classes contain all hard
878        registers available for the allocation.  */
879     CLEAR_HARD_REG_SET (temp_hard_regset);
880     CLEAR_HARD_REG_SET (temp_hard_regset2);
881     COPY_HARD_REG_SET (ignore_hard_regs, no_unit_alloc_regs);
882     for (cl = 0; cl < LIM_REG_CLASSES; cl++)
883       {
884 	/* For some targets (like MIPS with MD_REGS), there are some
885 	   classes with hard registers available for allocation but
886 	   not able to hold value of any mode.  */
887 	for (m = 0; m < NUM_MACHINE_MODES; m++)
888 	  if (contains_reg_of_mode[cl][m])
889 	    break;
890 	if (m >= NUM_MACHINE_MODES)
891 	  {
892 	    IOR_HARD_REG_SET (ignore_hard_regs, reg_class_contents[cl]);
893 	    continue;
894 	  }
895 	for (i = 0; i < n; i++)
896 	  if ((int) pressure_classes[i] == cl)
897 	    break;
898 	IOR_HARD_REG_SET (temp_hard_regset2, reg_class_contents[cl]);
899 	if (i < n)
900 	  IOR_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
901       }
902     for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
903       /* Some targets (like SPARC with ICC reg) have allocatable regs
904 	 for which no reg class is defined.  */
905       if (REGNO_REG_CLASS (i) == NO_REGS)
906 	SET_HARD_REG_BIT (ignore_hard_regs, i);
907     AND_COMPL_HARD_REG_SET (temp_hard_regset, ignore_hard_regs);
908     AND_COMPL_HARD_REG_SET (temp_hard_regset2, ignore_hard_regs);
909     ira_assert (hard_reg_set_subset_p (temp_hard_regset2, temp_hard_regset));
910   }
911 #endif
912   ira_pressure_classes_num = 0;
913   for (i = 0; i < n; i++)
914     {
915       cl = (int) pressure_classes[i];
916       ira_reg_pressure_class_p[cl] = true;
917       ira_pressure_classes[ira_pressure_classes_num++] = (enum reg_class) cl;
918     }
919   setup_stack_reg_pressure_class ();
920 }
921 
922 /* Set up IRA_ALLOCNO_CLASSES, IRA_ALLOCNO_CLASSES_NUM,
923    IRA_IMPORTANT_CLASSES, and IRA_IMPORTANT_CLASSES_NUM.
924 
925    A target may have many subtargets and not all target hard registers
926    can be used for allocation, e.g. the x86 port in 32-bit mode cannot
927    use hard registers introduced in x86-64 (like r8-r15).  Some classes
928    might have the same allocatable hard registers, e.g.  INDEX_REGS
929    and GENERAL_REGS in the x86 port in 32-bit mode.  To decrease such
930    calculation efforts we introduce allocno classes which contain
931    unique non-empty sets of allocatable hard-registers.
932 
933    Pseudo class cost calculation in ira-costs.c is very expensive.
934    Therefore we are trying to decrease the number of classes involved in
935    such calculation.  Register classes used in the cost calculation
936    are called important classes.  They are allocno classes and other
937    non-empty classes whose allocatable hard register sets are inside
938    of an allocno class hard register set.  At first sight, it looks
939    like they are just the allocno classes.  This is not true.  In the
940    example of the x86 port in 32-bit mode, allocno classes will contain
941    GENERAL_REGS but not LEGACY_REGS (because allocatable hard
942    registers are the same for both classes).  The important
943    classes will contain GENERAL_REGS and LEGACY_REGS.  It is done
944    because a machine description insn constraint may refer to
945    LEGACY_REGS and code in ira-costs.c is mostly based on investigation
946    of the insn constraints.  */
947 static void
948 setup_allocno_and_important_classes (void)
949 {
950   int i, j, n, cl;
951   bool set_p;
952   HARD_REG_SET temp_hard_regset2;
953   static enum reg_class classes[LIM_REG_CLASSES + 1];
954 
955   n = 0;
956   /* Collect classes which contain unique sets of allocatable hard
957      registers.  Prefer GENERAL_REGS to other classes containing the
958      same set of hard registers.  */
959   for (i = 0; i < LIM_REG_CLASSES; i++)
960     {
961       COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[i]);
962       AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
963       for (j = 0; j < n; j++)
964 	{
965 	  cl = classes[j];
966 	  COPY_HARD_REG_SET (temp_hard_regset2, reg_class_contents[cl]);
967 	  AND_COMPL_HARD_REG_SET (temp_hard_regset2,
968 				  no_unit_alloc_regs);
969 	  if (hard_reg_set_equal_p (temp_hard_regset,
970 				    temp_hard_regset2))
971 	    break;
972 	}
973       if (j >= n)
974 	classes[n++] = (enum reg_class) i;
975       else if (i == GENERAL_REGS)
976 	/* Prefer general regs.  For i386 example, it means that
977 	   we prefer GENERAL_REGS over INDEX_REGS or LEGACY_REGS
978 	   (all of them consist of the same available hard
979 	   registers).  */
980 	classes[j] = (enum reg_class) i;
981     }
982   classes[n] = LIM_REG_CLASSES;
983 
984   /* Set up classes which can be used for allocnos as classes
985      containing non-empty unique sets of allocatable hard
986      registers.  */
987   ira_allocno_classes_num = 0;
988   for (i = 0; (cl = classes[i]) != LIM_REG_CLASSES; i++)
989     {
990       COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
991       AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
992       if (hard_reg_set_empty_p (temp_hard_regset))
993 	continue;
994       ira_allocno_classes[ira_allocno_classes_num++] = (enum reg_class) cl;
995     }
996   ira_important_classes_num = 0;
997   /* Add non-allocno classes containing a non-empty set of
998      allocatable hard regs.  */
999   for (cl = 0; cl < N_REG_CLASSES; cl++)
1000     {
1001       COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
1002       AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
1003       if (! hard_reg_set_empty_p (temp_hard_regset))
1004 	{
1005 	  set_p = false;
1006 	  for (j = 0; j < ira_allocno_classes_num; j++)
1007 	    {
1008 	      COPY_HARD_REG_SET (temp_hard_regset2,
1009 				 reg_class_contents[ira_allocno_classes[j]]);
1010 	      AND_COMPL_HARD_REG_SET (temp_hard_regset2, no_unit_alloc_regs);
1011 	      if ((enum reg_class) cl == ira_allocno_classes[j])
1012 		break;
1013 	      else if (hard_reg_set_subset_p (temp_hard_regset,
1014 					      temp_hard_regset2))
1015 		set_p = true;
1016 	    }
1017 	  if (set_p && j >= ira_allocno_classes_num)
1018 	    ira_important_classes[ira_important_classes_num++]
1019 	      = (enum reg_class) cl;
1020 	}
1021     }
1022   /* Now add allocno classes to the important classes.  */
1023   for (j = 0; j < ira_allocno_classes_num; j++)
1024     ira_important_classes[ira_important_classes_num++]
1025       = ira_allocno_classes[j];
1026   for (cl = 0; cl < N_REG_CLASSES; cl++)
1027     {
1028       ira_reg_allocno_class_p[cl] = false;
1029       ira_reg_pressure_class_p[cl] = false;
1030     }
1031   for (j = 0; j < ira_allocno_classes_num; j++)
1032     ira_reg_allocno_class_p[ira_allocno_classes[j]] = true;
1033   setup_pressure_classes ();
1034 }
1035 
1036 /* Setup translation in CLASS_TRANSLATE of all classes into a class
1037    given by array CLASSES of length CLASSES_NUM.  The function is used
1038    to translate any reg class to an allocno class or to a
1039    pressure class.  This translation is necessary for some
1040    calculations when we can use only allocno or pressure classes and
1041    such translation represents an approximate representation of all
1042    classes.
1043 
1044    The translation in case when allocatable hard register set of a
1045    given class is a subset of the allocatable hard register set of a
1046    class in CLASSES is pretty simple.  We use the smallest class from CLASSES
1047    containing a given class.  If allocatable hard register set of a
1048    given class is not a subset of any corresponding set of a class
1049    from CLASSES, we use the cheapest (from the load/store point of view)
1050    class from CLASSES whose set intersects with the given class set.  */
1051 static void
1052 setup_class_translate_array (enum reg_class *class_translate,
1053 			     int classes_num, enum reg_class *classes)
1054 {
1055   int cl, mode;
1056   enum reg_class aclass, best_class, *cl_ptr;
1057   int i, cost, min_cost, best_cost;
1058 
1059   for (cl = 0; cl < N_REG_CLASSES; cl++)
1060     class_translate[cl] = NO_REGS;
1061 
1062   for (i = 0; i < classes_num; i++)
1063     {
1064       aclass = classes[i];
1065       for (cl_ptr = &alloc_reg_class_subclasses[aclass][0];
1066 	   (cl = *cl_ptr) != LIM_REG_CLASSES;
1067 	   cl_ptr++)
1068 	if (class_translate[cl] == NO_REGS)
1069 	  class_translate[cl] = aclass;
1070       class_translate[aclass] = aclass;
1071     }
1072   /* For classes which are not fully covered by one of given classes
1073      (in other words covered by more than one given class), use the
1074      cheapest class.  */
1075   for (cl = 0; cl < N_REG_CLASSES; cl++)
1076     {
1077       if (cl == NO_REGS || class_translate[cl] != NO_REGS)
1078 	continue;
1079       best_class = NO_REGS;
1080       best_cost = INT_MAX;
1081       for (i = 0; i < classes_num; i++)
1082 	{
1083 	  aclass = classes[i];
1084 	  COPY_HARD_REG_SET (temp_hard_regset,
1085 			     reg_class_contents[aclass]);
1086 	  AND_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl]);
1087 	  AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
1088 	  if (! hard_reg_set_empty_p (temp_hard_regset))
1089 	    {
1090 	      min_cost = INT_MAX;
1091 	      for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
1092 		{
1093 		  cost = (ira_memory_move_cost[mode][cl][0]
1094 			  + ira_memory_move_cost[mode][cl][1]);
1095 		  if (min_cost > cost)
1096 		    min_cost = cost;
1097 		}
1098 	      if (best_class == NO_REGS || best_cost > min_cost)
1099 		{
1100 		  best_class = aclass;
1101 		  best_cost = min_cost;
1102 		}
1103 	    }
1104 	}
1105       class_translate[cl] = best_class;
1106     }
1107 }
1108 
1109 /* Set up array IRA_ALLOCNO_CLASS_TRANSLATE and
1110    IRA_PRESSURE_CLASS_TRANSLATE.  */
1111 static void
1112 setup_class_translate (void)
1113 {
1114   setup_class_translate_array (ira_allocno_class_translate,
1115 			       ira_allocno_classes_num, ira_allocno_classes);
1116   setup_class_translate_array (ira_pressure_class_translate,
1117 			       ira_pressure_classes_num, ira_pressure_classes);
1118 }
1119 
1120 /* Order numbers of allocno classes in original target allocno class
1121    array, -1 for non-allocno classes.  */
1122 static int allocno_class_order[N_REG_CLASSES];
1123 
1124 /* The function used to sort the important classes.  */
1125 static int
1126 comp_reg_classes_func (const void *v1p, const void *v2p)
1127 {
1128   enum reg_class cl1 = *(const enum reg_class *) v1p;
1129   enum reg_class cl2 = *(const enum reg_class *) v2p;
1130   enum reg_class tcl1, tcl2;
1131   int diff;
1132 
1133   tcl1 = ira_allocno_class_translate[cl1];
1134   tcl2 = ira_allocno_class_translate[cl2];
1135   if (tcl1 != NO_REGS && tcl2 != NO_REGS
1136       && (diff = allocno_class_order[tcl1] - allocno_class_order[tcl2]) != 0)
1137     return diff;
1138   return (int) cl1 - (int) cl2;
1139 }
1140 
1141 /* For correct work of function setup_reg_class_relations we need to
1142    reorder important classes according to the order of their allocno
1143    classes.  It places important classes containing the same
1144    allocatable hard register set adjacent to each other, and the
1145    allocno class with that allocatable hard register set right after
1146    the other important classes with the same set.
1147 
1148    In example from comments of function
1149    setup_allocno_and_important_classes, it places LEGACY_REGS and
1150    GENERAL_REGS close to each other and GENERAL_REGS is after
1151    LEGACY_REGS.  */
1152 static void
1153 reorder_important_classes (void)
1154 {
1155   int i;
1156 
1157   for (i = 0; i < N_REG_CLASSES; i++)
1158     allocno_class_order[i] = -1;
1159   for (i = 0; i < ira_allocno_classes_num; i++)
1160     allocno_class_order[ira_allocno_classes[i]] = i;
1161   qsort (ira_important_classes, ira_important_classes_num,
1162 	 sizeof (enum reg_class), comp_reg_classes_func);
1163   for (i = 0; i < ira_important_classes_num; i++)
1164     ira_important_class_nums[ira_important_classes[i]] = i;
1165 }
1166 
1167 /* Set up IRA_REG_CLASS_SUBUNION, IRA_REG_CLASS_SUPERUNION,
1168    IRA_REG_CLASS_SUPER_CLASSES, IRA_REG_CLASSES_INTERSECT, and
1169    IRA_REG_CLASSES_INTERSECT_P.  For the meaning of the relations,
1170    please see corresponding comments in ira-int.h.  */
1171 static void
1172 setup_reg_class_relations (void)
1173 {
1174   int i, cl1, cl2, cl3;
1175   HARD_REG_SET intersection_set, union_set, temp_set2;
1176   bool important_class_p[N_REG_CLASSES];
1177 
1178   memset (important_class_p, 0, sizeof (important_class_p));
1179   for (i = 0; i < ira_important_classes_num; i++)
1180     important_class_p[ira_important_classes[i]] = true;
1181   for (cl1 = 0; cl1 < N_REG_CLASSES; cl1++)
1182     {
1183       ira_reg_class_super_classes[cl1][0] = LIM_REG_CLASSES;
1184       for (cl2 = 0; cl2 < N_REG_CLASSES; cl2++)
1185 	{
1186 	  ira_reg_classes_intersect_p[cl1][cl2] = false;
1187 	  ira_reg_class_intersect[cl1][cl2] = NO_REGS;
1188 	  COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl1]);
1189 	  AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
1190 	  COPY_HARD_REG_SET (temp_set2, reg_class_contents[cl2]);
1191 	  AND_COMPL_HARD_REG_SET (temp_set2, no_unit_alloc_regs);
1192 	  if (hard_reg_set_empty_p (temp_hard_regset)
1193 	      && hard_reg_set_empty_p (temp_set2))
1194 	    {
1195 	      /* Both classes have no allocatable hard registers
1196 		 -- take all class hard registers into account and use
1197 		 reg_class_subunion and reg_class_superunion.  */
1198 	      for (i = 0;; i++)
1199 		{
1200 		  cl3 = reg_class_subclasses[cl1][i];
1201 		  if (cl3 == LIM_REG_CLASSES)
1202 		    break;
1203 		  if (reg_class_subset_p (ira_reg_class_intersect[cl1][cl2],
1204 					  (enum reg_class) cl3))
1205 		    ira_reg_class_intersect[cl1][cl2] = (enum reg_class) cl3;
1206 		}
1207 	      ira_reg_class_subunion[cl1][cl2] = reg_class_subunion[cl1][cl2];
1208 	      ira_reg_class_superunion[cl1][cl2] = reg_class_superunion[cl1][cl2];
1209 	      continue;
1210 	    }
1211 	  ira_reg_classes_intersect_p[cl1][cl2]
1212 	    = hard_reg_set_intersect_p (temp_hard_regset, temp_set2);
1213 	  if (important_class_p[cl1] && important_class_p[cl2]
1214 	      && hard_reg_set_subset_p (temp_hard_regset, temp_set2))
1215 	    {
1216 	      /* CL1 and CL2 are important classes and CL1 allocatable
1217 		 hard register set is inside of CL2 allocatable hard
1218 		 registers -- record CL2 as a superclass of CL1.  */
1219 	      enum reg_class *p;
1220 
1221 	      p = &ira_reg_class_super_classes[cl1][0];
1222 	      while (*p != LIM_REG_CLASSES)
1223 		p++;
1224 	      *p++ = (enum reg_class) cl2;
1225 	      *p = LIM_REG_CLASSES;
1226 	    }
1227 	  ira_reg_class_subunion[cl1][cl2] = NO_REGS;
1228 	  ira_reg_class_superunion[cl1][cl2] = NO_REGS;
1229 	  COPY_HARD_REG_SET (intersection_set, reg_class_contents[cl1]);
1230 	  AND_HARD_REG_SET (intersection_set, reg_class_contents[cl2]);
1231 	  AND_COMPL_HARD_REG_SET (intersection_set, no_unit_alloc_regs);
1232 	  COPY_HARD_REG_SET (union_set, reg_class_contents[cl1]);
1233 	  IOR_HARD_REG_SET (union_set, reg_class_contents[cl2]);
1234 	  AND_COMPL_HARD_REG_SET (union_set, no_unit_alloc_regs);
1235 	  for (i = 0; i < ira_important_classes_num; i++)
1236 	    {
1237 	      cl3 = ira_important_classes[i];
1238 	      COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl3]);
1239 	      AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
1240 	      if (hard_reg_set_subset_p (temp_hard_regset, intersection_set))
1241 		{
1242 		  /* CL3 allocatable hard register set is inside of
1243 		     intersection of allocatable hard register sets
1244 		     of CL1 and CL2.  */
1245 		  COPY_HARD_REG_SET
1246 		    (temp_set2,
1247 		     reg_class_contents[(int)
1248 					ira_reg_class_intersect[cl1][cl2]]);
1249 		  AND_COMPL_HARD_REG_SET (temp_set2, no_unit_alloc_regs);
1250 	 	  if (! hard_reg_set_subset_p (temp_hard_regset, temp_set2)
1251 		      /* If the allocatable hard register sets are the
1252 			 same, prefer GENERAL_REGS or the smallest
1253 			 class for debugging purposes.  */
1254 		      || (hard_reg_set_equal_p (temp_hard_regset, temp_set2)
1255 			  && (cl3 == GENERAL_REGS
1256 			      || (ira_reg_class_intersect[cl1][cl2] != GENERAL_REGS
1257 				  && hard_reg_set_subset_p
1258 				     (reg_class_contents[cl3],
1259 				      reg_class_contents
1260 				      [(int) ira_reg_class_intersect[cl1][cl2]])))))
1261 		    ira_reg_class_intersect[cl1][cl2] = (enum reg_class) cl3;
1262 		}
1263 	      if (hard_reg_set_subset_p (temp_hard_regset, union_set))
1264 		{
1265 		  /* CL3 allocatable hard register set is inside of
1266 		     union of allocatable hard register sets of CL1
1267 		     and CL2.  */
1268 		  COPY_HARD_REG_SET
1269 		    (temp_set2,
1270 		     reg_class_contents[(int) ira_reg_class_subunion[cl1][cl2]]);
1271 		  AND_COMPL_HARD_REG_SET (temp_set2, no_unit_alloc_regs);
1272 	 	  if (ira_reg_class_subunion[cl1][cl2] == NO_REGS
1273 		      || (hard_reg_set_subset_p (temp_set2, temp_hard_regset)
1274 
1275 			  && (! hard_reg_set_equal_p (temp_set2,
1276 						      temp_hard_regset)
1277 			      || cl3 == GENERAL_REGS
1278 			      /* If the allocatable hard register sets are the
1279 				 same, prefer GENERAL_REGS or the smallest
1280 				 class for debugging purposes.  */
1281 			      || (ira_reg_class_subunion[cl1][cl2] != GENERAL_REGS
1282 				  && hard_reg_set_subset_p
1283 				     (reg_class_contents[cl3],
1284 				      reg_class_contents
1285 				      [(int) ira_reg_class_subunion[cl1][cl2]])))))
1286 		    ira_reg_class_subunion[cl1][cl2] = (enum reg_class) cl3;
1287 		}
1288 	      if (hard_reg_set_subset_p (union_set, temp_hard_regset))
1289 		{
1290 		  /* CL3 allocatable hard register set contains union
1291 		     of allocatable hard register sets of CL1 and
1292 		     CL2.  */
1293 		  COPY_HARD_REG_SET
1294 		    (temp_set2,
1295 		     reg_class_contents[(int) ira_reg_class_superunion[cl1][cl2]]);
1296 		  AND_COMPL_HARD_REG_SET (temp_set2, no_unit_alloc_regs);
1297 	 	  if (ira_reg_class_superunion[cl1][cl2] == NO_REGS
1298 		      || (hard_reg_set_subset_p (temp_hard_regset, temp_set2)
1299 
1300 			  && (! hard_reg_set_equal_p (temp_set2,
1301 						      temp_hard_regset)
1302 			      || cl3 == GENERAL_REGS
1303 			      /* If the allocatable hard register sets are the
1304 				 same, prefer GENERAL_REGS or the smallest
1305 				 class for debugging purposes.  */
1306 			      || (ira_reg_class_superunion[cl1][cl2] != GENERAL_REGS
1307 				  && hard_reg_set_subset_p
1308 				     (reg_class_contents[cl3],
1309 				      reg_class_contents
1310 				      [(int) ira_reg_class_superunion[cl1][cl2]])))))
1311 		    ira_reg_class_superunion[cl1][cl2] = (enum reg_class) cl3;
1312 		}
1313 	    }
1314 	}
1315     }
1316 }
1317 
1318 /* Output all possible allocno classes and the translation map into
1319    file F.  */
1320 static void
1321 print_classes (FILE *f, bool pressure_p)
1322 {
1323   int classes_num = (pressure_p
1324 		     ? ira_pressure_classes_num : ira_allocno_classes_num);
1325   enum reg_class *classes = (pressure_p
1326 			     ? ira_pressure_classes : ira_allocno_classes);
1327   enum reg_class *class_translate = (pressure_p
1328 				     ? ira_pressure_class_translate
1329 				     : ira_allocno_class_translate);
1330   static const char *const reg_class_names[] = REG_CLASS_NAMES;
1331   int i;
1332 
1333   fprintf (f, "%s classes:\n", pressure_p ? "Pressure" : "Allocno");
1334   for (i = 0; i < classes_num; i++)
1335     fprintf (f, " %s", reg_class_names[classes[i]]);
1336   fprintf (f, "\nClass translation:\n");
1337   for (i = 0; i < N_REG_CLASSES; i++)
1338     fprintf (f, " %s -> %s\n", reg_class_names[i],
1339 	     reg_class_names[class_translate[i]]);
1340 }
1341 
1342 /* Output all possible allocno and translation classes and the
1343    translation maps into stderr.  */
1344 void
1345 ira_debug_allocno_classes (void)
1346 {
1347   print_classes (stderr, false);
1348   print_classes (stderr, true);
1349 }
1350 
1351 /* Set up different arrays concerning class subsets, allocno and
1352    important classes.  */
1353 static void
1354 find_reg_classes (void)
1355 {
1356   setup_allocno_and_important_classes ();
1357   setup_class_translate ();
1358   reorder_important_classes ();
1359   setup_reg_class_relations ();
1360 }
1361 
1362 
1363 
1364 /* Set up the array IRA_HARD_REGNO_ALLOCNO_CLASS.  */
1365 static void
1366 setup_hard_regno_aclass (void)
1367 {
1368   int i;
1369 
1370   for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1371     {
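      /* The enabled variant below maps hard register I through its
	 natural class and the allocno class translation; the disabled
	 alternative instead searches the allocno classes for the first
	 one containing I.  */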
1372 #if 1
1373       ira_hard_regno_allocno_class[i]
1374 	= (TEST_HARD_REG_BIT (no_unit_alloc_regs, i)
1375 	   ? NO_REGS
1376 	   : ira_allocno_class_translate[REGNO_REG_CLASS (i)]);
1377 #else
1378       int j;
1379       enum reg_class cl;
1380       ira_hard_regno_allocno_class[i] = NO_REGS;
1381       for (j = 0; j < ira_allocno_classes_num; j++)
1382  	{
1383 	  cl = ira_allocno_classes[j];
1384  	  if (ira_class_hard_reg_index[cl][i] >= 0)
1385  	    {
1386 	      ira_hard_regno_allocno_class[i] = cl;
1387  	      break;
1388  	    }
1389  	}
1390 #endif
1391     }
1392 }
1393 
1394 
1395 
1396 /* Form IRA_REG_CLASS_MAX_NREGS and IRA_REG_CLASS_MIN_NREGS maps.  */
1397 static void
1398 setup_reg_class_nregs (void)
1399 {
1400   int i, cl, cl2, m;
1401 
1402   for (m = 0; m < MAX_MACHINE_MODE; m++)
1403     {
1404       for (cl = 0; cl < N_REG_CLASSES; cl++)
1405 	ira_reg_class_max_nregs[cl][m]
1406 	  = ira_reg_class_min_nregs[cl][m]
1407 	  = targetm.class_max_nregs ((reg_class_t) cl, (enum machine_mode) m);
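      /* The minimum may be smaller than the maximum: refine it downward
	 over all subclasses of CL, since a value of mode M placed in a
	 subclass may occupy fewer hard registers there.  */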
1408       for (cl = 0; cl < N_REG_CLASSES; cl++)
1409 	for (i = 0;
1410 	     (cl2 = alloc_reg_class_subclasses[cl][i]) != LIM_REG_CLASSES;
1411 	     i++)
1412 	  if (ira_reg_class_min_nregs[cl2][m]
1413 	      < ira_reg_class_min_nregs[cl][m])
1414 	    ira_reg_class_min_nregs[cl][m] = ira_reg_class_min_nregs[cl2][m];
1415     }
1416 }
1417 
1418 
1419 
1420 /* Set up IRA_PROHIBITED_CLASS_MODE_REGS.  */
1421 static void
1422 setup_prohibited_class_mode_regs (void)
1423 {
1424   int j, k, hard_regno, cl;
1425 
1426   for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
1427     {
1428       for (j = 0; j < NUM_MACHINE_MODES; j++)
1429 	{
1430 	  CLEAR_HARD_REG_SET (ira_prohibited_class_mode_regs[cl][j]);
1431 	  for (k = ira_class_hard_regs_num[cl] - 1; k >= 0; k--)
1432 	    {
1433 	      hard_regno = ira_class_hard_regs[cl][k];
1434 	      if (! HARD_REGNO_MODE_OK (hard_regno, (enum machine_mode) j))
1435 		SET_HARD_REG_BIT (ira_prohibited_class_mode_regs[cl][j],
1436 				  hard_regno);
1437 	    }
1438 	}
1439     }
1440 }
1441 
1442 /* Clarify IRA_PROHIBITED_CLASS_MODE_REGS by excluding hard registers
1443    spanning from one register pressure class to another.  This is
1444    called after the pressure classes have been defined.  */
1445 static void
1446 clarify_prohibited_class_mode_regs (void)
1447 {
1448   int j, k, hard_regno, cl, pclass, nregs;
1449 
1450   for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
1451     for (j = 0; j < NUM_MACHINE_MODES; j++)
1452       for (k = ira_class_hard_regs_num[cl] - 1; k >= 0; k--)
1453 	{
1454 	  hard_regno = ira_class_hard_regs[cl][k];
1455 	  if (TEST_HARD_REG_BIT (ira_prohibited_class_mode_regs[cl][j], hard_regno))
1456 	    continue;
1457 	  nregs = hard_regno_nregs[hard_regno][j];
1458           if (hard_regno + nregs > FIRST_PSEUDO_REGISTER)
1459             {
1460               SET_HARD_REG_BIT (ira_prohibited_class_mode_regs[cl][j],
1461                                 hard_regno);
1462                continue;
1463             }
1464 	  pclass = ira_pressure_class_translate[REGNO_REG_CLASS (hard_regno)];
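	  /* If any hard register making up the multi-register value
	     (HARD_REGNO .. HARD_REGNO + NREGS - 1) belongs to a
	     different pressure class, prohibit HARD_REGNO for this
	     class and mode.  */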
1465 	  for (nregs-- ;nregs >= 0; nregs--)
1466 	    if (((enum reg_class) pclass
1467 		 != ira_pressure_class_translate[REGNO_REG_CLASS
1468 						 (hard_regno + nregs)]))
1469 	      {
1470 		SET_HARD_REG_BIT (ira_prohibited_class_mode_regs[cl][j],
1471 				  hard_regno);
1472 		break;
1473 	      }
1474 	}
1475 }
1476 
1477 
1478 
1479 /* Allocate and initialize IRA_REGISTER_MOVE_COST,
1480    IRA_MAX_REGISTER_MOVE_COST, IRA_MAY_MOVE_IN_COST,
1481    IRA_MAY_MOVE_OUT_COST, IRA_MAX_MAY_MOVE_IN_COST, and
1482    IRA_MAX_MAY_MOVE_OUT_COST for MODE if it is not done yet.  */
1483 void
1484 ira_init_register_move_cost (enum machine_mode mode)
1485 {
1486   int cl1, cl2, cl3;
1487 
1488   ira_assert (ira_register_move_cost[mode] == NULL
1489 	      && ira_max_register_move_cost[mode] == NULL
1490 	      && ira_may_move_in_cost[mode] == NULL
1491 	      && ira_may_move_out_cost[mode] == NULL
1492 	      && ira_max_may_move_in_cost[mode] == NULL
1493 	      && ira_max_may_move_out_cost[mode] == NULL);
1494   if (move_cost[mode] == NULL)
1495     init_move_cost (mode);
1496   ira_register_move_cost[mode] = move_cost[mode];
1497   /* Don't use ira_allocate because the tables exist outside the scope
1498      of an IRA call.  */
1499   ira_max_register_move_cost[mode]
1500     = (move_table *) xmalloc (sizeof (move_table) * N_REG_CLASSES);
1501   memcpy (ira_max_register_move_cost[mode], ira_register_move_cost[mode],
1502 	  sizeof (move_table) * N_REG_CLASSES);
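  /* Make ira_max_register_move_cost[mode][cl2][*] (and the symmetric
     entries) an upper bound: the maximum of the move costs over every
     allocatable class CL1 contained in CL2 that can hold MODE.  */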
1503   for (cl1 = 0; cl1 < N_REG_CLASSES; cl1++)
1504     {
1505       /* Some subclasses are too small to have enough registers to hold
1506 	 a value of MODE.  Just ignore them.  */
1507       if (ira_reg_class_max_nregs[cl1][mode] > ira_available_class_regs[cl1])
1508 	continue;
1509       COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl1]);
1510       AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
1511       if (hard_reg_set_empty_p (temp_hard_regset))
1512 	continue;
1513       for (cl2 = 0; cl2 < N_REG_CLASSES; cl2++)
1514 	if (hard_reg_set_subset_p (reg_class_contents[cl1],
1515 				   reg_class_contents[cl2]))
1516 	  for (cl3 = 0; cl3 < N_REG_CLASSES; cl3++)
1517 	    {
1518 	      if (ira_max_register_move_cost[mode][cl2][cl3]
1519 		  < ira_register_move_cost[mode][cl1][cl3])
1520 		ira_max_register_move_cost[mode][cl2][cl3]
1521 		  = ira_register_move_cost[mode][cl1][cl3];
1522 	      if (ira_max_register_move_cost[mode][cl3][cl2]
1523 		  < ira_register_move_cost[mode][cl3][cl1])
1524 		ira_max_register_move_cost[mode][cl3][cl2]
1525 		  = ira_register_move_cost[mode][cl3][cl1];
1526 	    }
1527     }
1528   ira_may_move_in_cost[mode]
1529     = (move_table *) xmalloc (sizeof (move_table) * N_REG_CLASSES);
1530   memcpy (ira_may_move_in_cost[mode], may_move_in_cost[mode],
1531 	  sizeof (move_table) * N_REG_CLASSES);
1532   ira_may_move_out_cost[mode]
1533     = (move_table *) xmalloc (sizeof (move_table) * N_REG_CLASSES);
1534   memcpy (ira_may_move_out_cost[mode], may_move_out_cost[mode],
1535 	  sizeof (move_table) * N_REG_CLASSES);
1536   ira_max_may_move_in_cost[mode]
1537     = (move_table *) xmalloc (sizeof (move_table) * N_REG_CLASSES);
1538   memcpy (ira_max_may_move_in_cost[mode], ira_max_register_move_cost[mode],
1539 	  sizeof (move_table) * N_REG_CLASSES);
1540   ira_max_may_move_out_cost[mode]
1541     = (move_table *) xmalloc (sizeof (move_table) * N_REG_CLASSES);
1542   memcpy (ira_max_may_move_out_cost[mode], ira_max_register_move_cost[mode],
1543 	  sizeof (move_table) * N_REG_CLASSES);
1544   for (cl1 = 0; cl1 < N_REG_CLASSES; cl1++)
1545     {
1546       for (cl2 = 0; cl2 < N_REG_CLASSES; cl2++)
1547 	{
1548 	  COPY_HARD_REG_SET (temp_hard_regset, reg_class_contents[cl2]);
1549 	  AND_COMPL_HARD_REG_SET (temp_hard_regset, no_unit_alloc_regs);
1550 	  if (hard_reg_set_empty_p (temp_hard_regset))
1551 	    continue;
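	  /* Moves between a class and a class containing it are treated
	     as free in the "may move" tables.  */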
1552 	  if (ira_class_subset_p[cl1][cl2])
1553 	    ira_may_move_in_cost[mode][cl1][cl2] = 0;
1554 	  if (ira_class_subset_p[cl2][cl1])
1555 	    ira_may_move_out_cost[mode][cl1][cl2] = 0;
1556 	  if (ira_class_subset_p[cl1][cl2])
1557 	    ira_max_may_move_in_cost[mode][cl1][cl2] = 0;
1558 	  if (ira_class_subset_p[cl2][cl1])
1559 	    ira_max_may_move_out_cost[mode][cl1][cl2] = 0;
1560 	  ira_register_move_cost[mode][cl1][cl2]
1561 	    = ira_max_register_move_cost[mode][cl1][cl2];
1562 	  ira_may_move_in_cost[mode][cl1][cl2]
1563 	    = ira_max_may_move_in_cost[mode][cl1][cl2];
1564 	  ira_may_move_out_cost[mode][cl1][cl2]
1565 	    = ira_max_may_move_out_cost[mode][cl1][cl2];
1566 	}
1567     }
1568 }
1569 
1570 
1571 
1572 /* This is called once per compiler run.  It sets up different
1573    arrays whose values don't depend on the function being
1574    compiled.  */
1575 void
1576 ira_init_once (void)
1577 {
1578   int mode;
1579 
1580   for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
1581     {
1582       ira_register_move_cost[mode] = NULL;
1583       ira_max_register_move_cost[mode] = NULL;
1584       ira_may_move_in_cost[mode] = NULL;
1585       ira_may_move_out_cost[mode] = NULL;
1586       ira_max_may_move_in_cost[mode] = NULL;
1587       ira_max_may_move_out_cost[mode] = NULL;
1588     }
1589   ira_init_costs_once ();
1590 }
1591 
1592 /* Free ira_max_register_move_cost, ira_may_move_in_cost,
1593    ira_may_move_out_cost, ira_max_may_move_in_cost, and
1594    ira_max_may_move_out_cost for each mode.  */
1595 static void
1596 free_register_move_costs (void)
1597 {
1598   int mode;
1599 
1600   for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
1601     {
1602       free (ira_max_register_move_cost[mode]);
1603       free (ira_may_move_in_cost[mode]);
1604       free (ira_may_move_out_cost[mode]);
1605       free (ira_max_may_move_in_cost[mode]);
1606       free (ira_max_may_move_out_cost[mode]);
1607       ira_register_move_cost[mode] = NULL;
1608       ira_max_register_move_cost[mode] = NULL;
1609       ira_may_move_in_cost[mode] = NULL;
1610       ira_may_move_out_cost[mode] = NULL;
1611       ira_max_may_move_in_cost[mode] = NULL;
1612       ira_max_may_move_out_cost[mode] = NULL;
1613     }
1614 }
1615 
1616 /* This is called every time the register-related information is
1617    changed.  */
1618 void
1619 ira_init (void)
1620 {
1621   free_register_move_costs ();
1622   setup_reg_mode_hard_regset ();
1623   setup_alloc_regs (flag_omit_frame_pointer != 0);
1624   setup_class_subset_and_memory_move_costs ();
1625   setup_reg_class_nregs ();
1626   setup_prohibited_class_mode_regs ();
1627   find_reg_classes ();
1628   clarify_prohibited_class_mode_regs ();
1629   setup_hard_regno_aclass ();
1630   ira_init_costs ();
1631 }
1632 
1633 /* This is called once at the end of the compiler run.  */
1634 void
1635 ira_finish_once (void)
1636 {
1637   ira_finish_costs_once ();
1638   free_register_move_costs ();
1639 }
1640 
1641 
1642 #define ira_prohibited_mode_move_regs_initialized_p \
1643   (this_target_ira_int->x_ira_prohibited_mode_move_regs_initialized_p)
1644 
1645 /* Set up IRA_PROHIBITED_MODE_MOVE_REGS.  */
1646 static void
1647 setup_prohibited_mode_move_regs (void)
1648 {
1649   int i, j;
1650   rtx test_reg1, test_reg2, move_pat, move_insn;
1651 
1652   if (ira_prohibited_mode_move_regs_initialized_p)
1653     return;
1654   ira_prohibited_mode_move_regs_initialized_p = true;
1655   test_reg1 = gen_rtx_REG (VOIDmode, 0);
1656   test_reg2 = gen_rtx_REG (VOIDmode, 0);
1657   move_pat = gen_rtx_SET (VOIDmode, test_reg1, test_reg2);
1658   move_insn = gen_rtx_INSN (VOIDmode, 0, 0, 0, 0, move_pat, 0, -1, 0);
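  /* For each mode and hard register, build the move "reg:M j = reg:M j"
     and ask the backend whether it is recognized and satisfies its
     constraints; only then is J allowed for mode-M moves.  */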
1659   for (i = 0; i < NUM_MACHINE_MODES; i++)
1660     {
1661       SET_HARD_REG_SET (ira_prohibited_mode_move_regs[i]);
1662       for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
1663 	{
1664 	  if (! HARD_REGNO_MODE_OK (j, (enum machine_mode) i))
1665 	    continue;
1666 	  SET_REGNO_RAW (test_reg1, j);
1667 	  PUT_MODE (test_reg1, (enum machine_mode) i);
1668 	  SET_REGNO_RAW (test_reg2, j);
1669 	  PUT_MODE (test_reg2, (enum machine_mode) i);
1670 	  INSN_CODE (move_insn) = -1;
1671 	  recog_memoized (move_insn);
1672 	  if (INSN_CODE (move_insn) < 0)
1673 	    continue;
1674 	  extract_insn (move_insn);
1675 	  if (! constrain_operands (1))
1676 	    continue;
1677 	  CLEAR_HARD_REG_BIT (ira_prohibited_mode_move_regs[i], j);
1678 	}
1679     }
1680 }
1681 
1682 
1683 
1684 /* Return nonzero if REGNO is a particularly bad choice for reloading X.  */
1685 static bool
1686 ira_bad_reload_regno_1 (int regno, rtx x)
1687 {
1688   int x_regno, n, i;
1689   ira_allocno_t a;
1690   enum reg_class pref;
1691 
1692   /* We only deal with pseudo regs.  */
1693   if (! x || GET_CODE (x) != REG)
1694     return false;
1695 
1696   x_regno = REGNO (x);
1697   if (x_regno < FIRST_PSEUDO_REGISTER)
1698     return false;
1699 
1700   /* If the pseudo prefers REGNO explicitly, then do not consider
1701      REGNO a bad spill choice.  */
1702   pref = reg_preferred_class (x_regno);
1703   if (reg_class_size[pref] == 1)
1704     return !TEST_HARD_REG_BIT (reg_class_contents[pref], regno);
1705 
1706   /* If the pseudo conflicts with REGNO, then we consider REGNO a
1707      poor choice for a reload regno.  */
1708   a = ira_regno_allocno_map[x_regno];
1709   n = ALLOCNO_NUM_OBJECTS (a);
1710   for (i = 0; i < n; i++)
1711     {
1712       ira_object_t obj = ALLOCNO_OBJECT (a, i);
1713       if (TEST_HARD_REG_BIT (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), regno))
1714 	return true;
1715     }
1716   return false;
1717 }
1718 
1719 /* Return nonzero if REGNO is a particularly bad choice for reloading
1720    IN or OUT.  */
1721 bool
1722 ira_bad_reload_regno (int regno, rtx in, rtx out)
1723 {
1724   return (ira_bad_reload_regno_1 (regno, in)
1725 	  || ira_bad_reload_regno_1 (regno, out));
1726 }
1727 
1728 /* Return TRUE if *LOC contains an asm.  */
1729 static int
1730 insn_contains_asm_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
1731 {
1732   if ( !*loc)
1733     return FALSE;
1734   if (GET_CODE (*loc) == ASM_OPERANDS)
1735     return TRUE;
1736   return FALSE;
1737 }
1738 
1739 
1740 /* Return TRUE if INSN contains an ASM.  */
1741 static bool
1742 insn_contains_asm (rtx insn)
1743 {
1744   return for_each_rtx (&insn, insn_contains_asm_1, NULL);
1745 }
1746 
1747 /* Add register clobbers from asm statements.  */
1748 static void
1749 compute_regs_asm_clobbered (void)
1750 {
1751   basic_block bb;
1752 
1753   FOR_EACH_BB (bb)
1754     {
1755       rtx insn;
1756       FOR_BB_INSNS_REVERSE (bb, insn)
1757 	{
1758 	  df_ref *def_rec;
1759 
1760 	  if (insn_contains_asm (insn))
1761 	    for (def_rec = DF_INSN_DEFS (insn); *def_rec; def_rec++)
1762 	      {
1763 		df_ref def = *def_rec;
1764 		unsigned int dregno = DF_REF_REGNO (def);
1765 		if (HARD_REGISTER_NUM_P (dregno))
1766 		  add_to_hard_reg_set (&crtl->asm_clobbers,
1767 				       GET_MODE (DF_REF_REAL_REG (def)),
1768 				       dregno);
1769 	      }
1770 	}
1771     }
1772 }
1773 
1774 
1775 /* Set up ELIMINABLE_REGSET, IRA_NO_ALLOC_REGS, and REGS_EVER_LIVE.  */
1776 void
1777 ira_setup_eliminable_regset (void)
1778 {
1779 #ifdef ELIMINABLE_REGS
1780   int i;
1781   static const struct {const int from, to; } eliminables[] = ELIMINABLE_REGS;
1782 #endif
1783   /* FIXME: If EXIT_IGNORE_STACK is set, we will not save and restore
1784      sp for alloca.  So we can't eliminate the frame pointer in that
1785      case.  At some point, we should improve this by emitting the
1786      sp-adjusting insns for this case.  */
1787   int need_fp
1788     = (! flag_omit_frame_pointer
1789        || (cfun->calls_alloca && EXIT_IGNORE_STACK)
1790        /* We need the frame pointer to catch stack overflow exceptions
1791 	  if the stack pointer is moving.  */
1792        || (flag_stack_check && STACK_CHECK_MOVING_SP)
1793        || crtl->accesses_prior_frames
1794        || crtl->stack_realign_needed
1795        || targetm.frame_pointer_required ());
1796 
1797   frame_pointer_needed = need_fp;
1798 
1799   COPY_HARD_REG_SET (ira_no_alloc_regs, no_unit_alloc_regs);
1800   CLEAR_HARD_REG_SET (eliminable_regset);
1801 
1802   compute_regs_asm_clobbered ();
1803 
1804   /* Build the regset of all eliminable registers and show we can't
1805      use those that we already know won't be eliminated.  */
1806 #ifdef ELIMINABLE_REGS
1807   for (i = 0; i < (int) ARRAY_SIZE (eliminables); i++)
1808     {
1809       bool cannot_elim
1810 	= (! targetm.can_eliminate (eliminables[i].from, eliminables[i].to)
1811 	   || (eliminables[i].to == STACK_POINTER_REGNUM && need_fp));
1812 
1813       if (!TEST_HARD_REG_BIT (crtl->asm_clobbers, eliminables[i].from))
1814 	{
1815 	    SET_HARD_REG_BIT (eliminable_regset, eliminables[i].from);
1816 
1817 	    if (cannot_elim)
1818 	      SET_HARD_REG_BIT (ira_no_alloc_regs, eliminables[i].from);
1819 	}
1820       else if (cannot_elim)
1821 	error ("%s cannot be used in asm here",
1822 	       reg_names[eliminables[i].from]);
1823       else
1824 	df_set_regs_ever_live (eliminables[i].from, true);
1825     }
1826 #if !HARD_FRAME_POINTER_IS_FRAME_POINTER
1827   if (!TEST_HARD_REG_BIT (crtl->asm_clobbers, HARD_FRAME_POINTER_REGNUM))
1828     {
1829       SET_HARD_REG_BIT (eliminable_regset, HARD_FRAME_POINTER_REGNUM);
1830       if (need_fp)
1831 	SET_HARD_REG_BIT (ira_no_alloc_regs, HARD_FRAME_POINTER_REGNUM);
1832     }
1833   else if (need_fp)
1834     error ("%s cannot be used in asm here",
1835 	   reg_names[HARD_FRAME_POINTER_REGNUM]);
1836   else
1837     df_set_regs_ever_live (HARD_FRAME_POINTER_REGNUM, true);
1838 #endif
1839 
1840 #else
1841   if (!TEST_HARD_REG_BIT (crtl->asm_clobbers, HARD_FRAME_POINTER_REGNUM))
1842     {
1843       SET_HARD_REG_BIT (eliminable_regset, FRAME_POINTER_REGNUM);
1844       if (need_fp)
1845 	SET_HARD_REG_BIT (ira_no_alloc_regs, FRAME_POINTER_REGNUM);
1846     }
1847   else if (need_fp)
1848     error ("%s cannot be used in asm here", reg_names[FRAME_POINTER_REGNUM]);
1849   else
1850     df_set_regs_ever_live (FRAME_POINTER_REGNUM, true);
1851 #endif
1852 }
1853 
1854 
1855 
1856 /* The length of the following two arrays.  */
1857 int ira_reg_equiv_len;
1858 
1859 /* The element value is TRUE if the corresponding regno value is
1860    invariant.  */
1861 bool *ira_reg_equiv_invariant_p;
1862 
1863 /* The element value is equiv constant of given pseudo-register or
1864    NULL_RTX.  */
1865 rtx *ira_reg_equiv_const;
1866 
1867 /* Set up the two arrays declared above.  */
1868 static void
1869 find_reg_equiv_invariant_const (void)
1870 {
1871   unsigned int i;
1872   bool invariant_p;
1873   rtx list, insn, note, constant, x;
1874 
1875   for (i = FIRST_PSEUDO_REGISTER; i < VEC_length (reg_equivs_t, reg_equivs); i++)
1876     {
1877       constant = NULL_RTX;
1878       invariant_p = false;
1879       for (list = reg_equiv_init (i); list != NULL_RTX; list = XEXP (list, 1))
1880 	{
1881 	  insn = XEXP (list, 0);
1882 	  note = find_reg_note (insn, REG_EQUIV, NULL_RTX);
1883 
1884 	  if (note == NULL_RTX)
1885 	    continue;
1886 
1887 	  x = XEXP (note, 0);
1888 
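	  /* When generating PIC, ignore constants that are not
	     legitimate PIC operands; anything else is a candidate.  */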
1889 	  if (! CONSTANT_P (x)
1890 	      || ! flag_pic || LEGITIMATE_PIC_OPERAND_P (x))
1891 	    {
1892 	      /* It can happen that a REG_EQUIV note contains a MEM
1893 		 that is not a legitimate memory operand.  As later
1894 		 stages of the reload assume that all addresses found
1895 		 in the reg_equiv_* arrays were originally legitimate,
1896 		 we ignore such REG_EQUIV notes.  */
1897 	      if (memory_operand (x, VOIDmode))
1898 		invariant_p = MEM_READONLY_P (x);
1899 	      else if (function_invariant_p (x))
1900 		{
1901 		  if (GET_CODE (x) == PLUS
1902 		      || x == frame_pointer_rtx || x == arg_pointer_rtx)
1903 		    invariant_p = true;
1904 		  else
1905 		    constant = x;
1906 		}
1907 	    }
1908 	}
1909       ira_reg_equiv_invariant_p[i] = invariant_p;
1910       ira_reg_equiv_const[i] = constant;
1911     }
1912 }
1913 
1914 
1915 
1916 /* Vector of substitutions of register numbers,
1917    used to map pseudo regs into hardware regs.
1918    This is set up as a result of register allocation.
1919    Element N is the hard reg assigned to pseudo reg N,
1920    or is -1 if no hard reg was assigned.
1921    If N is a hard reg number, element N is N.  */
1922 short *reg_renumber;
1923 
1924 /* Set up REG_RENUMBER and CALLER_SAVE_NEEDED (used by reload) from
1925    the allocation found by IRA.  */
1926 static void
1927 setup_reg_renumber (void)
1928 {
1929   int regno, hard_regno;
1930   ira_allocno_t a;
1931   ira_allocno_iterator ai;
1932 
1933   caller_save_needed = 0;
1934   FOR_EACH_ALLOCNO (a, ai)
1935     {
1936       /* There are no caps at this point.  */
1937       ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
1938       if (! ALLOCNO_ASSIGNED_P (a))
1939 	/* It can happen if A is not referenced but partially anticipated
1940 	   somewhere in a region.  */
1941 	ALLOCNO_ASSIGNED_P (a) = true;
1942       ira_free_allocno_updated_costs (a);
1943       hard_regno = ALLOCNO_HARD_REGNO (a);
1944       regno = ALLOCNO_REGNO (a);
1945       reg_renumber[regno] = (hard_regno < 0 ? -1 : hard_regno);
1946       if (hard_regno >= 0)
1947 	{
1948 	  int i, nwords;
1949 	  enum reg_class pclass;
1950 	  ira_object_t obj;
1951 
1952 	  pclass = ira_pressure_class_translate[REGNO_REG_CLASS (hard_regno)];
1953 	  nwords = ALLOCNO_NUM_OBJECTS (a);
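	  /* Mark every hard register outside the pressure class of the
	     assigned register as conflicting with the allocno's
	     objects.  */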
1954 	  for (i = 0; i < nwords; i++)
1955 	    {
1956 	      obj = ALLOCNO_OBJECT (a, i);
1957 	      IOR_COMPL_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
1958 				      reg_class_contents[pclass]);
1959 	    }
1960 	  if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0
1961 	      && ira_hard_reg_set_intersection_p (hard_regno, ALLOCNO_MODE (a),
1962 						  call_used_reg_set))
1963 	    {
1964 	      ira_assert (!optimize || flag_caller_saves
1965 			  || regno >= ira_reg_equiv_len
1966 			  || ira_reg_equiv_const[regno]
1967 			  || ira_reg_equiv_invariant_p[regno]);
1968 	      caller_save_needed = 1;
1969 	    }
1970 	}
1971     }
1972 }
1973 
1974 /* Set up allocno assignment flags for further allocation
1975    improvements.  */
1976 static void
1977 setup_allocno_assignment_flags (void)
1978 {
1979   int hard_regno;
1980   ira_allocno_t a;
1981   ira_allocno_iterator ai;
1982 
1983   FOR_EACH_ALLOCNO (a, ai)
1984     {
1985       if (! ALLOCNO_ASSIGNED_P (a))
1986 	/* It can happen if A is not referenced but partially anticipated
1987 	   somewhere in a region.  */
1988 	ira_free_allocno_updated_costs (a);
1989       hard_regno = ALLOCNO_HARD_REGNO (a);
1990       /* Don't assign hard registers to allocnos which are the
1991 	 destinations of a store removed at the end of a loop.  It makes
1992 	 no sense to keep the same value in different hard registers.
1993 	 It is also impossible to assign hard registers correctly to
1994 	 such allocnos because the cost info and the info about crossed
1995 	 calls are incorrect for them.  */
1996       ALLOCNO_ASSIGNED_P (a) = (hard_regno >= 0
1997 				|| ALLOCNO_EMIT_DATA (a)->mem_optimized_dest_p
1998 				|| (ALLOCNO_MEMORY_COST (a)
1999 				    - ALLOCNO_CLASS_COST (a)) < 0);
2000       ira_assert
2001 	(hard_regno < 0
2002 	 || ira_hard_reg_in_set_p (hard_regno, ALLOCNO_MODE (a),
2003 				   reg_class_contents[ALLOCNO_CLASS (a)]));
2004     }
2005 }
2006 
2007 /* Evaluate overall allocation cost and the costs for using hard
2008    registers and memory for allocnos.  */
2009 static void
2010 calculate_allocation_cost (void)
2011 {
2012   int hard_regno, cost;
2013   ira_allocno_t a;
2014   ira_allocno_iterator ai;
2015 
2016   ira_overall_cost = ira_reg_cost = ira_mem_cost = 0;
2017   FOR_EACH_ALLOCNO (a, ai)
2018     {
2019       hard_regno = ALLOCNO_HARD_REGNO (a);
2020       ira_assert (hard_regno < 0
2021 		  || (ira_hard_reg_in_set_p
2022 		      (hard_regno, ALLOCNO_MODE (a),
2023 		       reg_class_contents[ALLOCNO_CLASS (a)])));
2024       if (hard_regno < 0)
2025 	{
2026 	  cost = ALLOCNO_MEMORY_COST (a);
2027 	  ira_mem_cost += cost;
2028 	}
2029       else if (ALLOCNO_HARD_REG_COSTS (a) != NULL)
2030 	{
2031 	  cost = (ALLOCNO_HARD_REG_COSTS (a)
2032 		  [ira_class_hard_reg_index
2033 		   [ALLOCNO_CLASS (a)][hard_regno]]);
2034 	  ira_reg_cost += cost;
2035 	}
2036       else
2037 	{
2038 	  cost = ALLOCNO_CLASS_COST (a);
2039 	  ira_reg_cost += cost;
2040 	}
2041       ira_overall_cost += cost;
2042     }
2043 
2044   if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
2045     {
2046       fprintf (ira_dump_file,
2047 	       "+++Costs: overall %d, reg %d, mem %d, ld %d, st %d, move %d\n",
2048 	       ira_overall_cost, ira_reg_cost, ira_mem_cost,
2049 	       ira_load_cost, ira_store_cost, ira_shuffle_cost);
2050       fprintf (ira_dump_file, "+++       move loops %d, new jumps %d\n",
2051 	       ira_move_loops_num, ira_additional_jumps_num);
2052     }
2053 
2054 }
2055 
2056 #ifdef ENABLE_IRA_CHECKING
2057 /* Check the correctness of the allocation.  We do need this because
2058    of the complicated code that transforms the multi-region internal
2059    representation into the one-region representation.  */
2060 static void
2061 check_allocation (void)
2062 {
2063   ira_allocno_t a;
2064   int hard_regno, nregs, conflict_nregs;
2065   ira_allocno_iterator ai;
2066 
2067   FOR_EACH_ALLOCNO (a, ai)
2068     {
2069       int n = ALLOCNO_NUM_OBJECTS (a);
2070       int i;
2071 
2072       if (ALLOCNO_CAP_MEMBER (a) != NULL
2073 	  || (hard_regno = ALLOCNO_HARD_REGNO (a)) < 0)
2074 	continue;
2075       nregs = hard_regno_nregs[hard_regno][ALLOCNO_MODE (a)];
2076       if (nregs == 1)
2077 	/* We allocated a single hard register.  */
2078 	n = 1;
2079       else if (n > 1)
2080 	/* We allocated multiple hard registers, and we will test
2081 	   conflicts in a granularity of single hard regs.  */
2082 	nregs = 1;
2083 
2084       for (i = 0; i < n; i++)
2085 	{
2086 	  ira_object_t obj = ALLOCNO_OBJECT (a, i);
2087 	  ira_object_t conflict_obj;
2088 	  ira_object_conflict_iterator oci;
2089 	  int this_regno = hard_regno;
2090 	  if (n > 1)
2091 	    {
2092 	      if (REG_WORDS_BIG_ENDIAN)
2093 		this_regno += n - i - 1;
2094 	      else
2095 		this_regno += i;
2096 	    }
2097 	  FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
2098 	    {
2099 	      ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
2100 	      int conflict_hard_regno = ALLOCNO_HARD_REGNO (conflict_a);
2101 	      if (conflict_hard_regno < 0)
2102 		continue;
2103 
2104 	      conflict_nregs
2105 		= (hard_regno_nregs
2106 		   [conflict_hard_regno][ALLOCNO_MODE (conflict_a)]);
2107 
2108 	      if (ALLOCNO_NUM_OBJECTS (conflict_a) > 1
2109 		  && conflict_nregs == ALLOCNO_NUM_OBJECTS (conflict_a))
2110 		{
2111 		  if (REG_WORDS_BIG_ENDIAN)
2112 		    conflict_hard_regno += (ALLOCNO_NUM_OBJECTS (conflict_a)
2113 					    - OBJECT_SUBWORD (conflict_obj) - 1);
2114 		  else
2115 		    conflict_hard_regno += OBJECT_SUBWORD (conflict_obj);
2116 		  conflict_nregs = 1;
2117 		}
2118 
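	      /* Fail if the hard register ranges assigned to the two
		 conflicting objects overlap.  */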
2119 	      if ((conflict_hard_regno <= this_regno
2120 		 && this_regno < conflict_hard_regno + conflict_nregs)
2121 		|| (this_regno <= conflict_hard_regno
2122 		    && conflict_hard_regno < this_regno + nregs))
2123 		{
2124 		  fprintf (stderr, "bad allocation for %d and %d\n",
2125 			   ALLOCNO_REGNO (a), ALLOCNO_REGNO (conflict_a));
2126 		  gcc_unreachable ();
2127 		}
2128 	    }
2129 	}
2130     }
2131 }
2132 #endif
2133 
2134 /* Fix values of array REG_EQUIV_INIT after live range splitting done
2135    by IRA.  */
2136 static void
2137 fix_reg_equiv_init (void)
2138 {
2139   unsigned int max_regno = max_reg_num ();
2140   int i, new_regno, max;
2141   rtx x, prev, next, insn, set;
2142 
2143   if (VEC_length (reg_equivs_t, reg_equivs) < max_regno)
2144     {
2145       max = VEC_length (reg_equivs_t, reg_equivs);
2146       grow_reg_equivs ();
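      /* Live range splitting may have moved an equivalencing insn to a
	 new pseudo; relink each REG_EQUIV_INIT entry onto the list of
	 the pseudo its insn now sets or uses.  */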
2147       for (i = FIRST_PSEUDO_REGISTER; i < max; i++)
2148 	for (prev = NULL_RTX, x = reg_equiv_init (i);
2149 	     x != NULL_RTX;
2150 	     x = next)
2151 	  {
2152 	    next = XEXP (x, 1);
2153 	    insn = XEXP (x, 0);
2154 	    set = single_set (insn);
2155 	    ira_assert (set != NULL_RTX
2156 			&& (REG_P (SET_DEST (set)) || REG_P (SET_SRC (set))));
2157 	    if (REG_P (SET_DEST (set))
2158 		&& ((int) REGNO (SET_DEST (set)) == i
2159 		    || (int) ORIGINAL_REGNO (SET_DEST (set)) == i))
2160 	      new_regno = REGNO (SET_DEST (set));
2161 	    else if (REG_P (SET_SRC (set))
2162 		     && ((int) REGNO (SET_SRC (set)) == i
2163 			 || (int) ORIGINAL_REGNO (SET_SRC (set)) == i))
2164 	      new_regno = REGNO (SET_SRC (set));
2165 	    else
2166  	      gcc_unreachable ();
2167 	    if (new_regno == i)
2168 	      prev = x;
2169 	    else
2170 	      {
2171 		if (prev == NULL_RTX)
2172 		  reg_equiv_init (i) = next;
2173 		else
2174 		  XEXP (prev, 1) = next;
2175 		XEXP (x, 1) = reg_equiv_init (new_regno);
2176 		reg_equiv_init (new_regno) = x;
2177 	      }
2178 	  }
2179     }
2180 }
2181 
2182 #ifdef ENABLE_IRA_CHECKING
2183 /* Print redundant memory-memory copies.  */
2184 static void
2185 print_redundant_copies (void)
2186 {
2187   int hard_regno;
2188   ira_allocno_t a;
2189   ira_copy_t cp, next_cp;
2190   ira_allocno_iterator ai;
2191 
2192   FOR_EACH_ALLOCNO (a, ai)
2193     {
2194       if (ALLOCNO_CAP_MEMBER (a) != NULL)
2195 	/* It is a cap. */
2196 	continue;
2197       hard_regno = ALLOCNO_HARD_REGNO (a);
2198       if (hard_regno >= 0)
2199 	continue;
2200       for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
2201 	if (cp->first == a)
2202 	  next_cp = cp->next_first_allocno_copy;
2203 	else
2204 	  {
2205 	    next_cp = cp->next_second_allocno_copy;
2206 	    if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL
2207 		&& cp->insn != NULL_RTX
2208 		&& ALLOCNO_HARD_REGNO (cp->first) == hard_regno)
2209 	      fprintf (ira_dump_file,
2210 		       "        Redundant move from %d(freq %d):%d\n",
2211 		       INSN_UID (cp->insn), cp->freq, hard_regno);
2212 	  }
2213     }
2214 }
2215 #endif
2216 
2217 /* Setup preferred and alternative classes for new pseudo-registers
2218    created by IRA starting with START.  */
2219 static void
2220 setup_preferred_alternate_classes_for_new_pseudos (int start)
2221 {
2222   int i, old_regno;
2223   int max_regno = max_reg_num ();
2224 
2225   for (i = start; i < max_regno; i++)
2226     {
2227       old_regno = ORIGINAL_REGNO (regno_reg_rtx[i]);
2228       ira_assert (i != old_regno);
2229       setup_reg_classes (i, reg_preferred_class (old_regno),
2230 			 reg_alternate_class (old_regno),
2231 			 reg_allocno_class (old_regno));
2232       if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
2233 	fprintf (ira_dump_file,
2234 		 "    New r%d: setting preferred %s, alternative %s\n",
2235 		 i, reg_class_names[reg_preferred_class (old_regno)],
2236 		 reg_class_names[reg_alternate_class (old_regno)]);
2237     }
2238 }
2239 
2240 
2241 
2242 /* Regional allocation can create new pseudo-registers.  This function
2243    expands some arrays for pseudo-registers.  */
2244 static void
2245 expand_reg_info (int old_size)
2246 {
2247   int i;
2248   int size = max_reg_num ();
2249 
2250   resize_reg_info ();
2251   for (i = old_size; i < size; i++)
2252     setup_reg_classes (i, GENERAL_REGS, ALL_REGS, GENERAL_REGS);
2253 }
2254 
2255 /* Return TRUE if the register pressure in the function is too high.
2256    It is used to decide whether stack slot sharing is worth doing.  */
2257 static bool
2258 too_high_register_pressure_p (void)
2259 {
2260   int i;
2261   enum reg_class pclass;
2262 
2263   for (i = 0; i < ira_pressure_classes_num; i++)
2264     {
2265       pclass = ira_pressure_classes[i];
2266       if (ira_loop_tree_root->reg_pressure[pclass] > 10000)
2267 	return true;
2268     }
2269   return false;
2270 }
2271 
2272 
2273 
2274 /* Indicate that hard register number FROM was eliminated and replaced with
2275    an offset from hard register number TO.  The status of hard registers live
2276    at the start of a basic block is updated by replacing a use of FROM with
2277    a use of TO.  */
2278 
2279 void
2280 mark_elimination (int from, int to)
2281 {
2282   basic_block bb;
2283 
2284   FOR_EACH_BB (bb)
2285     {
2286       /* We don't use LIVE info in IRA.  */
2287       bitmap r = DF_LR_IN (bb);
2288 
2289       if (REGNO_REG_SET_P (r, from))
2290 	{
2291 	  CLEAR_REGNO_REG_SET (r, from);
2292 	  SET_REGNO_REG_SET (r, to);
2293 	}
2294     }
2295 }
2296 
2297 
2298 
2299 struct equivalence
2300 {
2301   /* Set when a REG_EQUIV note is found or created.  Used to
2302      keep track of what memory accesses might be created later,
2303      e.g. by reload.  */
2304   rtx replacement;
2305   rtx *src_p;
2306   /* The list of each instruction which initializes this register.  */
2307   rtx init_insns;
2308   /* Loop depth is used to recognize equivalences which appear
2309      to be present within the same loop (or in an inner loop).  */
2310   int loop_depth;
2311   /* Nonzero if this had a preexisting REG_EQUIV note.  */
2312   int is_arg_equivalence;
2313   /* Set when an attempt should be made to replace a register
2314      with the associated src_p entry.  */
2315   char replace;
2316 };
2317 
2318 /* reg_equiv[N] (where N is a pseudo reg number) is the equivalence
2319    structure for that register.  */
2320 static struct equivalence *reg_equiv;
2321 
2322 /* Used for communication between the following two functions: contains
2323    a MEM that we wish to ensure remains unchanged.  */
2324 static rtx equiv_mem;
2325 
2326 /* Set nonzero if EQUIV_MEM is modified.  */
2327 static int equiv_mem_modified;
2328 
2329 /* If EQUIV_MEM is modified by modifying DEST, indicate that it is modified.
2330    Called via note_stores.  */
2331 static void
2332 validate_equiv_mem_from_store (rtx dest, const_rtx set ATTRIBUTE_UNUSED,
2333 			       void *data ATTRIBUTE_UNUSED)
2334 {
2335   if ((REG_P (dest)
2336        && reg_overlap_mentioned_p (dest, equiv_mem))
2337       || (MEM_P (dest)
2338 	  && true_dependence (dest, VOIDmode, equiv_mem)))
2339     equiv_mem_modified = 1;
2340 }
2341 
2342 /* Verify that no store between START and the death of REG invalidates
2343    MEMREF.  MEMREF is invalidated by modifying a register used in MEMREF,
2344    by storing into an overlapping memory location, or with a non-const
2345    CALL_INSN.
2346 
2347    Return 1 if MEMREF remains valid.  */
2348 static int
2349 validate_equiv_mem (rtx start, rtx reg, rtx memref)
2350 {
2351   rtx insn;
2352   rtx note;
2353 
2354   equiv_mem = memref;
2355   equiv_mem_modified = 0;
2356 
2357   /* If the memory reference has side effects or is volatile, it isn't a
2358      valid equivalence.  */
2359   if (side_effects_p (memref))
2360     return 0;
2361 
2362   for (insn = start; insn && ! equiv_mem_modified; insn = NEXT_INSN (insn))
2363     {
2364       if (! INSN_P (insn))
2365 	continue;
2366 
2367       if (find_reg_note (insn, REG_DEAD, reg))
2368 	return 1;
2369 
2370       /* This used to ignore readonly memory and const/pure calls.  The problem
2371 	 is the equivalent form may reference a pseudo which gets assigned a
2372 	 call clobbered hard reg.  When we later replace REG with its
2373 	 equivalent form, the value in the call-clobbered reg has been
2374 	 changed and all hell breaks loose.  */
2375       if (CALL_P (insn))
2376 	return 0;
2377 
2378       note_stores (PATTERN (insn), validate_equiv_mem_from_store, NULL);
2379 
2380       /* If a register mentioned in MEMREF is modified via an
2381 	 auto-increment, we lose the equivalence.  Do the same if one
2382 	 dies; although we could extend the life, it doesn't seem worth
2383 	 the trouble.  */
2384 
2385       for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2386 	if ((REG_NOTE_KIND (note) == REG_INC
2387 	     || REG_NOTE_KIND (note) == REG_DEAD)
2388 	    && REG_P (XEXP (note, 0))
2389 	    && reg_overlap_mentioned_p (XEXP (note, 0), memref))
2390 	  return 0;
2391     }
2392 
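  /* We scanned to the end (or MEMREF was clobbered) without seeing REG
     die, so be conservative and treat the equivalence as invalid.  */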
2393   return 0;
2394 }
2395 
2396 /* Returns zero if X is known to be invariant.  */
2397 static int
2398 equiv_init_varies_p (rtx x)
2399 {
2400   RTX_CODE code = GET_CODE (x);
2401   int i;
2402   const char *fmt;
2403 
2404   switch (code)
2405     {
2406     case MEM:
2407       return !MEM_READONLY_P (x) || equiv_init_varies_p (XEXP (x, 0));
2408 
2409     case CONST:
2410     case CONST_INT:
2411     case CONST_DOUBLE:
2412     case CONST_FIXED:
2413     case CONST_VECTOR:
2414     case SYMBOL_REF:
2415     case LABEL_REF:
2416       return 0;
2417 
2418     case REG:
2419       return reg_equiv[REGNO (x)].replace == 0 && rtx_varies_p (x, 0);
2420 
2421     case ASM_OPERANDS:
2422       if (MEM_VOLATILE_P (x))
2423 	return 1;
2424 
2425       /* Fall through.  */
2426 
2427     default:
2428       break;
2429     }
2430 
2431   fmt = GET_RTX_FORMAT (code);
2432   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2433     if (fmt[i] == 'e')
2434       {
2435 	if (equiv_init_varies_p (XEXP (x, i)))
2436 	  return 1;
2437       }
2438     else if (fmt[i] == 'E')
2439       {
2440 	int j;
2441 	for (j = 0; j < XVECLEN (x, i); j++)
2442 	  if (equiv_init_varies_p (XVECEXP (x, i, j)))
2443 	    return 1;
2444       }
2445 
2446   return 0;
2447 }
2448 
2449 /* Returns nonzero if X (used to initialize register REGNO) is movable.
2450    X is only movable if the registers it uses have equivalent initializations
2451    which appear to be within the same loop (or in an inner loop) and movable
2452    or if they are not candidates for local_alloc and don't vary.  */
2453 static int
2454 equiv_init_movable_p (rtx x, int regno)
2455 {
2456   int i, j;
2457   const char *fmt;
2458   enum rtx_code code = GET_CODE (x);
2459 
2460   switch (code)
2461     {
2462     case SET:
2463       return equiv_init_movable_p (SET_SRC (x), regno);
2464 
2465     case CC0:
2466     case CLOBBER:
2467       return 0;
2468 
2469     case PRE_INC:
2470     case PRE_DEC:
2471     case POST_INC:
2472     case POST_DEC:
2473     case PRE_MODIFY:
2474     case POST_MODIFY:
2475       return 0;
2476 
2477     case REG:
2478       return ((reg_equiv[REGNO (x)].loop_depth >= reg_equiv[regno].loop_depth
2479 	       && reg_equiv[REGNO (x)].replace)
2480 	      || (REG_BASIC_BLOCK (REGNO (x)) < NUM_FIXED_BLOCKS
2481 		  && ! rtx_varies_p (x, 0)));
2482 
2483     case UNSPEC_VOLATILE:
2484       return 0;
2485 
2486     case ASM_OPERANDS:
2487       if (MEM_VOLATILE_P (x))
2488 	return 0;
2489 
2490       /* Fall through.  */
2491 
2492     default:
2493       break;
2494     }
2495 
2496   fmt = GET_RTX_FORMAT (code);
2497   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2498     switch (fmt[i])
2499       {
2500       case 'e':
2501 	if (! equiv_init_movable_p (XEXP (x, i), regno))
2502 	  return 0;
2503 	break;
2504       case 'E':
2505 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2506 	  if (! equiv_init_movable_p (XVECEXP (x, i, j), regno))
2507 	    return 0;
2508 	break;
2509       }
2510 
2511   return 1;
2512 }
2513 
2514 /* TRUE if X uses any registers for which reg_equiv[REGNO].replace is
2515    true.  */
2516 static int
2517 contains_replace_regs (rtx x)
2518 {
2519   int i, j;
2520   const char *fmt;
2521   enum rtx_code code = GET_CODE (x);
2522 
2523   switch (code)
2524     {
2525     case CONST_INT:
2526     case CONST:
2527     case LABEL_REF:
2528     case SYMBOL_REF:
2529     case CONST_DOUBLE:
2530     case CONST_FIXED:
2531     case CONST_VECTOR:
2532     case PC:
2533     case CC0:
2534     case HIGH:
2535       return 0;
2536 
2537     case REG:
2538       return reg_equiv[REGNO (x)].replace;
2539 
2540     default:
2541       break;
2542     }
2543 
2544   fmt = GET_RTX_FORMAT (code);
2545   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2546     switch (fmt[i])
2547       {
2548       case 'e':
2549 	if (contains_replace_regs (XEXP (x, i)))
2550 	  return 1;
2551 	break;
2552       case 'E':
2553 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2554 	  if (contains_replace_regs (XVECEXP (x, i, j)))
2555 	    return 1;
2556 	break;
2557       }
2558 
2559   return 0;
2560 }
2561 
2562 /* TRUE if X references a memory location that would be affected by a store
2563    to MEMREF.  */
2564 static int
2565 memref_referenced_p (rtx memref, rtx x)
2566 {
2567   int i, j;
2568   const char *fmt;
2569   enum rtx_code code = GET_CODE (x);
2570 
2571   switch (code)
2572     {
2573     case CONST_INT:
2574     case CONST:
2575     case LABEL_REF:
2576     case SYMBOL_REF:
2577     case CONST_DOUBLE:
2578     case CONST_FIXED:
2579     case CONST_VECTOR:
2580     case PC:
2581     case CC0:
2582     case HIGH:
2583     case LO_SUM:
2584       return 0;
2585 
2586     case REG:
2587       return (reg_equiv[REGNO (x)].replacement
2588 	      && memref_referenced_p (memref,
2589 				      reg_equiv[REGNO (x)].replacement));
2590 
2591     case MEM:
2592       if (true_dependence (memref, VOIDmode, x))
2593 	return 1;
2594       break;
2595 
2596     case SET:
2597       /* If we are setting a MEM, it doesn't count (its address does), but any
2598 	 other SET_DEST that has a MEM in it is referencing the MEM.  */
2599       if (MEM_P (SET_DEST (x)))
2600 	{
2601 	  if (memref_referenced_p (memref, XEXP (SET_DEST (x), 0)))
2602 	    return 1;
2603 	}
2604       else if (memref_referenced_p (memref, SET_DEST (x)))
2605 	return 1;
2606 
2607       return memref_referenced_p (memref, SET_SRC (x));
2608 
2609     default:
2610       break;
2611     }
2612 
2613   fmt = GET_RTX_FORMAT (code);
2614   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2615     switch (fmt[i])
2616       {
2617       case 'e':
2618 	if (memref_referenced_p (memref, XEXP (x, i)))
2619 	  return 1;
2620 	break;
2621       case 'E':
2622 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2623 	  if (memref_referenced_p (memref, XVECEXP (x, i, j)))
2624 	    return 1;
2625 	break;
2626       }
2627 
2628   return 0;
2629 }
2630 
2631 /* TRUE if some insn in the range (START, END] references a memory location
2632    that would be affected by a store to MEMREF.  */
2633 static int
2634 memref_used_between_p (rtx memref, rtx start, rtx end)
2635 {
2636   rtx insn;
2637 
2638   for (insn = NEXT_INSN (start); insn != NEXT_INSN (end);
2639        insn = NEXT_INSN (insn))
2640     {
2641       if (!NONDEBUG_INSN_P (insn))
2642 	continue;
2643 
2644       if (memref_referenced_p (memref, PATTERN (insn)))
2645 	return 1;
2646 
2647       /* Nonconst functions may access memory.  */
2648       if (CALL_P (insn) && (! RTL_CONST_CALL_P (insn)))
2649 	return 1;
2650     }
2651 
2652   return 0;
2653 }
2654 
2655 /* Mark REG as having no known equivalence.
2656    Some instructions might have been processed before and furnished
2657    with REG_EQUIV notes for this register; these notes will have to be
2658    removed.
2659    STORE is the piece of RTL that does the non-constant / conflicting
2660    assignment - a SET, CLOBBER or REG_INC note.  It is currently not used,
2661    but needs to be there because this function is called from note_stores.  */
2662 static void
2663 no_equiv (rtx reg, const_rtx store ATTRIBUTE_UNUSED,
2664 	  void *data ATTRIBUTE_UNUSED)
2665 {
2666   int regno;
2667   rtx list;
2668 
2669   if (!REG_P (reg))
2670     return;
2671   regno = REGNO (reg);
2672   list = reg_equiv[regno].init_insns;
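  /* const0_rtx marks a register already known to have no usable
     equivalence.  */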
2673   if (list == const0_rtx)
2674     return;
2675   reg_equiv[regno].init_insns = const0_rtx;
2676   reg_equiv[regno].replacement = NULL_RTX;
2677   /* This doesn't matter for equivalences made for argument registers;
2678      we should keep their initialization insns.  */
2679   if (reg_equiv[regno].is_arg_equivalence)
2680     return;
2681   reg_equiv_init (regno) = NULL_RTX;
2682   for (; list; list =  XEXP (list, 1))
2683     {
2684       rtx insn = XEXP (list, 0);
2685       remove_note (insn, find_reg_note (insn, REG_EQUIV, NULL_RTX));
2686     }
2687 }
2688 
2689 /* In DEBUG_INSN location adjust REGs from CLEARED_REGS bitmap to the
2690    equivalent replacement.  */
2691 
2692 static rtx
2693 adjust_cleared_regs (rtx loc, const_rtx old_rtx ATTRIBUTE_UNUSED, void *data)
2694 {
2695   if (REG_P (loc))
2696     {
2697       bitmap cleared_regs = (bitmap) data;
2698       if (bitmap_bit_p (cleared_regs, REGNO (loc)))
2699 	return simplify_replace_fn_rtx (*reg_equiv[REGNO (loc)].src_p,
2700 					NULL_RTX, adjust_cleared_regs, data);
2701     }
2702   return NULL_RTX;
2703 }
2704 
2705 /* Nonzero if we recorded an equivalence for a LABEL_REF.  */
2706 static int recorded_label_ref;
2707 
2708 /* Find registers that are equivalent to a single value throughout the
2709    compilation (either because they can be referenced in memory or are
2710    set once from a single constant).  Lower their priority for a
2711    register.
2712 
2713    If such a register is only referenced once, try substituting its
2714    value into the using insn.  If it succeeds, we can eliminate the
2715    register completely.
2716 
2717    Initialize the REG_EQUIV_INIT array of initializing insns.
2718 
2719    Return non-zero if jump label rebuilding should be done.  */
2720 static int
2721 update_equiv_regs (void)
2722 {
2723   rtx insn;
2724   basic_block bb;
2725   int loop_depth;
2726   bitmap cleared_regs;
2727 
2728   /* We need to keep track of whether or not we recorded a LABEL_REF so
2729      that we know if the jump optimizer needs to be rerun.  */
2730   recorded_label_ref = 0;
2731 
2732   reg_equiv = XCNEWVEC (struct equivalence, max_regno);
2733   grow_reg_equivs ();
2734 
2735   init_alias_analysis ();
2736 
2737   /* Scan the insns and find which registers have equivalences.  Do this
2738      in a separate scan of the insns because (due to -fcse-follow-jumps)
2739      a register can be set below its use.  */
2740   FOR_EACH_BB (bb)
2741     {
2742       loop_depth = bb->loop_depth;
2743 
2744       for (insn = BB_HEAD (bb);
2745 	   insn != NEXT_INSN (BB_END (bb));
2746 	   insn = NEXT_INSN (insn))
2747 	{
2748 	  rtx note;
2749 	  rtx set;
2750 	  rtx dest, src;
2751 	  int regno;
2752 
2753 	  if (! INSN_P (insn))
2754 	    continue;
2755 
2756 	  for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2757 	    if (REG_NOTE_KIND (note) == REG_INC)
2758 	      no_equiv (XEXP (note, 0), note, NULL);
2759 
2760 	  set = single_set (insn);
2761 
2762 	  /* If this insn contains more (or less) than a single SET,
2763 	     only mark all destinations as having no known equivalence.  */
2764 	  if (set == 0)
2765 	    {
2766 	      note_stores (PATTERN (insn), no_equiv, NULL);
2767 	      continue;
2768 	    }
2769 	  else if (GET_CODE (PATTERN (insn)) == PARALLEL)
2770 	    {
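	      /* A PARALLEL still containing a single SET: give every
		 other destination (e.g. CLOBBERs) no equivalence.  */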
2771 	      int i;
2772 
2773 	      for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
2774 		{
2775 		  rtx part = XVECEXP (PATTERN (insn), 0, i);
2776 		  if (part != set)
2777 		    note_stores (part, no_equiv, NULL);
2778 		}
2779 	    }
2780 
2781 	  dest = SET_DEST (set);
2782 	  src = SET_SRC (set);
2783 
2784 	  /* See if this is setting up the equivalence between an argument
2785 	     register and its stack slot.  */
2786 	  note = find_reg_note (insn, REG_EQUIV, NULL_RTX);
2787 	  if (note)
2788 	    {
2789 	      gcc_assert (REG_P (dest));
2790 	      regno = REGNO (dest);
2791 
2792 	      /* Note that we don't want to clear reg_equiv_init even if there
2793 		 are multiple sets of this register.  */
2794 	      reg_equiv[regno].is_arg_equivalence = 1;
2795 
2796 	      /* Record for reload that this is an equivalencing insn.  */
2797 	      if (rtx_equal_p (src, XEXP (note, 0)))
2798 		reg_equiv_init (regno)
2799 		  = gen_rtx_INSN_LIST (VOIDmode, insn, reg_equiv_init (regno));
2800 
2801 	      /* Continue normally in case this is a candidate for
2802 		 replacements.  */
2803 	    }
2804 
2805 	  if (!optimize)
2806 	    continue;
2807 
2808 	  /* We only handle the case of a pseudo register being set
2809 	     once, or always to the same value.  */
2810 	  /* ??? The mn10200 port breaks if we add equivalences for
2811 	     values that need an ADDRESS_REGS register and set them equivalent
2812 	     to a MEM of a pseudo.  The actual problem is in the over-conservative
2813 	     handling of INPADDR_ADDRESS / INPUT_ADDRESS / INPUT triples in
2814 	     calculate_needs, but we traditionally work around this problem
2815 	     here by rejecting equivalences when the destination is in a register
2816 	     that's likely spilled.  This is fragile, of course, since the
2817 	     preferred class of a pseudo depends on all instructions that set
2818 	     or use it.  */
2819 
2820 	  if (!REG_P (dest)
2821 	      || (regno = REGNO (dest)) < FIRST_PSEUDO_REGISTER
2822 	      || reg_equiv[regno].init_insns == const0_rtx
2823 	      || (targetm.class_likely_spilled_p (reg_preferred_class (regno))
2824 		  && MEM_P (src) && ! reg_equiv[regno].is_arg_equivalence))
2825 	    {
2826 	      /* This might be setting a SUBREG of a pseudo, a pseudo that is
2827 		 also set somewhere else to a constant.  */
2828 	      note_stores (set, no_equiv, NULL);
2829 	      continue;
2830 	    }
2831 
2832 	  note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
2833 
2834 	  /* cse sometimes generates function invariants, but doesn't put a
2835 	     REG_EQUAL note on the insn.  Since this note would be redundant,
2836 	     there's no point creating it earlier than here.  */
2837 	  if (! note && ! rtx_varies_p (src, 0))
2838 	    note = set_unique_reg_note (insn, REG_EQUAL, copy_rtx (src));
2839 
2840 	  /* Don't bother considering a REG_EQUAL note containing an EXPR_LIST
2841 	     since it represents a function call.  */
2842 	  if (note && GET_CODE (XEXP (note, 0)) == EXPR_LIST)
2843 	    note = NULL_RTX;
2844 
2845 	  if (DF_REG_DEF_COUNT (regno) != 1
2846 	      && (! note
2847 		  || rtx_varies_p (XEXP (note, 0), 0)
2848 		  || (reg_equiv[regno].replacement
2849 		      && ! rtx_equal_p (XEXP (note, 0),
2850 					reg_equiv[regno].replacement))))
2851 	    {
2852 	      no_equiv (dest, set, NULL);
2853 	      continue;
2854 	    }
2855 	  /* Record this insn as initializing this register.  */
2856 	  reg_equiv[regno].init_insns
2857 	    = gen_rtx_INSN_LIST (VOIDmode, insn, reg_equiv[regno].init_insns);
2858 
2859 	  /* If this register is known to be equal to a constant, record that
2860 	     it is always equivalent to the constant.  */
2861 	  if (DF_REG_DEF_COUNT (regno) == 1
2862 	      && note && ! rtx_varies_p (XEXP (note, 0), 0))
2863 	    {
2864 	      rtx note_value = XEXP (note, 0);
2865 	      remove_note (insn, note);
2866 	      set_unique_reg_note (insn, REG_EQUIV, note_value);
2867 	    }
2868 
2869 	  /* If this insn introduces a "constant" register, decrease the priority
2870 	     of that register.  Record this insn if the register is only used once
2871 	     more and the equivalence value is the same as our source.
2872 
2873 	     The latter condition is checked for two reasons:  First, it is an
2874 	     indication that it may be more efficient to actually emit the insn
2875 	     as written (if no registers are available, reload will substitute
2876 	     the equivalence).  Secondly, it avoids problems with any registers
2877 	     dying in this insn whose death notes would be missed.
2878 
2879 	     If we don't have a REG_EQUIV note, see if this insn is loading
2880 	     a register used only in one basic block from a MEM.  If so, and the
2881 	     MEM remains unchanged for the life of the register, add a REG_EQUIV
2882 	     note.  */
2883 
2884 	  note = find_reg_note (insn, REG_EQUIV, NULL_RTX);
2885 
2886 	  if (note == 0 && REG_BASIC_BLOCK (regno) >= NUM_FIXED_BLOCKS
2887 	      && MEM_P (SET_SRC (set))
2888 	      && validate_equiv_mem (insn, dest, SET_SRC (set)))
2889 	    note = set_unique_reg_note (insn, REG_EQUIV, copy_rtx (SET_SRC (set)));
2890 
2891 	  if (note)
2892 	    {
2893 	      int regno = REGNO (dest);
2894 	      rtx x = XEXP (note, 0);
2895 
2896 	      /* If we haven't done so, record for reload that this is an
2897 		 equivalencing insn.  */
2898 	      if (!reg_equiv[regno].is_arg_equivalence)
2899 		reg_equiv_init (regno)
2900 		  = gen_rtx_INSN_LIST (VOIDmode, insn, reg_equiv_init (regno));
2901 
2902 	      /* Record whether or not we created a REG_EQUIV note for a LABEL_REF.
2903 		 We might end up substituting the LABEL_REF for uses of the
2904 		 pseudo here or later.  That kind of transformation may turn an
2905 		 indirect jump into a direct jump, in which case we must rerun the
2906 		 jump optimizer to ensure that the JUMP_LABEL fields are valid.  */
2907 	      if (GET_CODE (x) == LABEL_REF
2908 		  || (GET_CODE (x) == CONST
2909 		      && GET_CODE (XEXP (x, 0)) == PLUS
2910 		      && (GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF)))
2911 		recorded_label_ref = 1;
2912 
2913 	      reg_equiv[regno].replacement = x;
2914 	      reg_equiv[regno].src_p = &SET_SRC (set);
2915 	      reg_equiv[regno].loop_depth = loop_depth;
2916 
2917 	      /* Don't mess with things live during setjmp.  */
2918 	      if (REG_LIVE_LENGTH (regno) >= 0 && optimize)
2919 		{
2920 		  /* Note that the statement below does not affect the priority
2921 		     in local-alloc!  */
2922 		  REG_LIVE_LENGTH (regno) *= 2;
2923 
2924 		  /* If the register is referenced exactly twice, meaning it is
2925 		     set once and used once, indicate that the reference may be
2926 		     replaced by the equivalence we computed above.  Do this
2927 		     even if the register is only used in one block so that
2928 		     dependencies can be handled where the last register is
2929 		     used in a different block (i.e. HIGH / LO_SUM sequences)
2930 		     and to reduce the number of registers alive across
2931 		     calls.  */
2932 
2933 		  if (REG_N_REFS (regno) == 2
2934 		      && (rtx_equal_p (x, src)
2935 			  || ! equiv_init_varies_p (src))
2936 		      && NONJUMP_INSN_P (insn)
2937 		      && equiv_init_movable_p (PATTERN (insn), regno))
2938 		    reg_equiv[regno].replace = 1;
2939 		}
2940 	    }
2941 	}
2942     }
2943 
2944   if (!optimize)
2945     goto out;
2946 
2947   /* A second pass, to gather additional equivalences with memory.  This needs
2948      to be done after we know which registers we are going to replace.  */
2949 
2950   for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
2951     {
2952       rtx set, src, dest;
2953       unsigned regno;
2954 
2955       if (! INSN_P (insn))
2956 	continue;
2957 
2958       set = single_set (insn);
2959       if (! set)
2960 	continue;
2961 
2962       dest = SET_DEST (set);
2963       src = SET_SRC (set);
2964 
2965       /* If this sets a MEM to the contents of a REG that is only used
2966 	 in a single basic block, see if the register is always equivalent
2967 	 to that memory location and if moving the store from INSN to the
2968 	 insn that set REG is safe.  If so, put a REG_EQUIV note on the
2969 	 initializing insn.
2970 
2971 	 Don't add a REG_EQUIV note if the insn already has one.  The existing
2972 	 REG_EQUIV is likely more useful than the one we are adding.
2973 
2974 	 If one of the regs in the address has reg_equiv[REGNO].replace set,
2975 	 then we can't add this REG_EQUIV note.  The reg_equiv[REGNO].replace
2976 	 optimization may move the set of this register immediately before
2977 	 insn, which puts it after reg_equiv[REGNO].init_insns, and hence
2978 	 the mention in the REG_EQUIV note would be to an uninitialized
2979 	 pseudo.  */
2980 
2981       if (MEM_P (dest) && REG_P (src)
2982 	  && (regno = REGNO (src)) >= FIRST_PSEUDO_REGISTER
2983 	  && REG_BASIC_BLOCK (regno) >= NUM_FIXED_BLOCKS
2984 	  && DF_REG_DEF_COUNT (regno) == 1
2985 	  && reg_equiv[regno].init_insns != 0
2986 	  && reg_equiv[regno].init_insns != const0_rtx
2987 	  && ! find_reg_note (XEXP (reg_equiv[regno].init_insns, 0),
2988 			      REG_EQUIV, NULL_RTX)
2989 	  && ! contains_replace_regs (XEXP (dest, 0)))
2990 	{
2991 	  rtx init_insn = XEXP (reg_equiv[regno].init_insns, 0);
2992 	  if (validate_equiv_mem (init_insn, src, dest)
2993 	      && ! memref_used_between_p (dest, init_insn, insn)
2994 	      /* Attaching a REG_EQUIV note will fail if INIT_INSN has
2995 		 multiple sets.  */
2996 	      && set_unique_reg_note (init_insn, REG_EQUIV, copy_rtx (dest)))
2997 	    {
2998 	      /* This insn makes the equivalence, not the one initializing
2999 		 the register.  */
3000 	      reg_equiv_init (regno)
3001 		= gen_rtx_INSN_LIST (VOIDmode, insn, NULL_RTX);
3002 	      df_notes_rescan (init_insn);
3003 	    }
3004 	}
3005     }
3006 
3007   cleared_regs = BITMAP_ALLOC (NULL);
3008   /* Now scan all regs killed in an insn to see if any of them are
3009      registers that are used only once.  If so, see if we can replace the
3010      reference with the equivalent form.  If we can, delete the
3011      initializing reference and this register will go away.  If we
3012      can't replace the reference, and the initializing reference is
3013      within the same loop (or in an inner loop), then move the register
3014      initialization just before the use, so that they are in the same
3015      basic block.  */
3016   FOR_EACH_BB_REVERSE (bb)
3017     {
3018       loop_depth = bb->loop_depth;
3019       for (insn = BB_END (bb);
3020 	   insn != PREV_INSN (BB_HEAD (bb));
3021 	   insn = PREV_INSN (insn))
3022 	{
3023 	  rtx link;
3024 
3025 	  if (! INSN_P (insn))
3026 	    continue;
3027 
3028 	  /* Don't substitute into a non-local goto; this confuses the CFG.  */
3029 	  if (JUMP_P (insn)
3030 	      && find_reg_note (insn, REG_NON_LOCAL_GOTO, NULL_RTX))
3031 	    continue;
3032 
3033 	  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
3034 	    {
3035 	      if (REG_NOTE_KIND (link) == REG_DEAD
3036 		  /* Make sure this insn still refers to the register.  */
3037 		  && reg_mentioned_p (XEXP (link, 0), PATTERN (insn)))
3038 		{
3039 		  int regno = REGNO (XEXP (link, 0));
3040 		  rtx equiv_insn;
3041 
3042 		  if (! reg_equiv[regno].replace
3043 		      || reg_equiv[regno].loop_depth < loop_depth
3044 		      /* It makes no sense to move insns if register
3045 			 pressure-sensitive scheduling was done,
3046 			 because it will not improve allocation but
3047 			 will most probably worsen the insn
3048 			 schedule.  */
3049 		      || (flag_sched_pressure && flag_schedule_insns))
3050 		    continue;
3051 
3052 		  /* reg_equiv[REGNO].replace gets set only when
3053 		     REG_N_REFS[REGNO] is 2, i.e. the register is set
3054 		     once and used once.  (If it were only set, but not used,
3055 		     flow would have deleted the setting insns.)  Hence
3056 		     there can only be one insn in reg_equiv[REGNO].init_insns.  */
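		  /* In other words, init_insns is a one-element INSN_LIST:
		     XEXP (list, 0) is the single setting insn and
		     XEXP (list, 1) is NULL, which the assert below checks.  */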
3057 		  gcc_assert (reg_equiv[regno].init_insns
3058 			      && !XEXP (reg_equiv[regno].init_insns, 1));
3059 		  equiv_insn = XEXP (reg_equiv[regno].init_insns, 0);
3060 
3061 		  /* We may not move instructions that can throw, since
3062 		     that changes basic block boundaries and we are not
3063 		     prepared to adjust the CFG to match.  */
3064 		  if (can_throw_internal (equiv_insn))
3065 		    continue;
3066 
3067 		  if (asm_noperands (PATTERN (equiv_insn)) < 0
3068 		      && validate_replace_rtx (regno_reg_rtx[regno],
3069 					       *(reg_equiv[regno].src_p), insn))
3070 		    {
3071 		      rtx equiv_link;
3072 		      rtx last_link;
3073 		      rtx note;
3074 
3075 		      /* Find the last note.  */
3076 		      for (last_link = link; XEXP (last_link, 1);
3077 			   last_link = XEXP (last_link, 1))
3078 			;
3079 
3080 		      /* Append the REG_DEAD notes from equiv_insn.  */
3081 		      equiv_link = REG_NOTES (equiv_insn);
3082 		      while (equiv_link)
3083 			{
3084 			  note = equiv_link;
3085 			  equiv_link = XEXP (equiv_link, 1);
3086 			  if (REG_NOTE_KIND (note) == REG_DEAD)
3087 			    {
3088 			      remove_note (equiv_insn, note);
3089 			      XEXP (last_link, 1) = note;
3090 			      XEXP (note, 1) = NULL_RTX;
3091 			      last_link = note;
3092 			    }
3093 			}
3094 
3095 		      remove_death (regno, insn);
3096 		      SET_REG_N_REFS (regno, 0);
3097 		      REG_FREQ (regno) = 0;
3098 		      delete_insn (equiv_insn);
3099 
3100 		      reg_equiv[regno].init_insns
3101 			= XEXP (reg_equiv[regno].init_insns, 1);
3102 
3103 		      reg_equiv_init (regno) = NULL_RTX;
3104 		      bitmap_set_bit (cleared_regs, regno);
3105 		    }
3106 		  /* Move the initialization of the register to just before
3107 		     INSN.  Update the flow information.  */
3108 		  else if (prev_nondebug_insn (insn) != equiv_insn)
3109 		    {
3110 		      rtx new_insn;
3111 
3112 		      new_insn = emit_insn_before (PATTERN (equiv_insn), insn);
3113 		      REG_NOTES (new_insn) = REG_NOTES (equiv_insn);
3114 		      REG_NOTES (equiv_insn) = 0;
3115 		      /* Rescan it to process the notes.  */
3116 		      df_insn_rescan (new_insn);
3117 
3118 		      /* Make sure this insn is recognized before
3119 			 reload begins, otherwise
3120 			 eliminate_regs_in_insn will die.  */
3121 		      INSN_CODE (new_insn) = INSN_CODE (equiv_insn);
3122 
3123 		      delete_insn (equiv_insn);
3124 
3125 		      XEXP (reg_equiv[regno].init_insns, 0) = new_insn;
3126 
3127 		      REG_BASIC_BLOCK (regno) = bb->index;
3128 		      REG_N_CALLS_CROSSED (regno) = 0;
3129 		      REG_FREQ_CALLS_CROSSED (regno) = 0;
3130 		      REG_N_THROWING_CALLS_CROSSED (regno) = 0;
3131 		      REG_LIVE_LENGTH (regno) = 2;
3132 
3133 		      if (insn == BB_HEAD (bb))
3134 			BB_HEAD (bb) = PREV_INSN (insn);
3135 
3136 		      reg_equiv_init (regno)
3137 			= gen_rtx_INSN_LIST (VOIDmode, new_insn, NULL_RTX);
3138 		      bitmap_set_bit (cleared_regs, regno);
3139 		    }
3140 		}
3141 	    }
3142 	}
3143     }
3144 
3145   if (!bitmap_empty_p (cleared_regs))
3146     {
3147       FOR_EACH_BB (bb)
3148 	{
3149 	  bitmap_and_compl_into (DF_LIVE_IN (bb), cleared_regs);
3150 	  bitmap_and_compl_into (DF_LIVE_OUT (bb), cleared_regs);
3151 	  bitmap_and_compl_into (DF_LR_IN (bb), cleared_regs);
3152 	  bitmap_and_compl_into (DF_LR_OUT (bb), cleared_regs);
3153 	}
3154 
3155       /* Last pass - adjust debug insns referencing cleared regs.  */
3156       if (MAY_HAVE_DEBUG_INSNS)
3157 	for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
3158 	  if (DEBUG_INSN_P (insn))
3159 	    {
3160 	      rtx old_loc = INSN_VAR_LOCATION_LOC (insn);
3161 	      INSN_VAR_LOCATION_LOC (insn)
3162 		= simplify_replace_fn_rtx (old_loc, NULL_RTX,
3163 					   adjust_cleared_regs,
3164 					   (void *) cleared_regs);
3165 	      if (old_loc != INSN_VAR_LOCATION_LOC (insn))
3166 		df_insn_rescan (insn);
3167 	    }
3168     }
3169 
3170   BITMAP_FREE (cleared_regs);
3171 
3172   out:
3173   /* Clean up.  */
3174 
3175   end_alias_analysis ();
3176   free (reg_equiv);
3177   return recorded_label_ref;
3178 }
3179 
3180 
3181 
3182 /* Print chain C to FILE.  */
3183 static void
3184 print_insn_chain (FILE *file, struct insn_chain *c)
3185 {
3186   fprintf (file, "insn=%d, ", INSN_UID (c->insn));
3187   bitmap_print (file, &c->live_throughout, "live_throughout: ", ", ");
3188   bitmap_print (file, &c->dead_or_set, "dead_or_set: ", "\n");
3189 }
3190 
3191 
3192 /* Print all reload_insn_chains to FILE.  */
3193 static void
3194 print_insn_chains (FILE *file)
3195 {
3196   struct insn_chain *c;
3197   for (c = reload_insn_chain; c ; c = c->next)
3198     print_insn_chain (file, c);
3199 }
3200 
3201 /* Return true if pseudo REGNO should be added to the live_throughout
3202    or dead_or_set sets of the insn chains for reload consideration.  */
3203 static bool
3204 pseudo_for_reload_consideration_p (int regno)
3205 {
3206   /* Consider spilled pseudos too for IRA because they still have a
3207      chance to get hard-registers in the reload when IRA is used.  */
3208   return (reg_renumber[regno] >= 0 || ira_conflicts_p);
3209 }
3210 
3211 /* Set up LIVE_SUBREGS[ALLOCNUM] and LIVE_SUBREGS_USED[ALLOCNUM]:
3212    REG determines the size in bytes and INIT_VALUE the initial
3213    contents.  ALLOCNUM need not be the regno of REG.  */
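/* For example (a sketch): for an 8-byte pseudo, SIZE below is 8 and the
   sbitmap gets one bit per byte; INIT_VALUE == true marks all 8 bytes
   live initially, while false marks none of them live.  */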
3214 static void
3215 init_live_subregs (bool init_value, sbitmap *live_subregs,
3216 		   int *live_subregs_used, int allocnum, rtx reg)
3217 {
3218   unsigned int regno = REGNO (SUBREG_REG (reg));
3219   int size = GET_MODE_SIZE (GET_MODE (regno_reg_rtx[regno]));
3220 
3221   gcc_assert (size > 0);
3222 
3223   /* Been there, done that.  */
3224   if (live_subregs_used[allocnum])
3225     return;
3226 
3227   /* Create a new one with zeros.  */
3228   if (live_subregs[allocnum] == NULL)
3229     live_subregs[allocnum] = sbitmap_alloc (size);
3230 
3231   /* If the entire reg was live before blasting into subregs, we need
3232      to init all of the subregs to ones; otherwise init them to 0.  */
3233   if (init_value)
3234     sbitmap_ones (live_subregs[allocnum]);
3235   else
3236     sbitmap_zero (live_subregs[allocnum]);
3237 
3238   /* Set the number of bits that we really want.  */
3239   live_subregs_used[allocnum] = size;
3240 }
3241 
3242 /* Walk the insns of the current function and build reload_insn_chain,
3243    and record register life information.  */
3244 static void
3245 build_insn_chain (void)
3246 {
3247   unsigned int i;
3248   struct insn_chain **p = &reload_insn_chain;
3249   basic_block bb;
3250   struct insn_chain *c = NULL;
3251   struct insn_chain *next = NULL;
3252   bitmap live_relevant_regs = BITMAP_ALLOC (NULL);
3253   bitmap elim_regset = BITMAP_ALLOC (NULL);
3254   /* live_subregs is a vector used to keep accurate information about
3255      which hardregs are live in multiword pseudos.  live_subregs and
3256      live_subregs_used are indexed by pseudo number.  The live_subreg
3257      entry for a particular pseudo is only used if the corresponding
3258      element is non zero in live_subregs_used.  The value in
3259      live_subregs_used is the number of bytes that the pseudo can
3260      occupy.  */
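  /* For example (a sketch): a DImode pseudo occupies 8 bytes, so its
     live_subregs_used entry would be 8 and bits 0..7 of its live_subregs
     sbitmap would track the liveness of each individual byte.  */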
3261   sbitmap *live_subregs = XCNEWVEC (sbitmap, max_regno);
3262   int *live_subregs_used = XNEWVEC (int, max_regno);
3263 
3264   for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3265     if (TEST_HARD_REG_BIT (eliminable_regset, i))
3266       bitmap_set_bit (elim_regset, i);
3267   FOR_EACH_BB_REVERSE (bb)
3268     {
3269       bitmap_iterator bi;
3270       rtx insn;
3271 
3272       CLEAR_REG_SET (live_relevant_regs);
3273       memset (live_subregs_used, 0, max_regno * sizeof (int));
3274 
3275       EXECUTE_IF_SET_IN_BITMAP (DF_LR_OUT (bb), 0, i, bi)
3276 	{
3277 	  if (i >= FIRST_PSEUDO_REGISTER)
3278 	    break;
3279 	  bitmap_set_bit (live_relevant_regs, i);
3280 	}
3281 
3282       EXECUTE_IF_SET_IN_BITMAP (DF_LR_OUT (bb),
3283 				FIRST_PSEUDO_REGISTER, i, bi)
3284 	{
3285 	  if (pseudo_for_reload_consideration_p (i))
3286 	    bitmap_set_bit (live_relevant_regs, i);
3287 	}
3288 
3289       FOR_BB_INSNS_REVERSE (bb, insn)
3290 	{
3291 	  if (!NOTE_P (insn) && !BARRIER_P (insn))
3292 	    {
3293 	      unsigned int uid = INSN_UID (insn);
3294 	      df_ref *def_rec;
3295 	      df_ref *use_rec;
3296 
3297 	      c = new_insn_chain ();
3298 	      c->next = next;
3299 	      next = c;
3300 	      *p = c;
3301 	      p = &c->prev;
3302 
3303 	      c->insn = insn;
3304 	      c->block = bb->index;
3305 
3306 	      if (INSN_P (insn))
3307 		for (def_rec = DF_INSN_UID_DEFS (uid); *def_rec; def_rec++)
3308 		  {
3309 		    df_ref def = *def_rec;
3310 		    unsigned int regno = DF_REF_REGNO (def);
3311 
3312 		    /* Ignore may clobbers because these are generated
3313 		       from calls. However, every other kind of def is
3314 		       added to dead_or_set.  */
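		    /* (For instance, the clobbers of call-clobbered hard
		       registers that df adds for a CALL_INSN carry
		       DF_REF_MAY_CLOBBER and are skipped here.)  */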
3315 		    if (!DF_REF_FLAGS_IS_SET (def, DF_REF_MAY_CLOBBER))
3316 		      {
3317 			if (regno < FIRST_PSEUDO_REGISTER)
3318 			  {
3319 			    if (!fixed_regs[regno])
3320 			      bitmap_set_bit (&c->dead_or_set, regno);
3321 			  }
3322 			else if (pseudo_for_reload_consideration_p (regno))
3323 			  bitmap_set_bit (&c->dead_or_set, regno);
3324 		      }
3325 
3326 		    if ((regno < FIRST_PSEUDO_REGISTER
3327 			 || reg_renumber[regno] >= 0
3328 			 || ira_conflicts_p)
3329 			&& (!DF_REF_FLAGS_IS_SET (def, DF_REF_CONDITIONAL)))
3330 		      {
3331 			rtx reg = DF_REF_REG (def);
3332 
3333 			/* We can model subregs, but not if they are
3334 			   wrapped in ZERO_EXTRACTS.  */
3335 			if (GET_CODE (reg) == SUBREG
3336 			    && !DF_REF_FLAGS_IS_SET (def, DF_REF_ZERO_EXTRACT))
3337 			  {
3338 			    unsigned int start = SUBREG_BYTE (reg);
3339 			    unsigned int last = start
3340 			      + GET_MODE_SIZE (GET_MODE (reg));
3341 
3342 			    init_live_subregs
3343 			      (bitmap_bit_p (live_relevant_regs, regno),
3344 			       live_subregs, live_subregs_used, regno, reg);
3345 
3346 			    if (!DF_REF_FLAGS_IS_SET
3347 				(def, DF_REF_STRICT_LOW_PART))
3348 			      {
3349 				/* Expand the range to cover entire words.
3350 				   Bytes added here are "don't care".  */
3351 				start
3352 				  = start / UNITS_PER_WORD * UNITS_PER_WORD;
3353 				last = ((last + UNITS_PER_WORD - 1)
3354 					/ UNITS_PER_WORD * UNITS_PER_WORD);
3355 			      }
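			    /* A worked example, assuming UNITS_PER_WORD == 4:
			       a 2-byte def at SUBREG_BYTE 2 that is not a
			       STRICT_LOW_PART gives start == 2, last == 4,
			       which round out to the word range [0, 4).  */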
3356 
3357 			    /* Ignore the paradoxical bits.  */
3358 			    if ((int)last > live_subregs_used[regno])
3359 			      last = live_subregs_used[regno];
3360 
3361 			    while (start < last)
3362 			      {
3363 				RESET_BIT (live_subregs[regno], start);
3364 				start++;
3365 			      }
3366 
3367 			    if (sbitmap_empty_p (live_subregs[regno]))
3368 			      {
3369 				live_subregs_used[regno] = 0;
3370 				bitmap_clear_bit (live_relevant_regs, regno);
3371 			      }
3372 			    else
3373 			      /* Set live_relevant_regs here because
3374 				 that bit has to be true to get us to
3375 				 look at the live_subregs fields.  */
3376 			      bitmap_set_bit (live_relevant_regs, regno);
3377 			  }
3378 			else
3379 			  {
3380 			    /* DF_REF_PARTIAL is generated for
3381 			       subregs, STRICT_LOW_PART, and
3382 			       ZERO_EXTRACT.  We handle the subreg
3383 			       case above so here we have to keep from
3384 			       modeling the def as a killing def.  */
3385 			    if (!DF_REF_FLAGS_IS_SET (def, DF_REF_PARTIAL))
3386 			      {
3387 				bitmap_clear_bit (live_relevant_regs, regno);
3388 				live_subregs_used[regno] = 0;
3389 			      }
3390 			  }
3391 		      }
3392 		  }
3393 
3394 	      bitmap_and_compl_into (live_relevant_regs, elim_regset);
3395 	      bitmap_copy (&c->live_throughout, live_relevant_regs);
3396 
3397 	      if (INSN_P (insn))
3398 		for (use_rec = DF_INSN_UID_USES (uid); *use_rec; use_rec++)
3399 		  {
3400 		    df_ref use = *use_rec;
3401 		    unsigned int regno = DF_REF_REGNO (use);
3402 		    rtx reg = DF_REF_REG (use);
3403 
3404 		    /* DF_REF_READ_WRITE on a use means that this use
3405 		       is fabricated from a def that is a partial set
3406 		       to a multiword reg.  Here, we model precisely
3407 		       only the subreg case that is not wrapped in
3408 		       ZERO_EXTRACT, so we do not need to look at the
3409 		       fabricated use.  */
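		    /* E.g. a partial def such as
		       (set (subreg:SI (reg:DI 100) 0) ...) (pseudo 100 is
		       hypothetical) makes df record a read-write use of the
		       register; that fabricated use is what gets skipped.  */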
3410 		    if (DF_REF_FLAGS_IS_SET (use, DF_REF_READ_WRITE)
3411 			&& !DF_REF_FLAGS_IS_SET (use, DF_REF_ZERO_EXTRACT)
3412 			&& DF_REF_FLAGS_IS_SET (use, DF_REF_SUBREG))
3413 		      continue;
3414 
3415 		    /* Add the last use of each var to dead_or_set.  */
3416 		    if (!bitmap_bit_p (live_relevant_regs, regno))
3417 		      {
3418 			if (regno < FIRST_PSEUDO_REGISTER)
3419 			  {
3420 			    if (!fixed_regs[regno])
3421 			      bitmap_set_bit (&c->dead_or_set, regno);
3422 			  }
3423 			else if (pseudo_for_reload_consideration_p (regno))
3424 			  bitmap_set_bit (&c->dead_or_set, regno);
3425 		      }
3426 
3427 		    if (regno < FIRST_PSEUDO_REGISTER
3428 			|| pseudo_for_reload_consideration_p (regno))
3429 		      {
3430 			if (GET_CODE (reg) == SUBREG
3431 			    && !DF_REF_FLAGS_IS_SET (use,
3432 						     DF_REF_SIGN_EXTRACT
3433 						     | DF_REF_ZERO_EXTRACT))
3434 			  {
3435 			    unsigned int start = SUBREG_BYTE (reg);
3436 			    unsigned int last = start
3437 			      + GET_MODE_SIZE (GET_MODE (reg));
3438 
3439 			    init_live_subregs
3440 			      (bitmap_bit_p (live_relevant_regs, regno),
3441 			       live_subregs, live_subregs_used, regno, reg);
3442 
3443 			    /* Ignore the paradoxical bits.  */
3444 			    if ((int)last > live_subregs_used[regno])
3445 			      last = live_subregs_used[regno];
3446 
3447 			    while (start < last)
3448 			      {
3449 				SET_BIT (live_subregs[regno], start);
3450 				start++;
3451 			      }
3452 			  }
3453 			else
3454 			  /* Resetting the live_subregs_used is
3455 			     effectively saying do not use the subregs
3456 			     because we are reading the whole
3457 			     pseudo.  */
3458 			  live_subregs_used[regno] = 0;
3459 			bitmap_set_bit (live_relevant_regs, regno);
3460 		      }
3461 		  }
3462 	    }
3463 	}
3464 
3465       /* FIXME!! The following code is a disaster.  Reload needs to see the
3466 	 labels and jump tables that are just hanging out in between
3467 	 the basic blocks.  See pr33676.  */
3468       insn = BB_HEAD (bb);
3469 
3470       /* Skip over the barriers and cruft.  */
3471       while (insn && (BARRIER_P (insn) || NOTE_P (insn)
3472 		      || BLOCK_FOR_INSN (insn) == bb))
3473 	insn = PREV_INSN (insn);
3474 
3475       /* We add everything except barriers and notes, but the focus is
3476 	 on getting the labels and jump tables into the
3477 	 reload_insn_chain.  */
3478       while (insn)
3479 	{
3480 	  if (!NOTE_P (insn) && !BARRIER_P (insn))
3481 	    {
3482 	      if (BLOCK_FOR_INSN (insn))
3483 		break;
3484 
3485 	      c = new_insn_chain ();
3486 	      c->next = next;
3487 	      next = c;
3488 	      *p = c;
3489 	      p = &c->prev;
3490 
3491 	      /* The block makes no sense here, but it is what the old
3492 		 code did.  */
3493 	      c->block = bb->index;
3494 	      c->insn = insn;
3495 	      bitmap_copy (&c->live_throughout, live_relevant_regs);
3496 	    }
3497 	  insn = PREV_INSN (insn);
3498 	}
3499     }
3500 
3501   for (i = 0; i < (unsigned int) max_regno; i++)
3502     free (live_subregs[i]);
3503 
3504   reload_insn_chain = c;
3505   *p = NULL;
3506 
3507   free (live_subregs);
3508   free (live_subregs_used);
3509   BITMAP_FREE (live_relevant_regs);
3510   BITMAP_FREE (elim_regset);
3511 
3512   if (dump_file)
3513     print_insn_chains (dump_file);
3514 }
3515 
3516 
3517 
3518 /* All natural loops.  */
3519 struct loops ira_loops;
3520 
3521 /* True if we have allocno conflicts.  It is false for non-optimized
3522    mode or when the conflict table is too big.  */
3523 bool ira_conflicts_p;
3524 
3525 /* Saved between IRA and reload.  */
3526 static int saved_flag_ira_share_spill_slots;
3527 
3528 /* This is the main entry of IRA.  */
3529 static void
3530 ira (FILE *f)
3531 {
3532   int allocated_reg_info_size;
3533   bool loops_p;
3534   int max_regno_before_ira, ira_max_point_before_emit;
3535   int rebuild_p;
3536 
3537   if (flag_caller_saves)
3538     init_caller_save ();
3539 
3540   if (flag_ira_verbose < 10)
3541     {
3542       internal_flag_ira_verbose = flag_ira_verbose;
3543       ira_dump_file = f;
3544     }
3545   else
3546     {
3547       internal_flag_ira_verbose = flag_ira_verbose - 10;
3548       ira_dump_file = stderr;
3549     }
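  /* For example (a sketch): -fira-verbose=2 sends level-2 IRA output to
     the regular dump file, while -fira-verbose=12 sends the same level-2
     output to stderr instead.  */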
3550 
3551   ira_conflicts_p = optimize > 0;
3552   setup_prohibited_mode_move_regs ();
3553 
3554   df_note_add_problem ();
3555 
3556   if (optimize == 1)
3557     {
3558       df_live_add_problem ();
3559       df_live_set_all_dirty ();
3560     }
3561 #ifdef ENABLE_CHECKING
3562   df->changeable_flags |= DF_VERIFY_SCHEDULED;
3563 #endif
3564   df_analyze ();
3565   df_clear_flags (DF_NO_INSN_RESCAN);
3566   regstat_init_n_sets_and_refs ();
3567   regstat_compute_ri ();
3568 
3569   /* If we are not optimizing, then this is the only place before
3570      register allocation where dataflow is done.  And that is needed
3571      to generate these warnings.  */
3572   if (warn_clobbered)
3573     generate_setjmp_warnings ();
3574 
3575   /* Determine if the current function is a leaf before running IRA,
3576      since this can impact optimizations done by the prologue and
3577      epilogue, thus changing register elimination offsets.  */
3578   current_function_is_leaf = leaf_function_p ();
3579 
3580   if (resize_reg_info () && flag_ira_loop_pressure)
3581     ira_set_pseudo_classes (ira_dump_file);
3582 
3583   rebuild_p = update_equiv_regs ();
3584 
3585 #ifndef IRA_NO_OBSTACK
3586   gcc_obstack_init (&ira_obstack);
3587 #endif
3588   bitmap_obstack_initialize (&ira_bitmap_obstack);
3589   if (optimize)
3590     {
3591       max_regno = max_reg_num ();
3592       ira_reg_equiv_len = max_regno;
3593       ira_reg_equiv_invariant_p
3594 	= (bool *) ira_allocate (max_regno * sizeof (bool));
3595       memset (ira_reg_equiv_invariant_p, 0, max_regno * sizeof (bool));
3596       ira_reg_equiv_const = (rtx *) ira_allocate (max_regno * sizeof (rtx));
3597       memset (ira_reg_equiv_const, 0, max_regno * sizeof (rtx));
3598       find_reg_equiv_invariant_const ();
3599       if (rebuild_p)
3600 	{
3601 	  timevar_push (TV_JUMP);
3602 	  rebuild_jump_labels (get_insns ());
3603 	  if (purge_all_dead_edges ())
3604 	    delete_unreachable_blocks ();
3605 	  timevar_pop (TV_JUMP);
3606 	}
3607     }
3608 
3609   max_regno_before_ira = allocated_reg_info_size = max_reg_num ();
3610   ira_setup_eliminable_regset ();
3611 
3612   ira_overall_cost = ira_reg_cost = ira_mem_cost = 0;
3613   ira_load_cost = ira_store_cost = ira_shuffle_cost = 0;
3614   ira_move_loops_num = ira_additional_jumps_num = 0;
3615 
3616   ira_assert (current_loops == NULL);
3617   if (flag_ira_region == IRA_REGION_ALL || flag_ira_region == IRA_REGION_MIXED)
3618     {
3619       flow_loops_find (&ira_loops);
3620       record_loop_exits ();
3621       current_loops = &ira_loops;
3622     }
3623 
3624   if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
3625     fprintf (ira_dump_file, "Building IRA IR\n");
3626   loops_p = ira_build ();
3627 
3628   ira_assert (ira_conflicts_p || !loops_p);
3629 
3630   saved_flag_ira_share_spill_slots = flag_ira_share_spill_slots;
3631   if (too_high_register_pressure_p () || cfun->calls_setjmp)
3632     /* It just wastes the compiler's time to pack spilled pseudos into
3633        stack slots in this case -- prohibit it.  We also do this when
3634        there is a setjmp call, because the compiler is required to
3635        preserve the value of a variable not modified between setjmp
3636        and longjmp, and sharing slots does not guarantee that.  */
3637     flag_ira_share_spill_slots = FALSE;
3638 
3639   ira_color ();
3640 
3641   ira_max_point_before_emit = ira_max_point;
3642 
3643   ira_initiate_emit_data ();
3644 
3645   ira_emit (loops_p);
3646 
3647   if (ira_conflicts_p)
3648     {
3649       max_regno = max_reg_num ();
3650 
3651       if (! loops_p)
3652 	ira_initiate_assign ();
3653       else
3654 	{
3655 	  expand_reg_info (allocated_reg_info_size);
3656 	  setup_preferred_alternate_classes_for_new_pseudos
3657 	    (allocated_reg_info_size);
3658 	  allocated_reg_info_size = max_regno;
3659 
3660 	  if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
3661 	    fprintf (ira_dump_file, "Flattening IR\n");
3662 	  ira_flattening (max_regno_before_ira, ira_max_point_before_emit);
3663 	  /* New insns were generated: add notes and recalculate live
3664 	     info.  */
3665 	  df_analyze ();
3666 
3667 	  flow_loops_find (&ira_loops);
3668 	  record_loop_exits ();
3669 	  current_loops = &ira_loops;
3670 
3671 	  setup_allocno_assignment_flags ();
3672 	  ira_initiate_assign ();
3673 	  ira_reassign_conflict_allocnos (max_regno);
3674 	}
3675     }
3676 
3677   ira_finish_emit_data ();
3678 
3679   setup_reg_renumber ();
3680 
3681   calculate_allocation_cost ();
3682 
3683 #ifdef ENABLE_IRA_CHECKING
3684   if (ira_conflicts_p)
3685     check_allocation ();
3686 #endif
3687 
3688   if (delete_trivially_dead_insns (get_insns (), max_reg_num ()))
3689     df_analyze ();
3690 
3691   if (max_regno != max_regno_before_ira)
3692     {
3693       regstat_free_n_sets_and_refs ();
3694       regstat_free_ri ();
3695       regstat_init_n_sets_and_refs ();
3696       regstat_compute_ri ();
3697     }
3698 
3699   overall_cost_before = ira_overall_cost;
3700   if (! ira_conflicts_p)
3701     grow_reg_equivs ();
3702   else
3703     {
3704       fix_reg_equiv_init ();
3705 
3706 #ifdef ENABLE_IRA_CHECKING
3707       print_redundant_copies ();
3708 #endif
3709 
3710       ira_spilled_reg_stack_slots_num = 0;
3711       ira_spilled_reg_stack_slots
3712 	= ((struct ira_spilled_reg_stack_slot *)
3713 	   ira_allocate (max_regno
3714 			 * sizeof (struct ira_spilled_reg_stack_slot)));
3715       memset (ira_spilled_reg_stack_slots, 0,
3716 	      max_regno * sizeof (struct ira_spilled_reg_stack_slot));
3717     }
3718   allocate_initial_values (reg_equivs);
3719 }
3720 
3721 static void
3722 do_reload (void)
3723 {
3724   basic_block bb;
3725   bool need_dce;
3726 
3727   if (flag_ira_verbose < 10)
3728     ira_dump_file = dump_file;
3729 
3730   df_set_flags (DF_NO_INSN_RESCAN);
3731   build_insn_chain ();
3732 
3733   need_dce = reload (get_insns (), ira_conflicts_p);
3734 
3735   timevar_push (TV_IRA);
3736 
3737   if (ira_conflicts_p)
3738     {
3739       ira_free (ira_spilled_reg_stack_slots);
3740 
3741       ira_finish_assign ();
3742     }
3743   if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL
3744       && overall_cost_before != ira_overall_cost)
3745     fprintf (ira_dump_file, "+++Overall after reload %d\n", ira_overall_cost);
3746   ira_destroy ();
3747 
3748   flag_ira_share_spill_slots = saved_flag_ira_share_spill_slots;
3749 
3750   if (current_loops != NULL)
3751     {
3752       flow_loops_free (&ira_loops);
3753       free_dominance_info (CDI_DOMINATORS);
3754     }
3755   FOR_ALL_BB (bb)
3756     bb->loop_father = NULL;
3757   current_loops = NULL;
3758 
3759   regstat_free_ri ();
3760   regstat_free_n_sets_and_refs ();
3761 
3762   if (optimize)
3763     {
3764       cleanup_cfg (CLEANUP_EXPENSIVE);
3765 
3766       ira_free (ira_reg_equiv_invariant_p);
3767       ira_free (ira_reg_equiv_const);
3768     }
3769 
3770   bitmap_obstack_release (&ira_bitmap_obstack);
3771 #ifndef IRA_NO_OBSTACK
3772   obstack_free (&ira_obstack, NULL);
3773 #endif
3774 
3775   /* The code after the reload has changed so much that at this point
3776      we might as well just rescan everything.  Note that
3777      df_rescan_all_insns is not going to help here because it does not
3778      touch the artificial uses and defs.  */
3779   df_finish_pass (true);
3780   if (optimize > 1)
3781     df_live_add_problem ();
3782   df_scan_alloc (NULL);
3783   df_scan_blocks ();
3784 
3785   if (optimize)
3786     df_analyze ();
3787 
3788   if (need_dce && optimize)
3789     run_fast_dce ();
3790 
3791   /* Diagnose uses of the hard frame pointer when it is used as a global
3792      register.  Often we can get away with letting the user appropriate
3793      the frame pointer, but we should let them know when code generation
3794      makes that impossible.  */
3795   if (global_regs[HARD_FRAME_POINTER_REGNUM] && frame_pointer_needed)
3796     {
3797       tree decl = global_regs_decl[HARD_FRAME_POINTER_REGNUM];
3798       error_at (DECL_SOURCE_LOCATION (current_function_decl),
3799                 "frame pointer required, but reserved");
3800       inform (DECL_SOURCE_LOCATION (decl), "for %qD", decl);
3801     }
3802 
3803   timevar_pop (TV_IRA);
3804 }
3805 
3806 /* Run the integrated register allocator.  */
3807 static unsigned int
3808 rest_of_handle_ira (void)
3809 {
3810   ira (dump_file);
3811   return 0;
3812 }
3813 
3814 struct rtl_opt_pass pass_ira =
3815 {
3816  {
3817   RTL_PASS,
3818   "ira",                                /* name */
3819   NULL,                                 /* gate */
3820   rest_of_handle_ira,		        /* execute */
3821   NULL,                                 /* sub */
3822   NULL,                                 /* next */
3823   0,                                    /* static_pass_number */
3824   TV_IRA,	                        /* tv_id */
3825   0,                                    /* properties_required */
3826   0,                                    /* properties_provided */
3827   0,                                    /* properties_destroyed */
3828   0,                                    /* todo_flags_start */
3829   TODO_dump_func                        /* todo_flags_finish */
3830  }
3831 };
3832 
3833 static unsigned int
3834 rest_of_handle_reload (void)
3835 {
3836   do_reload ();
3837   return 0;
3838 }
3839 
3840 struct rtl_opt_pass pass_reload =
3841 {
3842  {
3843   RTL_PASS,
3844   "reload",                             /* name */
3845   NULL,                                 /* gate */
3846   rest_of_handle_reload,	        /* execute */
3847   NULL,                                 /* sub */
3848   NULL,                                 /* next */
3849   0,                                    /* static_pass_number */
3850   TV_RELOAD,	                        /* tv_id */
3851   0,                                    /* properties_required */
3852   0,                                    /* properties_provided */
3853   0,                                    /* properties_destroyed */
3854   0,                                    /* todo_flags_start */
3855   TODO_dump_func | TODO_ggc_collect     /* todo_flags_finish */
3856  }
3857 };
3858