1 /* Integrated Register Allocator (IRA) entry point.
2    Copyright (C) 2006-2020 Free Software Foundation, Inc.
3    Contributed by Vladimir Makarov <vmakarov@redhat.com>.
4 
5 This file is part of GCC.
6 
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11 
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
15 for more details.
16 
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3.  If not see
19 <http://www.gnu.org/licenses/>.  */
20 
21 /* The integrated register allocator (IRA) is a
22    regional register allocator performing graph coloring on a top-down
23    traversal of nested regions.  Graph coloring in a region is based
24    on Chaitin-Briggs algorithm.  It is called integrated because
25    register coalescing, register live range splitting, and choosing a
26    better hard register are done on-the-fly during coloring.  Register
27    coalescing and choosing a cheaper hard register are done by hard
28    register preferencing during hard register assignment.  The live
29    range splitting is a byproduct of the regional register allocation.
30 
31    Major IRA notions are:
32 
33      o *Region* is a part of CFG where graph coloring based on
34        Chaitin-Briggs algorithm is done.  IRA can work on any set of
35        nested CFG regions forming a tree.  Currently the regions are
36        the entire function for the root region and natural loops for
37    the other regions.  Therefore the data structure representing a
38        region is called loop_tree_node.
39 
40      o *Allocno class* is a register class used for allocation of a
41        given allocno.  It means that only a hard register of the given
42        register class can be assigned to the given allocno.  In reality,
43        an even smaller subset of (*profitable*) hard registers can be
44        assigned.  In rare cases, the subset can be even smaller
45        because our modification of the Chaitin-Briggs algorithm requires
46        that the sets of hard registers assignable to allocnos form a
47        forest, i.e. the sets can be ordered in a way where any
48        previous set either does not intersect a given set or is a
49        superset of the given set.
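
       As a hypothetical illustration (this is not code from IRA), the
       forest requirement can be stated as a predicate over the distinct
       sets of hard registers used by the allocnos: any two sets must
       either be disjoint or one must contain the other.

         static bool
         sets_form_forest_p (const HARD_REG_SET *sets, int n)
         {
           for (int i = 0; i < n; i++)
             for (int j = i + 1; j < n; j++)
               if (hard_reg_set_intersect_p (sets[i], sets[j])
                   && ! hard_reg_set_subset_p (sets[i], sets[j])
                   && ! hard_reg_set_subset_p (sets[j], sets[i]))
                 return false;
           return true;
         }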
50 
51      o *Pressure class* is a register class belonging to a set of
52        register classes containing all of the hard-registers available
53        for register allocation.  The set of all pressure classes for a
54        target is defined in the corresponding machine-description file
55        according to some criteria.  Register pressure is calculated only
56        for pressure classes and it affects some IRA decisions such as
57        forming allocation regions.
58 
59      o *Allocno* represents the live range of a pseudo-register in a
60        region.  Besides the obvious attributes like the corresponding
61        pseudo-register number, allocno class, conflicting allocnos and
62        conflicting hard-registers, there are a few allocno attributes
63        which are important for understanding the allocation algorithm:
64 
65        - *Live ranges*.  This is a list of ranges of *program points*
66          where the allocno lives.  Program points represent places
67          where a pseudo can be born or become dead (there are
68          approximately two times more program points than the insns)
69          and they are represented by integers starting with 0.  The
70          live ranges are used to find conflicts between allocnos.
71          They also play a very important role in the transformation of
72          the IRA internal representation of several regions into a one
73          region representation.  The latter is used during the reload
74          pass because each allocno then represents all of the
75          corresponding pseudo-registers.
76 
77        - *Hard-register costs*.  This is a vector of size equal to the
78          number of available hard-registers of the allocno class.  The
79          cost of a callee-clobbered hard-register for an allocno is
80          increased by the cost of save/restore code around the calls
81          through the given allocno's life.  If the allocno is a move
82          instruction operand and another operand is a hard-register of
83          the allocno class, the cost of the hard-register is decreased
84          by the move cost.
85 
86          When an allocno is assigned, the hard-register with minimal
87          full cost is used.  Initially, a hard-register's full cost is
88          the corresponding value from the hard-register's cost vector.
89          If the allocno is connected by a *copy* (see below) to
90          another allocno which has just received a hard-register, the
91          cost of the hard-register is decreased.  Before choosing a
92          hard-register for an allocno, the allocno's current costs of
93          the hard-registers are modified by the conflict hard-register
94          costs of all of the conflicting allocnos which are not
95          assigned yet.
96 
97        - *Conflict hard-register costs*.  This is a vector of the same
98          size as the hard-register costs vector.  To permit an
99          unassigned allocno to get a better hard-register, IRA uses
100          this vector to calculate the final full cost of the
101          available hard-registers.  Conflict hard-register costs of an
102          unassigned allocno are also changed with a change of the
103          hard-register cost of the allocno when a copy involving the
104          allocno is processed as described above.  This is done to
105          show other unassigned allocnos that a given allocno prefers
106          some hard-registers in order to remove the move instruction
107          corresponding to the copy.
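
         As a rough, hypothetical sketch of the choice described above
         (the real code, mostly in ira-color.c, is considerably more
         involved), assume the conflict costs of the still unassigned
         conflicting allocnos have already been accumulated into one
         vector; the hard register with the smallest full cost wins:

           static int
           cheapest_hard_regno (int n_regs, const int *costs,
                                const int *conflict_costs)
           {
             int best = -1, best_cost = INT_MAX;
             for (int i = 0; i < n_regs; i++)
               if (costs[i] + conflict_costs[i] < best_cost)
                 {
                   best_cost = costs[i] + conflict_costs[i];
                   best = i;
                 }
             return best;
           }

         Both parameters are indexed by the position of a hard register
         in the allocno class, not by the hard register number itself.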
108 
109      o *Cap*.  If a pseudo-register does not live in a region but
110        lives in a nested region, IRA creates a special allocno called
111        a cap in the outer region.  A region cap is also created for a
112        subregion cap.
113 
114      o *Copy*.  Allocnos can be connected by copies.  Copies are used
115        to modify hard-register costs for allocnos during coloring.
116        Such modifications reflect a preference to use the same
117        hard-register for the allocnos connected by copies.  Usually
118        copies are created for move insns (in this case it results in
119        register coalescing).  But IRA also creates copies for operands
120        of an insn which should be assigned to the same hard-register
121        due to constraints in the machine description (it usually
122        results in removing a move generated in reload to satisfy
123        the constraints) and copies referring to the allocno which is
124        the output operand of an instruction and the allocno which is
125        an input operand dying in the instruction (creation of such
126        copies results in less register shuffling).  IRA *does not*
127        create copies between the same register allocnos from different
128        regions because we use another technique for propagating
129        hard-register preference on the borders of regions.
130 
131    Allocnos (including caps) for the upper region in the region tree
132    *accumulate* information important for coloring from allocnos with
133    the same pseudo-register from nested regions.  This includes
134    hard-register and memory costs, conflicts with hard-registers,
135    allocno conflicts, allocno copies and more.  *Thus, attributes for
136    allocnos in a region have the same values as if the region had no
137    subregions*.  It means that attributes for allocnos in the
138    outermost region corresponding to the function have the same values
139    as though the allocation used only one region which is the entire
140    function.  It also means that we can look at IRA work as if IRA
141    first did the allocation for the whole function, then improved it
142    for loops, then for their subloops, and so on.
143 
144    IRA major passes are:
145 
146      o Building IRA internal representation which consists of the
147        following subpasses:
148 
149        * First, IRA builds regions and creates allocnos (file
150          ira-build.c) and initializes most of their attributes.
151 
152        * Then IRA finds an allocno class for each allocno and
153          calculates its initial (non-accumulated) cost of memory and
154          each hard-register of its allocno class (file ira-cost.c).
155 
156        * IRA creates live ranges of each allocno, calculates register
157          pressure for each pressure class in each region, sets up
158          conflict hard registers for each allocno and info about calls
159          the allocno lives through (file ira-lives.c).
160 
161        * IRA removes low register pressure loops from the regions
162          mostly to speed IRA up (file ira-build.c).
163 
164        * IRA propagates accumulated allocno info from lower region
165          allocnos to corresponding upper region allocnos (file
166          ira-build.c).
167 
168        * IRA creates all caps (file ira-build.c).
169 
170        * Having live-ranges of allocnos and their classes, IRA creates
171          conflicting allocnos for each allocno.  Conflicting allocnos
172          are stored as a bit vector or array of pointers to the
173          conflicting allocnos, whichever is more profitable (file
174          ira-conflicts.c).  At this point IRA creates allocno copies.
175 
176      o Coloring.  Now IRA has all the necessary info to start the graph
177        coloring process.  It is done in each region on a top-down
178        traversal of the region tree (file ira-color.c).  The subpasses are:
179 
180        * Finding profitable hard registers of the corresponding allocno
181          class for each allocno.  For example, frequently only
182          callee-saved hard registers are profitable for allocnos living
183          through calls.  If the profitable hard register sets of the
184          allocnos do not form a tree based on the subset relation, we use
185          some approximation to form the tree.  This approximation is
186          used to figure out trivial colorability of allocnos.  The
187          approximation is needed only in pretty rare cases.
188 
189        * Putting allocnos onto the coloring stack.  IRA uses Briggs
190          optimistic coloring which is a major improvement over
191          Chaitin's coloring.  Therefore IRA does not spill allocnos at
192          this point.  There is some freedom in the order of putting
193          allocnos on the stack which can affect the final result of
194          the allocation.  IRA uses some heuristics to improve the
195          order.  The major one is to form *threads* from colorable
196          allocnos and push them onto the stack by threads.  A thread is a
197          set of non-conflicting colorable allocnos connected by
198          copies.  The thread contains allocnos from the colorable
199          bucket or colorable allocnos already pushed onto the coloring
200          stack.  Pushing thread allocnos one after another onto the
201          stack increases chances of removing copies when the allocnos
202          get the same hard reg.
203 
204 	 We also use a modification of the Chaitin-Briggs algorithm which
205          works for intersecting register classes of allocnos.  To
206          figure out trivial colorability of allocnos, the tree of hard
207          register sets mentioned above is used.  To get an idea how the
208          algorithm works, consider an i386 example: an allocno to
209          which any general hard register can be assigned.
210          If the allocno conflicts with eight allocnos to which only the
211          EAX register can be assigned, the given allocno is still
212          trivially colorable because all conflicting allocnos can be
213          assigned only to EAX and all other general hard registers are
214          still free (a small sketch of this criterion is given below).
215 
216 	 To get an idea of the trivial colorability criterion used, it
217 	 is also useful to read the article "Graph-Coloring Register
218 	 Allocation for Irregular Architectures" by Michael D. Smith
219 	 and Glenn Holloway.  The major difference between the article's
220 	 approach and the approach used in IRA is that Smith's approach
221 	 takes register classes only from the machine description while
222 	 IRA calculates register classes from the intermediate code too
223 	 (e.g. an explicit usage of hard registers in RTL code for
224 	 parameter passing can result in the creation of additional
225 	 register classes which contain or exclude the hard
226 	 registers).  That makes the IRA approach useful for improving
227 	 coloring even for architectures with regular register files,
228 	 and in fact some benchmarking shows the improvement for
229 	 regular class architectures is even bigger than for irregular
230 	 ones.  Another difference is that Smith's approach chooses the
231 	 intersection of the classes of all insn operands in which a given
232 	 pseudo occurs.  IRA can use a bigger class if that is still
233 	 more profitable than using memory.
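
         A much simplified, hypothetical sketch of that criterion (the
         real code in ira-color.c works on the tree of hard register
         sets and on per-object register counts) groups the conflicting
         allocnos by the node of the tree their profitable set belongs
         to and caps the demand of each group by the size of the node's
         register set:

           static bool
           trivially_colorable_p (int n_profitable_regs, int n_nodes,
                                  const int *node_size,
                                  const int *node_conflict_regs)
           {
             int demand = 0;
             for (int i = 0; i < n_nodes; i++)
               demand += MIN (node_size[i], node_conflict_regs[i]);
             return demand < n_profitable_regs;
           }

         In the i386 example above there is a single node {EAX} of size
         one with eight conflicting allocnos, so the demand is capped at
         one and the allocno stays trivially colorable.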
234 
235        * Popping the allocnos from the stack and assigning them hard
236          registers.  If IRA cannot assign a hard register to an
237          allocno and the allocno is coalesced, IRA undoes the
238          coalescing and puts the uncoalesced allocnos onto the stack in
239          the hope that some such allocnos will get a hard register
240          separately.  If IRA fails to assign hard register or memory
241          is more profitable for it, IRA spills the allocno.  IRA
242          assigns the allocno the hard-register with minimal full
243          allocation cost which reflects the cost of usage of the
244          hard-register for the allocno and cost of usage of the
245          hard-register for allocnos conflicting with given allocno.
246 
247        * Chaitin-Briggs coloring assigns as many pseudos as possible
248          to hard registers.  After coloring we try to improve
249          the allocation from the cost point of view.  We improve the
250          allocation by spilling some allocnos and assigning the freed
251          hard registers to other allocnos if it decreases the overall
252          allocation cost.
253 
254        * After allocno assigning in the region, IRA modifies the hard
255          register and memory costs for the corresponding allocnos in
256          the subregions to reflect the cost of possible loads, stores,
257          or moves on the border of the region and its subregions.
258          When the default regional allocation algorithm is used
259          (-fira-algorithm=mixed), IRA just propagates the assignment
260          for allocnos if the register pressure in the region for the
261          corresponding pressure class is less than the number of
262          available hard registers for the given pressure class.
263 
264      o Spill/restore code moving.  When IRA performs an allocation
265        by traversing regions in top-down order, it does not know what
266        happens below in the region tree.  Therefore, sometimes IRA
267        misses opportunities to perform a better allocation.  A simple
268        optimization tries to improve allocation in a region having
269        subregions and contained in another region.  If the
270        corresponding allocnos in the subregion are spilled, it spills
271        the region allocno if it is profitable.  The optimization
272        implements a simple iterative algorithm performing profitable
273        transformations while they are still possible.  It is fast in
274        practice, so there is no real need for a better time complexity
275        algorithm.
276 
277      o Code change.  After coloring, two allocnos representing the
278        same pseudo-register outside and inside a region respectively
279        may be assigned to different locations (hard-registers or
280        memory).  In this case IRA creates and uses a new
281        pseudo-register inside the region and adds code to move allocno
282        values on the region's borders.  This is done during top-down
283        traversal of the regions (file ira-emit.c).  In some
284        complicated cases IRA can create a new allocno to move allocno
285        values (e.g. when a swap of values stored in two hard-registers
286        is needed).  At this stage, the new allocno is marked as
287        spilled.  IRA still creates the pseudo-register and the moves
288        on the region borders even when both allocnos were assigned to
289        the same hard-register.  If the reload pass spills a
290        pseudo-register for some reason, the effect will be smaller
291        because another allocno will still be in the hard-register.  In
292        most cases, this is better than spilling both allocnos.  If
293        reload does not change the allocation for the two
294        pseudo-registers, the trivial move will be removed by
295        post-reload optimizations.  IRA does not generate moves for
296        allocnos assigned to the same hard register when the default
297        regional allocation algorithm is used and the register pressure
298        in the region for the corresponding pressure class is less than
299        the number of available hard registers for the given pressure class.
300        IRA also does some optimizations to remove redundant stores and
301        to reduce code duplication on the region borders.
302 
303      o Flattening internal representation.  After changing code, IRA
304        transforms its internal representation for several regions into
305        one region representation (file ira-build.c).  This process is
306        called IR flattening.  Such process is more complicated than IR
307        rebuilding would be, but is much faster.
308 
309      o After IR flattening, IRA tries to assign hard registers to all
310        spilled allocnos.  This is implemented by a simple and fast
311        priority coloring algorithm (see function
312        ira_reassign_conflict_allocnos in file ira-color.c).  Here new allocnos
313        created during the code change pass can be assigned to hard
314        registers.
315 
316      o At the end IRA calls the reload pass.  The reload pass
317        communicates with IRA through several functions in file
318        ira-color.c to improve its decisions in
319 
320        * sharing stack slots for the spilled pseudos based on IRA info
321          about pseudo-register conflicts.
322 
323        * reassigning hard-registers to all spilled pseudos at the end
324          of each reload iteration.
325 
326        * choosing a better hard-register to spill based on IRA info
327          about pseudo-register live ranges and the register pressure
328          in places where the pseudo-register lives.
329 
330    IRA uses a lot of data representing the target processors.  These
331    data are initialized in file ira.c.
332 
333    If the function has no loops (or the loops are ignored when
334    -fira-algorithm=CB is used), we have classic Chaitin-Briggs
335    coloring (only instead of a separate coalescing pass, we use hard
336    register preferencing).  In such a case, IRA works much faster
337    because many things are not done (like IR flattening, the
338    spill/restore optimization, and the code change).
339 
340    The following literature is worth reading for a better understanding of the code:
341 
342    o Preston Briggs, Keith D. Cooper, Linda Torczon.  Improvements to
343      Graph Coloring Register Allocation.
344 
345    o David Callahan, Brian Koblenz.  Register allocation via
346      hierarchical graph coloring.
347 
348    o Keith Cooper, Anshuman Dasgupta, Jason Eckhardt. Revisiting Graph
349      Coloring Register Allocation: A Study of the Chaitin-Briggs and
350      Callahan-Koblenz Algorithms.
351 
352    o Guei-Yuan Lueh, Thomas Gross, and Ali-Reza Adl-Tabatabai. Global
353      Register Allocation Based on Graph Fusion.
354 
355    o Michael D. Smith and Glenn Holloway.  Graph-Coloring Register
356      Allocation for Irregular Architectures
357 
358    o Vladimir Makarov. The Integrated Register Allocator for GCC.
359 
360    o Vladimir Makarov.  The top-down register allocator for irregular
361      register file architectures.
362 
363 */
364 
365 
366 #include "config.h"
367 #include "system.h"
368 #include "coretypes.h"
369 #include "backend.h"
370 #include "target.h"
371 #include "rtl.h"
372 #include "tree.h"
373 #include "df.h"
374 #include "memmodel.h"
375 #include "tm_p.h"
376 #include "insn-config.h"
377 #include "regs.h"
378 #include "ira.h"
379 #include "ira-int.h"
380 #include "diagnostic-core.h"
381 #include "cfgrtl.h"
382 #include "cfgbuild.h"
383 #include "cfgcleanup.h"
384 #include "expr.h"
385 #include "tree-pass.h"
386 #include "output.h"
387 #include "reload.h"
388 #include "cfgloop.h"
389 #include "lra.h"
390 #include "dce.h"
391 #include "dbgcnt.h"
392 #include "rtl-iter.h"
393 #include "shrink-wrap.h"
394 #include "print-rtl.h"
395 
396 struct target_ira default_target_ira;
397 class target_ira_int default_target_ira_int;
398 #if SWITCHABLE_TARGET
399 struct target_ira *this_target_ira = &default_target_ira;
400 class target_ira_int *this_target_ira_int = &default_target_ira_int;
401 #endif
402 
403 /* A modified value of flag `-fira-verbose' used internally.  */
404 int internal_flag_ira_verbose;
405 
406 /* Dump file of the allocator if it is not NULL.  */
407 FILE *ira_dump_file;
408 
409 /* The number of elements in the following array.  */
410 int ira_spilled_reg_stack_slots_num;
411 
412 /* The following array contains info about spilled pseudo-registers
413    stack slots used in current function so far.  */
414 class ira_spilled_reg_stack_slot *ira_spilled_reg_stack_slots;
415 
416 /* Correspondingly overall cost of the allocation, overall cost before
417    reload, cost of the allocnos assigned to hard-registers, cost of
418    the allocnos assigned to memory, cost of loads, stores and register
419    move insns generated for pseudo-register live range splitting (see
420    ira-emit.c).  */
421 int64_t ira_overall_cost, overall_cost_before;
422 int64_t ira_reg_cost, ira_mem_cost;
423 int64_t ira_load_cost, ira_store_cost, ira_shuffle_cost;
424 int ira_move_loops_num, ira_additional_jumps_num;
425 
426 /* All registers that can be eliminated.  */
427 
428 HARD_REG_SET eliminable_regset;
429 
430 /* Value of max_reg_num () before IRA work starts.  This value helps
431    us to recognize a situation when new pseudos were created during
432    IRA work.  */
433 static int max_regno_before_ira;
434 
435 /* Temporary hard reg set used for a different calculation.  */
436 static HARD_REG_SET temp_hard_regset;
437 
438 #define last_mode_for_init_move_cost \
439   (this_target_ira_int->x_last_mode_for_init_move_cost)
440 
441 
442 /* The function sets up the map IRA_REG_MODE_HARD_REGSET.  */
443 static void
444 setup_reg_mode_hard_regset (void)
445 {
446   int i, m, hard_regno;
447 
448   for (m = 0; m < NUM_MACHINE_MODES; m++)
449     for (hard_regno = 0; hard_regno < FIRST_PSEUDO_REGISTER; hard_regno++)
450       {
451 	CLEAR_HARD_REG_SET (ira_reg_mode_hard_regset[hard_regno][m]);
452 	for (i = hard_regno_nregs (hard_regno, (machine_mode) m) - 1;
453 	     i >= 0; i--)
454 	  if (hard_regno + i < FIRST_PSEUDO_REGISTER)
455 	    SET_HARD_REG_BIT (ira_reg_mode_hard_regset[hard_regno][m],
456 			      hard_regno + i);
457       }
458 }
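
/* An illustrative sketch, not part of IRA: a typical use of the map set
   up above is to ask whether a value of mode MODE placed in HARD_REGNO
   would touch any register in CONFLICT_REGS.  The helper below is
   hypothetical:

     static bool
     assignment_conflicts_p (int hard_regno, machine_mode mode,
			     const_hard_reg_set conflict_regs)
     {
       return hard_reg_set_intersect_p
	 (ira_reg_mode_hard_regset[hard_regno][mode], conflict_regs);
     }
 */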
459 
460 
461 #define no_unit_alloc_regs \
462   (this_target_ira_int->x_no_unit_alloc_regs)
463 
464 /* Set up ira_class_hard_regs, ira_class_hard_reg_index, ira_non_ordered_class_hard_regs and ira_class_hard_regs_num.  */
465 static void
466 setup_class_hard_regs (void)
467 {
468   int cl, i, hard_regno, n;
469   HARD_REG_SET processed_hard_reg_set;
470 
471   ira_assert (SHRT_MAX >= FIRST_PSEUDO_REGISTER);
472   for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
473     {
474       temp_hard_regset = reg_class_contents[cl] & ~no_unit_alloc_regs;
475       CLEAR_HARD_REG_SET (processed_hard_reg_set);
476       for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
477 	{
478 	  ira_non_ordered_class_hard_regs[cl][i] = -1;
479 	  ira_class_hard_reg_index[cl][i] = -1;
480 	}
481       for (n = 0, i = 0; i < FIRST_PSEUDO_REGISTER; i++)
482 	{
483 #ifdef REG_ALLOC_ORDER
484 	  hard_regno = reg_alloc_order[i];
485 #else
486 	  hard_regno = i;
487 #endif
488 	  if (TEST_HARD_REG_BIT (processed_hard_reg_set, hard_regno))
489 	    continue;
490 	  SET_HARD_REG_BIT (processed_hard_reg_set, hard_regno);
491       	  if (! TEST_HARD_REG_BIT (temp_hard_regset, hard_regno))
492 	    ira_class_hard_reg_index[cl][hard_regno] = -1;
493 	  else
494 	    {
495 	      ira_class_hard_reg_index[cl][hard_regno] = n;
496 	      ira_class_hard_regs[cl][n++] = hard_regno;
497 	    }
498 	}
499       ira_class_hard_regs_num[cl] = n;
500       for (n = 0, i = 0; i < FIRST_PSEUDO_REGISTER; i++)
501 	if (TEST_HARD_REG_BIT (temp_hard_regset, i))
502 	  ira_non_ordered_class_hard_regs[cl][n++] = i;
503       ira_assert (ira_class_hard_regs_num[cl] == n);
504     }
505 }
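
/* An illustrative sketch, not part of IRA: after the setup above, the
   allocatable hard registers of a class CL can be visited in allocation
   order, and ira_class_hard_reg_index maps a hard register back to its
   position in that order (or to -1 if it is not allocatable in CL):

     for (int i = 0; i < ira_class_hard_regs_num[cl]; i++)
       {
	 int hard_regno = ira_class_hard_regs[cl][i];
	 gcc_assert (ira_class_hard_reg_index[cl][hard_regno] == i);
       }
 */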
506 
507 /* Set up global variables defining info about hard registers for the
508    allocation.  These depend on USE_HARD_FRAME_P whose TRUE value means
509    that we can use the hard frame pointer for the allocation.  */
510 static void
511 setup_alloc_regs (bool use_hard_frame_p)
512 {
513 #ifdef ADJUST_REG_ALLOC_ORDER
514   ADJUST_REG_ALLOC_ORDER;
515 #endif
516   no_unit_alloc_regs = fixed_nonglobal_reg_set;
517   if (! use_hard_frame_p)
518     add_to_hard_reg_set (&no_unit_alloc_regs, Pmode,
519 			 HARD_FRAME_POINTER_REGNUM);
520   setup_class_hard_regs ();
521 }
522 
523 
524 
525 #define alloc_reg_class_subclasses \
526   (this_target_ira_int->x_alloc_reg_class_subclasses)
527 
528 /* Initialize the table of subclasses of each reg class.  */
529 static void
530 setup_reg_subclasses (void)
531 {
532   int i, j;
533   HARD_REG_SET temp_hard_regset2;
534 
535   for (i = 0; i < N_REG_CLASSES; i++)
536     for (j = 0; j < N_REG_CLASSES; j++)
537       alloc_reg_class_subclasses[i][j] = LIM_REG_CLASSES;
538 
539   for (i = 0; i < N_REG_CLASSES; i++)
540     {
541       if (i == (int) NO_REGS)
542 	continue;
543 
544       temp_hard_regset = reg_class_contents[i] & ~no_unit_alloc_regs;
545       if (hard_reg_set_empty_p (temp_hard_regset))
546 	continue;
547       for (j = 0; j < N_REG_CLASSES; j++)
548 	if (i != j)
549 	  {
550 	    enum reg_class *p;
551 
552 	    temp_hard_regset2 = reg_class_contents[j] & ~no_unit_alloc_regs;
553 	    if (! hard_reg_set_subset_p (temp_hard_regset,
554 					 temp_hard_regset2))
555 	      continue;
556 	    p = &alloc_reg_class_subclasses[j][0];
557 	    while (*p != LIM_REG_CLASSES) p++;
558 	    *p = (enum reg_class) i;
559 	  }
560     }
561 }
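
/* An illustrative sketch, not part of IRA: the table built above is a
   LIM_REG_CLASSES-terminated list, so the allocatable subclasses of a
   class CL are typically walked like this (process_subclass stands for
   whatever the caller does with each subclass):

     for (enum reg_class *p = &alloc_reg_class_subclasses[cl][0];
	  *p != LIM_REG_CLASSES;
	  p++)
       process_subclass (*p);
 */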
562 
563 
564 
565 /* Set up IRA_MEMORY_MOVE_COST, IRA_MAX_MEMORY_MOVE_COST and IRA_CLASS_SUBSET_P.  */
566 static void
567 setup_class_subset_and_memory_move_costs (void)
568 {
569   int cl, cl2, mode, cost;
570   HARD_REG_SET temp_hard_regset2;
571 
572   for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
573     ira_memory_move_cost[mode][NO_REGS][0]
574       = ira_memory_move_cost[mode][NO_REGS][1] = SHRT_MAX;
575   for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
576     {
577       if (cl != (int) NO_REGS)
578 	for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
579 	  {
580 	    ira_max_memory_move_cost[mode][cl][0]
581 	      = ira_memory_move_cost[mode][cl][0]
582 	      = memory_move_cost ((machine_mode) mode,
583 				  (reg_class_t) cl, false);
584 	    ira_max_memory_move_cost[mode][cl][1]
585 	      = ira_memory_move_cost[mode][cl][1]
586 	      = memory_move_cost ((machine_mode) mode,
587 				  (reg_class_t) cl, true);
588 	    /* Costs for NO_REGS are used in cost calculation on the
589 	       1st pass when the preferred register classes are not
590 	       known yet.  In this case we take the best scenario.  */
591 	    if (ira_memory_move_cost[mode][NO_REGS][0]
592 		> ira_memory_move_cost[mode][cl][0])
593 	      ira_max_memory_move_cost[mode][NO_REGS][0]
594 		= ira_memory_move_cost[mode][NO_REGS][0]
595 		= ira_memory_move_cost[mode][cl][0];
596 	    if (ira_memory_move_cost[mode][NO_REGS][1]
597 		> ira_memory_move_cost[mode][cl][1])
598 	      ira_max_memory_move_cost[mode][NO_REGS][1]
599 		= ira_memory_move_cost[mode][NO_REGS][1]
600 		= ira_memory_move_cost[mode][cl][1];
601 	  }
602     }
603   for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
604     for (cl2 = (int) N_REG_CLASSES - 1; cl2 >= 0; cl2--)
605       {
606 	temp_hard_regset = reg_class_contents[cl] & ~no_unit_alloc_regs;
607 	temp_hard_regset2 = reg_class_contents[cl2] & ~no_unit_alloc_regs;
608 	ira_class_subset_p[cl][cl2]
609 	  = hard_reg_set_subset_p (temp_hard_regset, temp_hard_regset2);
610 	if (! hard_reg_set_empty_p (temp_hard_regset2)
611 	    && hard_reg_set_subset_p (reg_class_contents[cl2],
612 				      reg_class_contents[cl]))
613 	  for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
614 	    {
615 	      cost = ira_memory_move_cost[mode][cl2][0];
616 	      if (cost > ira_max_memory_move_cost[mode][cl][0])
617 		ira_max_memory_move_cost[mode][cl][0] = cost;
618 	      cost = ira_memory_move_cost[mode][cl2][1];
619 	      if (cost > ira_max_memory_move_cost[mode][cl][1])
620 		ira_max_memory_move_cost[mode][cl][1] = cost;
621 	    }
622       }
623   for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
624     for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
625       {
626 	ira_memory_move_cost[mode][cl][0]
627 	  = ira_max_memory_move_cost[mode][cl][0];
628 	ira_memory_move_cost[mode][cl][1]
629 	  = ira_max_memory_move_cost[mode][cl][1];
630       }
631   setup_reg_subclasses ();
632 }
633 
634 
635 
636 /* Define the following macro if allocation through malloc is
637    preferable.  */
638 #define IRA_NO_OBSTACK
639 
640 #ifndef IRA_NO_OBSTACK
641 /* Obstack used for storing all dynamic data (except bitmaps) of the
642    IRA.  */
643 static struct obstack ira_obstack;
644 #endif
645 
646 /* Obstack used for storing all bitmaps of the IRA.  */
647 static struct bitmap_obstack ira_bitmap_obstack;
648 
649 /* Allocate memory of size LEN for IRA data.  */
650 void *
651 ira_allocate (size_t len)
652 {
653   void *res;
654 
655 #ifndef IRA_NO_OBSTACK
656   res = obstack_alloc (&ira_obstack, len);
657 #else
658   res = xmalloc (len);
659 #endif
660   return res;
661 }
662 
663 /* Free memory ADDR allocated for IRA data.  */
664 void
665 ira_free (void *addr ATTRIBUTE_UNUSED)
666 {
667 #ifndef IRA_NO_OBSTACK
668   /* do nothing */
669 #else
670   free (addr);
671 #endif
672 }
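
/* A minimal usage sketch of the wrappers above (hypothetical code, not
   from IRA): memory obtained with ira_allocate must be released with
   ira_free; with the obstack configuration ira_free is a no-op (see
   above).

     int *counts = (int *) ira_allocate (max_reg_num () * sizeof (int));
     memset (counts, 0, max_reg_num () * sizeof (int));
     ira_free (counts);
 */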
673 
674 
675 /* Allocate and return a bitmap for IRA.  */
676 bitmap
677 ira_allocate_bitmap (void)
678 {
679   return BITMAP_ALLOC (&ira_bitmap_obstack);
680 }
681 
682 /* Free bitmap B allocated for IRA.  */
683 void
684 ira_free_bitmap (bitmap b ATTRIBUTE_UNUSED)
685 {
686   /* do nothing */
687 }
688 
689 
690 
691 /* Output information about allocation of all allocnos (except for
692    caps) into file F.  */
693 void
694 ira_print_disposition (FILE *f)
695 {
696   int i, n, max_regno;
697   ira_allocno_t a;
698   basic_block bb;
699 
700   fprintf (f, "Disposition:");
701   max_regno = max_reg_num ();
702   for (n = 0, i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
703     for (a = ira_regno_allocno_map[i];
704 	 a != NULL;
705 	 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
706       {
707 	if (n % 4 == 0)
708 	  fprintf (f, "\n");
709 	n++;
710 	fprintf (f, " %4d:r%-4d", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
711 	if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
712 	  fprintf (f, "b%-3d", bb->index);
713 	else
714 	  fprintf (f, "l%-3d", ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
715 	if (ALLOCNO_HARD_REGNO (a) >= 0)
716 	  fprintf (f, " %3d", ALLOCNO_HARD_REGNO (a));
717 	else
718 	  fprintf (f, " mem");
719       }
720   fprintf (f, "\n");
721 }
722 
723 /* Outputs information about allocation of all allocnos into
724    stderr.  */
725 void
726 ira_debug_disposition (void)
727 {
728   ira_print_disposition (stderr);
729 }
730 
731 
732 
733 /* Set up ira_stack_reg_pressure_class which is the biggest pressure
734    register class containing stack registers or NO_REGS if there are
735    no stack registers.  To find this class, we iterate through all
736    register pressure classes and choose the first register pressure
737    class containing all the stack registers and having the biggest
738    size.  */
739 static void
740 setup_stack_reg_pressure_class (void)
741 {
742   ira_stack_reg_pressure_class = NO_REGS;
743 #ifdef STACK_REGS
744   {
745     int i, best, size;
746     enum reg_class cl;
747     HARD_REG_SET temp_hard_regset2;
748 
749     CLEAR_HARD_REG_SET (temp_hard_regset);
750     for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
751       SET_HARD_REG_BIT (temp_hard_regset, i);
752     best = 0;
753     for (i = 0; i < ira_pressure_classes_num; i++)
754       {
755 	cl = ira_pressure_classes[i];
756 	temp_hard_regset2 = temp_hard_regset & reg_class_contents[cl];
757 	size = hard_reg_set_size (temp_hard_regset2);
758 	if (best < size)
759 	  {
760 	    best = size;
761 	    ira_stack_reg_pressure_class = cl;
762 	  }
763       }
764   }
765 #endif
766 }
767 
768 /* Find pressure classes which are register classes for which we
769    calculate register pressure in IRA, register pressure sensitive
770    insn scheduling, and register pressure sensitive loop invariant
771    motion.
772 
773    To make register pressure calculation easy, we always use
774    non-intersecting register pressure classes.  Moving hard
775    registers within one register pressure class is not more expensive
776    than loading and storing those hard registers.  Most likely an allocno
777    class will be a subset of a register pressure class, and in many
778    cases it will itself be a register pressure class.  That makes the
779    usage of register pressure classes a good approximation for
780    finding high register pressure.  */
781 static void
782 setup_pressure_classes (void)
783 {
784   int cost, i, n, curr;
785   int cl, cl2;
786   enum reg_class pressure_classes[N_REG_CLASSES];
787   int m;
788   HARD_REG_SET temp_hard_regset2;
789   bool insert_p;
790 
791   if (targetm.compute_pressure_classes)
792     n = targetm.compute_pressure_classes (pressure_classes);
793   else
794     {
795       n = 0;
796       for (cl = 0; cl < N_REG_CLASSES; cl++)
797 	{
798 	  if (ira_class_hard_regs_num[cl] == 0)
799 	    continue;
800 	  if (ira_class_hard_regs_num[cl] != 1
801 	      /* A register class without subclasses may contain a few
802 		 hard registers and movement between them is costly
803 		 (e.g. SPARC FPCC registers).  We still should consider it
804 		 as a candidate for a pressure class.  */
805 	      && alloc_reg_class_subclasses[cl][0] < cl)
806 	    {
807 	      /* Check that the moves between any hard registers of the
808 		 current class are not more expensive for a legal mode
809 		 than load/store of the hard registers of the current
810 		 class.  Such class is a potential candidate to be a
811 		 register pressure class.  */
812 	      for (m = 0; m < NUM_MACHINE_MODES; m++)
813 		{
814 		  temp_hard_regset
815 		    = (reg_class_contents[cl]
816 		       & ~(no_unit_alloc_regs
817 			   | ira_prohibited_class_mode_regs[cl][m]));
818 		  if (hard_reg_set_empty_p (temp_hard_regset))
819 		    continue;
820 		  ira_init_register_move_cost_if_necessary ((machine_mode) m);
821 		  cost = ira_register_move_cost[m][cl][cl];
822 		  if (cost <= ira_max_memory_move_cost[m][cl][1]
823 		      || cost <= ira_max_memory_move_cost[m][cl][0])
824 		    break;
825 		}
826 	      if (m >= NUM_MACHINE_MODES)
827 		continue;
828 	    }
829 	  curr = 0;
830 	  insert_p = true;
831 	  temp_hard_regset = reg_class_contents[cl] & ~no_unit_alloc_regs;
832 	  /* Remove so far added pressure classes which are subset of the
833 	     current candidate class.  Prefer GENERAL_REGS as a pressure
834 	     register class to another class containing the same
835 	     allocatable hard registers.  We do this because machine
836 	     dependent cost hooks might give wrong costs for the latter
837 	     class but always give the right cost for the former class
838 	     (GENERAL_REGS).  */
839 	  for (i = 0; i < n; i++)
840 	    {
841 	      cl2 = pressure_classes[i];
842 	      temp_hard_regset2 = (reg_class_contents[cl2]
843 				   & ~no_unit_alloc_regs);
844 	      if (hard_reg_set_subset_p (temp_hard_regset, temp_hard_regset2)
845 		  && (temp_hard_regset != temp_hard_regset2
846 		      || cl2 == (int) GENERAL_REGS))
847 		{
848 		  pressure_classes[curr++] = (enum reg_class) cl2;
849 		  insert_p = false;
850 		  continue;
851 		}
852 	      if (hard_reg_set_subset_p (temp_hard_regset2, temp_hard_regset)
853 		  && (temp_hard_regset2 != temp_hard_regset
854 		      || cl == (int) GENERAL_REGS))
855 		continue;
856 	      if (temp_hard_regset2 == temp_hard_regset)
857 		insert_p = false;
858 	      pressure_classes[curr++] = (enum reg_class) cl2;
859 	    }
860 	  /* If the current candidate is a subset of a so far added
861 	     pressure class, don't add it to the list of the pressure
862 	     classes.  */
863 	  if (insert_p)
864 	    pressure_classes[curr++] = (enum reg_class) cl;
865 	  n = curr;
866 	}
867     }
868 #ifdef ENABLE_IRA_CHECKING
869   {
870     HARD_REG_SET ignore_hard_regs;
871 
872     /* Check pressure classes correctness: here we check that hard
873        registers from all register pressure classes contain all hard
874        registers available for the allocation.  */
875     CLEAR_HARD_REG_SET (temp_hard_regset);
876     CLEAR_HARD_REG_SET (temp_hard_regset2);
877     ignore_hard_regs = no_unit_alloc_regs;
878     for (cl = 0; cl < LIM_REG_CLASSES; cl++)
879       {
880 	/* For some targets (like MIPS with MD_REGS), there are some
881 	   classes with hard registers available for allocation but
882 	   not able to hold value of any mode.  */
883 	for (m = 0; m < NUM_MACHINE_MODES; m++)
884 	  if (contains_reg_of_mode[cl][m])
885 	    break;
886 	if (m >= NUM_MACHINE_MODES)
887 	  {
888 	    ignore_hard_regs |= reg_class_contents[cl];
889 	    continue;
890 	  }
891 	for (i = 0; i < n; i++)
892 	  if ((int) pressure_classes[i] == cl)
893 	    break;
894 	temp_hard_regset2 |= reg_class_contents[cl];
895 	if (i < n)
896 	  temp_hard_regset |= reg_class_contents[cl];
897       }
898     for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
899       /* Some targets (like SPARC with ICC reg) have allocatable regs
900 	 for which no reg class is defined.  */
901       if (REGNO_REG_CLASS (i) == NO_REGS)
902 	SET_HARD_REG_BIT (ignore_hard_regs, i);
903     temp_hard_regset &= ~ignore_hard_regs;
904     temp_hard_regset2 &= ~ignore_hard_regs;
905     ira_assert (hard_reg_set_subset_p (temp_hard_regset2, temp_hard_regset));
906   }
907 #endif
908   ira_pressure_classes_num = 0;
909   for (i = 0; i < n; i++)
910     {
911       cl = (int) pressure_classes[i];
912       ira_reg_pressure_class_p[cl] = true;
913       ira_pressure_classes[ira_pressure_classes_num++] = (enum reg_class) cl;
914     }
915   setup_stack_reg_pressure_class ();
916 }
917 
918 /* Set up IRA_UNIFORM_CLASS_P.  Uniform class is a register class
919    whose register move cost between any registers of the class is the
920    same as for all its subclasses.  We use the data to speed up the
921    2nd pass of calculations of allocno costs.  */
922 static void
923 setup_uniform_class_p (void)
924 {
925   int i, cl, cl2, m;
926 
927   for (cl = 0; cl < N_REG_CLASSES; cl++)
928     {
929       ira_uniform_class_p[cl] = false;
930       if (ira_class_hard_regs_num[cl] == 0)
931 	continue;
932       /* We cannot use alloc_reg_class_subclasses here because move
933 	 cost hooks do not take into account that some registers are
934 	 unavailable for the subtarget.  E.g. for i686, INT_SSE_REGS
935 	 is an element of alloc_reg_class_subclasses for GENERAL_REGS
936 	 because SSE regs are unavailable.  */
937       for (i = 0; (cl2 = reg_class_subclasses[cl][i]) != LIM_REG_CLASSES; i++)
938 	{
939 	  if (ira_class_hard_regs_num[cl2] == 0)
940 	    continue;
941       	  for (m = 0; m < NUM_MACHINE_MODES; m++)
942 	    if (contains_reg_of_mode[cl][m] && contains_reg_of_mode[cl2][m])
943 	      {
944 		ira_init_register_move_cost_if_necessary ((machine_mode) m);
945 		if (ira_register_move_cost[m][cl][cl]
946 		    != ira_register_move_cost[m][cl2][cl2])
947 		  break;
948 	      }
949 	  if (m < NUM_MACHINE_MODES)
950 	    break;
951 	}
952       if (cl2 == LIM_REG_CLASSES)
953 	ira_uniform_class_p[cl] = true;
954     }
955 }
956 
957 /* Set up IRA_ALLOCNO_CLASSES, IRA_ALLOCNO_CLASSES_NUM,
958    IRA_IMPORTANT_CLASSES, and IRA_IMPORTANT_CLASSES_NUM.
959 
960    A target may have many subtargets and not all target hard registers can
961    be used for allocation, e.g. the x86 port in 32-bit mode cannot use
962    the hard registers introduced in x86-64 like r8-r15.  Some classes
963    might have the same allocatable hard registers, e.g.  INDEX_REGS
964    and GENERAL_REGS in the x86 port in 32-bit mode.  To decrease the
965    effort of various calculations we introduce allocno classes which
966    contain unique non-empty sets of allocatable hard-registers.
967 
968    Pseudo class cost calculation in ira-costs.c is very expensive.
969    Therefore we are trying to decrease the number of classes involved
970    in such calculations.  Register classes used in the cost calculation
971    are called important classes.  They are allocno classes and other
972    non-empty classes whose allocatable hard register sets are inside
973    an allocno class hard register set.  At first sight, it looks
974    like they are just the allocno classes.  That is not true.  In the
975    example of the x86 port in 32-bit mode, the allocno classes will
976    contain GENERAL_REGS but not LEGACY_REGS (because the allocatable
977    hard registers are the same for both classes).  The important
978    classes will contain GENERAL_REGS and LEGACY_REGS.  This is done
979    because a machine description insn constraint may refer to
980    LEGACY_REGS, and the code in ira-costs.c is mostly based on
981    investigation of the insn constraints.  */
982 static void
983 setup_allocno_and_important_classes (void)
984 {
985   int i, j, n, cl;
986   bool set_p;
987   HARD_REG_SET temp_hard_regset2;
988   static enum reg_class classes[LIM_REG_CLASSES + 1];
989 
990   n = 0;
991   /* Collect classes which contain unique sets of allocatable hard
992      registers.  Prefer GENERAL_REGS to other classes containing the
993      same set of hard registers.  */
994   for (i = 0; i < LIM_REG_CLASSES; i++)
995     {
996       temp_hard_regset = reg_class_contents[i] & ~no_unit_alloc_regs;
997       for (j = 0; j < n; j++)
998 	{
999 	  cl = classes[j];
1000 	  temp_hard_regset2 = reg_class_contents[cl] & ~no_unit_alloc_regs;
1001 	  if (temp_hard_regset == temp_hard_regset2)
1002 	    break;
1003 	}
1004       if (j >= n || targetm.additional_allocno_class_p (i))
1005 	classes[n++] = (enum reg_class) i;
1006       else if (i == GENERAL_REGS)
1007 	/* Prefer general regs.  For i386 example, it means that
1008 	   we prefer GENERAL_REGS over INDEX_REGS or LEGACY_REGS
1009 	   (all of them consist of the same available hard
1010 	   registers).  */
1011 	classes[j] = (enum reg_class) i;
1012     }
1013   classes[n] = LIM_REG_CLASSES;
1014 
1015   /* Set up classes which can be used for allocnos as classes
1016      containing non-empty unique sets of allocatable hard
1017      registers.  */
1018   ira_allocno_classes_num = 0;
1019   for (i = 0; (cl = classes[i]) != LIM_REG_CLASSES; i++)
1020     if (ira_class_hard_regs_num[cl] > 0)
1021       ira_allocno_classes[ira_allocno_classes_num++] = (enum reg_class) cl;
1022   ira_important_classes_num = 0;
1023   /* Add non-allocno classes with a non-empty set of
1024      allocatable hard regs.  */
1025   for (cl = 0; cl < N_REG_CLASSES; cl++)
1026     if (ira_class_hard_regs_num[cl] > 0)
1027       {
1028 	temp_hard_regset = reg_class_contents[cl] & ~no_unit_alloc_regs;
1029 	set_p = false;
1030 	for (j = 0; j < ira_allocno_classes_num; j++)
1031 	  {
1032 	    temp_hard_regset2 = (reg_class_contents[ira_allocno_classes[j]]
1033 				 & ~no_unit_alloc_regs);
1034 	    if ((enum reg_class) cl == ira_allocno_classes[j])
1035 	      break;
1036 	    else if (hard_reg_set_subset_p (temp_hard_regset,
1037 					    temp_hard_regset2))
1038 	      set_p = true;
1039 	  }
1040 	if (set_p && j >= ira_allocno_classes_num)
1041 	  ira_important_classes[ira_important_classes_num++]
1042 	    = (enum reg_class) cl;
1043       }
1044   /* Now add allocno classes to the important classes.  */
1045   for (j = 0; j < ira_allocno_classes_num; j++)
1046     ira_important_classes[ira_important_classes_num++]
1047       = ira_allocno_classes[j];
1048   for (cl = 0; cl < N_REG_CLASSES; cl++)
1049     {
1050       ira_reg_allocno_class_p[cl] = false;
1051       ira_reg_pressure_class_p[cl] = false;
1052     }
1053   for (j = 0; j < ira_allocno_classes_num; j++)
1054     ira_reg_allocno_class_p[ira_allocno_classes[j]] = true;
1055   setup_pressure_classes ();
1056   setup_uniform_class_p ();
1057 }
1058 
1059 /* Set up the translation in CLASS_TRANSLATE of all classes into a class
1060    given by array CLASSES of length CLASSES_NUM.  The function is used to
1061    translate any reg class to an allocno class or to a
1062    pressure class.  This translation is necessary for some
1063    calculations when we can use only allocno or pressure classes and
1064    such a translation represents an approximate representation of all
1065    classes.
1066 
1067    The translation in the case when the allocatable hard register set of a
1068    given class is a subset of the allocatable hard register set of a class
1069    in CLASSES is pretty simple.  We use the smallest class from CLASSES
1070    containing the given class.  If the allocatable hard register set of a
1071    given class is not a subset of any corresponding set of a class
1072    from CLASSES, we use the cheapest (from the load/store point of view)
1073    class from CLASSES whose set intersects with the given class's set.  */
1074 static void
1075 setup_class_translate_array (enum reg_class *class_translate,
1076 			     int classes_num, enum reg_class *classes)
1077 {
1078   int cl, mode;
1079   enum reg_class aclass, best_class, *cl_ptr;
1080   int i, cost, min_cost, best_cost;
1081 
1082   for (cl = 0; cl < N_REG_CLASSES; cl++)
1083     class_translate[cl] = NO_REGS;
1084 
1085   for (i = 0; i < classes_num; i++)
1086     {
1087       aclass = classes[i];
1088       for (cl_ptr = &alloc_reg_class_subclasses[aclass][0];
1089 	   (cl = *cl_ptr) != LIM_REG_CLASSES;
1090 	   cl_ptr++)
1091 	if (class_translate[cl] == NO_REGS)
1092 	  class_translate[cl] = aclass;
1093       class_translate[aclass] = aclass;
1094     }
1095   /* For classes which are not fully covered by one of given classes
1096      (in other words covered by more than one given class), use the
1097      cheapest class.  */
1098   for (cl = 0; cl < N_REG_CLASSES; cl++)
1099     {
1100       if (cl == NO_REGS || class_translate[cl] != NO_REGS)
1101 	continue;
1102       best_class = NO_REGS;
1103       best_cost = INT_MAX;
1104       for (i = 0; i < classes_num; i++)
1105 	{
1106 	  aclass = classes[i];
1107 	  temp_hard_regset = (reg_class_contents[aclass]
1108 			      & reg_class_contents[cl]
1109 			      & ~no_unit_alloc_regs);
1110 	  if (! hard_reg_set_empty_p (temp_hard_regset))
1111 	    {
1112 	      min_cost = INT_MAX;
1113 	      for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
1114 		{
1115 		  cost = (ira_memory_move_cost[mode][aclass][0]
1116 			  + ira_memory_move_cost[mode][aclass][1]);
1117 		  if (min_cost > cost)
1118 		    min_cost = cost;
1119 		}
1120 	      if (best_class == NO_REGS || best_cost > min_cost)
1121 		{
1122 		  best_class = aclass;
1123 		  best_cost = min_cost;
1124 		}
1125 	    }
1126 	}
1127       class_translate[cl] = best_class;
1128     }
1129 }
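
/* An illustrative sketch, not part of IRA: once the translation arrays
   are filled in, any register class CL can be mapped to the allocno or
   pressure class approximating it, e.g.

     enum reg_class aclass = ira_allocno_class_translate[cl];
     enum reg_class pclass = ira_pressure_class_translate[cl];

   A class which intersects none of the given classes keeps the NO_REGS
   translation.  */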
1130 
1131 /* Set up array IRA_ALLOCNO_CLASS_TRANSLATE and
1132    IRA_PRESSURE_CLASS_TRANSLATE.  */
1133 static void
1134 setup_class_translate (void)
1135 {
1136   setup_class_translate_array (ira_allocno_class_translate,
1137 			       ira_allocno_classes_num, ira_allocno_classes);
1138   setup_class_translate_array (ira_pressure_class_translate,
1139 			       ira_pressure_classes_num, ira_pressure_classes);
1140 }
1141 
1142 /* Order numbers of allocno classes in original target allocno class
1143    array, -1 for non-allocno classes.  */
1144 static int allocno_class_order[N_REG_CLASSES];
1145 
1146 /* The function used to sort the important classes.  */
1147 static int
1148 comp_reg_classes_func (const void *v1p, const void *v2p)
1149 {
1150   enum reg_class cl1 = *(const enum reg_class *) v1p;
1151   enum reg_class cl2 = *(const enum reg_class *) v2p;
1152   enum reg_class tcl1, tcl2;
1153   int diff;
1154 
1155   tcl1 = ira_allocno_class_translate[cl1];
1156   tcl2 = ira_allocno_class_translate[cl2];
1157   if (tcl1 != NO_REGS && tcl2 != NO_REGS
1158       && (diff = allocno_class_order[tcl1] - allocno_class_order[tcl2]) != 0)
1159     return diff;
1160   return (int) cl1 - (int) cl2;
1161 }
1162 
1163 /* For correct work of function setup_reg_class_relations we need to
1164    reorder important classes according to the order of their allocno
1165    classes.  This places important classes containing the same
1166    allocatable hard register set adjacent to each other, with the allocno
1167    class that has this allocatable hard register set right after the
1168    other important classes with the same set.
1169 
1170    In the example from the comments of function
1171    setup_allocno_and_important_classes, it places LEGACY_REGS and
1172    GENERAL_REGS close to each other and GENERAL_REGS right after
1173    LEGACY_REGS.  */
1174 static void
1175 reorder_important_classes (void)
1176 {
1177   int i;
1178 
1179   for (i = 0; i < N_REG_CLASSES; i++)
1180     allocno_class_order[i] = -1;
1181   for (i = 0; i < ira_allocno_classes_num; i++)
1182     allocno_class_order[ira_allocno_classes[i]] = i;
1183   qsort (ira_important_classes, ira_important_classes_num,
1184 	 sizeof (enum reg_class), comp_reg_classes_func);
1185   for (i = 0; i < ira_important_classes_num; i++)
1186     ira_important_class_nums[ira_important_classes[i]] = i;
1187 }
1188 
1189 /* Set up IRA_REG_CLASS_SUBUNION, IRA_REG_CLASS_SUPERUNION,
1190    IRA_REG_CLASS_SUPER_CLASSES, IRA_REG_CLASSES_INTERSECT, and
1191    IRA_REG_CLASSES_INTERSECT_P.  For the meaning of the relations,
1192    please see corresponding comments in ira-int.h.  */
1193 static void
1194 setup_reg_class_relations (void)
1195 {
1196   int i, cl1, cl2, cl3;
1197   HARD_REG_SET intersection_set, union_set, temp_set2;
1198   bool important_class_p[N_REG_CLASSES];
1199 
1200   memset (important_class_p, 0, sizeof (important_class_p));
1201   for (i = 0; i < ira_important_classes_num; i++)
1202     important_class_p[ira_important_classes[i]] = true;
1203   for (cl1 = 0; cl1 < N_REG_CLASSES; cl1++)
1204     {
1205       ira_reg_class_super_classes[cl1][0] = LIM_REG_CLASSES;
1206       for (cl2 = 0; cl2 < N_REG_CLASSES; cl2++)
1207 	{
1208 	  ira_reg_classes_intersect_p[cl1][cl2] = false;
1209 	  ira_reg_class_intersect[cl1][cl2] = NO_REGS;
1210 	  ira_reg_class_subset[cl1][cl2] = NO_REGS;
1211 	  temp_hard_regset = reg_class_contents[cl1] & ~no_unit_alloc_regs;
1212 	  temp_set2 = reg_class_contents[cl2] & ~no_unit_alloc_regs;
1213 	  if (hard_reg_set_empty_p (temp_hard_regset)
1214 	      && hard_reg_set_empty_p (temp_set2))
1215 	    {
1216 	      /* Both classes have no allocatable hard registers
1217 		 -- take all class hard registers into account and use
1218 		 reg_class_subunion and reg_class_superunion.  */
1219 	      for (i = 0;; i++)
1220 		{
1221 		  cl3 = reg_class_subclasses[cl1][i];
1222 		  if (cl3 == LIM_REG_CLASSES)
1223 		    break;
1224 		  if (reg_class_subset_p (ira_reg_class_intersect[cl1][cl2],
1225 					  (enum reg_class) cl3))
1226 		    ira_reg_class_intersect[cl1][cl2] = (enum reg_class) cl3;
1227 		}
1228 	      ira_reg_class_subunion[cl1][cl2] = reg_class_subunion[cl1][cl2];
1229 	      ira_reg_class_superunion[cl1][cl2] = reg_class_superunion[cl1][cl2];
1230 	      continue;
1231 	    }
1232 	  ira_reg_classes_intersect_p[cl1][cl2]
1233 	    = hard_reg_set_intersect_p (temp_hard_regset, temp_set2);
1234 	  if (important_class_p[cl1] && important_class_p[cl2]
1235 	      && hard_reg_set_subset_p (temp_hard_regset, temp_set2))
1236 	    {
1237 	      /* CL1 and CL2 are important classes and CL1 allocatable
1238 		 hard register set is inside of CL2 allocatable hard
1239 		 registers -- record CL2 as a super class of CL1.  */
1240 	      enum reg_class *p;
1241 
1242 	      p = &ira_reg_class_super_classes[cl1][0];
1243 	      while (*p != LIM_REG_CLASSES)
1244 		p++;
1245 	      *p++ = (enum reg_class) cl2;
1246 	      *p = LIM_REG_CLASSES;
1247 	    }
1248 	  ira_reg_class_subunion[cl1][cl2] = NO_REGS;
1249 	  ira_reg_class_superunion[cl1][cl2] = NO_REGS;
1250 	  intersection_set = (reg_class_contents[cl1]
1251 			      & reg_class_contents[cl2]
1252 			      & ~no_unit_alloc_regs);
1253 	  union_set = ((reg_class_contents[cl1] | reg_class_contents[cl2])
1254 		       & ~no_unit_alloc_regs);
1255 	  for (cl3 = 0; cl3 < N_REG_CLASSES; cl3++)
1256 	    {
1257 	      temp_hard_regset = reg_class_contents[cl3] & ~no_unit_alloc_regs;
1258 	      if (hard_reg_set_subset_p (temp_hard_regset, intersection_set))
1259 		{
1260 		  /* CL3 allocatable hard register set is inside of
1261 		     intersection of allocatable hard register sets
1262 		     of CL1 and CL2.  */
1263 		  if (important_class_p[cl3])
1264 		    {
1265 		      temp_set2
1266 			= (reg_class_contents
1267 			   [ira_reg_class_intersect[cl1][cl2]]);
1268 		      temp_set2 &= ~no_unit_alloc_regs;
1269 		      if (! hard_reg_set_subset_p (temp_hard_regset, temp_set2)
1270 			  /* If the allocatable hard register sets are
1271 			     the same, prefer GENERAL_REGS or the
1272 			     smallest class for debugging
1273 			     purposes.  */
1274 			  || (temp_hard_regset == temp_set2
1275 			      && (cl3 == GENERAL_REGS
1276 				  || ((ira_reg_class_intersect[cl1][cl2]
1277 				       != GENERAL_REGS)
1278 				      && hard_reg_set_subset_p
1279 				         (reg_class_contents[cl3],
1280 					  reg_class_contents
1281 					  [(int)
1282 					   ira_reg_class_intersect[cl1][cl2]])))))
1283 			ira_reg_class_intersect[cl1][cl2] = (enum reg_class) cl3;
1284 		    }
1285 		  temp_set2
1286 		    = (reg_class_contents[ira_reg_class_subset[cl1][cl2]]
1287 		       & ~no_unit_alloc_regs);
1288 		  if (! hard_reg_set_subset_p (temp_hard_regset, temp_set2)
1289 		      /* Ignore unavailable hard registers and prefer
1290 			 smallest class for debugging purposes.  */
1291 		      || (temp_hard_regset == temp_set2
1292 			  && hard_reg_set_subset_p
1293 			     (reg_class_contents[cl3],
1294 			      reg_class_contents
1295 			      [(int) ira_reg_class_subset[cl1][cl2]])))
1296 		    ira_reg_class_subset[cl1][cl2] = (enum reg_class) cl3;
1297 		}
1298 	      if (important_class_p[cl3]
1299 		  && hard_reg_set_subset_p (temp_hard_regset, union_set))
1300 		{
1301 		  /* CL3 allocatable hard register set is inside of
1302 		     union of allocatable hard register sets of CL1
1303 		     and CL2.  */
1304 		  temp_set2
1305 		    = (reg_class_contents[ira_reg_class_subunion[cl1][cl2]]
1306 		       & ~no_unit_alloc_regs);
1307 	 	  if (ira_reg_class_subunion[cl1][cl2] == NO_REGS
1308 		      || (hard_reg_set_subset_p (temp_set2, temp_hard_regset)
1309 
1310 			  && (temp_set2 != temp_hard_regset
1311 			      || cl3 == GENERAL_REGS
1312 			      /* If the allocatable hard register sets are the
1313 				 same, prefer GENERAL_REGS or the smallest
1314 				 class for debugging purposes.  */
1315 			      || (ira_reg_class_subunion[cl1][cl2] != GENERAL_REGS
1316 				  && hard_reg_set_subset_p
1317 				     (reg_class_contents[cl3],
1318 				      reg_class_contents
1319 				      [(int) ira_reg_class_subunion[cl1][cl2]])))))
1320 		    ira_reg_class_subunion[cl1][cl2] = (enum reg_class) cl3;
1321 		}
1322 	      if (hard_reg_set_subset_p (union_set, temp_hard_regset))
1323 		{
1324 		  /* CL3 allocatable hard register set contains union
1325 		     of allocatable hard register sets of CL1 and
1326 		     CL2.  */
1327 		  temp_set2
1328 		    = (reg_class_contents[ira_reg_class_superunion[cl1][cl2]]
1329 		       & ~no_unit_alloc_regs);
1330 	 	  if (ira_reg_class_superunion[cl1][cl2] == NO_REGS
1331 		      || (hard_reg_set_subset_p (temp_hard_regset, temp_set2)
1332 
1333 			  && (temp_set2 != temp_hard_regset
1334 			      || cl3 == GENERAL_REGS
1335 			      /* If the allocatable hard register sets are the
1336 				 same, prefer GENERAL_REGS or the smallest
1337 				 class for debugging purposes.  */
1338 			      || (ira_reg_class_superunion[cl1][cl2] != GENERAL_REGS
1339 				  && hard_reg_set_subset_p
1340 				     (reg_class_contents[cl3],
1341 				      reg_class_contents
1342 				      [(int) ira_reg_class_superunion[cl1][cl2]])))))
1343 		    ira_reg_class_superunion[cl1][cl2] = (enum reg_class) cl3;
1344 		}
1345 	    }
1346 	}
1347     }
1348 }
1349 
1350 /* Output all uniform and important classes into file F.  */
1351 static void
1352 print_uniform_and_important_classes (FILE *f)
1353 {
1354   int i, cl;
1355 
1356   fprintf (f, "Uniform classes:\n");
1357   for (cl = 0; cl < N_REG_CLASSES; cl++)
1358     if (ira_uniform_class_p[cl])
1359       fprintf (f, " %s", reg_class_names[cl]);
1360   fprintf (f, "\nImportant classes:\n");
1361   for (i = 0; i < ira_important_classes_num; i++)
1362     fprintf (f, " %s", reg_class_names[ira_important_classes[i]]);
1363   fprintf (f, "\n");
1364 }
1365 
1366 /* Output all possible allocno or pressure classes and their
1367    translation map into file F.  */
1368 static void
1369 print_translated_classes (FILE *f, bool pressure_p)
1370 {
1371   int classes_num = (pressure_p
1372 		     ? ira_pressure_classes_num : ira_allocno_classes_num);
1373   enum reg_class *classes = (pressure_p
1374 			     ? ira_pressure_classes : ira_allocno_classes);
1375   enum reg_class *class_translate = (pressure_p
1376 				     ? ira_pressure_class_translate
1377 				     : ira_allocno_class_translate);
1378   int i;
1379 
1380   fprintf (f, "%s classes:\n", pressure_p ? "Pressure" : "Allocno");
1381   for (i = 0; i < classes_num; i++)
1382     fprintf (f, " %s", reg_class_names[classes[i]]);
1383   fprintf (f, "\nClass translation:\n");
1384   for (i = 0; i < N_REG_CLASSES; i++)
1385     fprintf (f, " %s -> %s\n", reg_class_names[i],
1386 	     reg_class_names[class_translate[i]]);
1387 }
1388 
1389 /* Output all possible allocno and pressure classes and their
1390    translation maps into stderr.  */
1391 void
1392 ira_debug_allocno_classes (void)
1393 {
1394   print_uniform_and_important_classes (stderr);
1395   print_translated_classes (stderr, false);
1396   print_translated_classes (stderr, true);
1397 }
1398 
1399 /* Set up different arrays concerning class subsets, allocno and
1400    important classes.  */
1401 static void
1402 find_reg_classes (void)
1403 {
1404   setup_allocno_and_important_classes ();
1405   setup_class_translate ();
1406   reorder_important_classes ();
1407   setup_reg_class_relations ();
1408 }
1409 
1410 
1411 
1412 /* Set up IRA_HARD_REGNO_ALLOCNO_CLASS.  */
1413 static void
1414 setup_hard_regno_aclass (void)
1415 {
1416   int i;
1417 
1418   for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1419     {
1420 #if 1
1421       ira_hard_regno_allocno_class[i]
1422 	= (TEST_HARD_REG_BIT (no_unit_alloc_regs, i)
1423 	   ? NO_REGS
1424 	   : ira_allocno_class_translate[REGNO_REG_CLASS (i)]);
1425 #else
1426       int j;
1427       enum reg_class cl;
1428       ira_hard_regno_allocno_class[i] = NO_REGS;
1429       for (j = 0; j < ira_allocno_classes_num; j++)
1430  	{
1431 	  cl = ira_allocno_classes[j];
1432  	  if (ira_class_hard_reg_index[cl][i] >= 0)
1433  	    {
1434 	      ira_hard_regno_allocno_class[i] = cl;
1435  	      break;
1436  	    }
1437  	}
1438 #endif
1439     }
1440 }
1441 
1442 
1443 
1444 /* Form IRA_REG_CLASS_MAX_NREGS and IRA_REG_CLASS_MIN_NREGS maps.  */
1445 static void
1446 setup_reg_class_nregs (void)
1447 {
1448   int i, cl, cl2, m;
1449 
1450   for (m = 0; m < MAX_MACHINE_MODE; m++)
1451     {
1452       for (cl = 0; cl < N_REG_CLASSES; cl++)
1453 	ira_reg_class_max_nregs[cl][m]
1454 	  = ira_reg_class_min_nregs[cl][m]
1455 	  = targetm.class_max_nregs ((reg_class_t) cl, (machine_mode) m);
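      /* A subclass of CL may be able to hold the mode in fewer hard
	 registers; the loop below therefore takes the minimum over all
	 subclasses, whose registers also belong to CL.  */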
1456       for (cl = 0; cl < N_REG_CLASSES; cl++)
1457 	for (i = 0;
1458 	     (cl2 = alloc_reg_class_subclasses[cl][i]) != LIM_REG_CLASSES;
1459 	     i++)
1460 	  if (ira_reg_class_min_nregs[cl2][m]
1461 	      < ira_reg_class_min_nregs[cl][m])
1462 	    ira_reg_class_min_nregs[cl][m] = ira_reg_class_min_nregs[cl2][m];
1463     }
1464 }
1465 
1466 
1467 
1468 /* Set up IRA_PROHIBITED_CLASS_MODE_REGS and IRA_CLASS_SINGLETON.
1469    This function is called once IRA_CLASS_HARD_REGS has been initialized.  */
1470 static void
1471 setup_prohibited_class_mode_regs (void)
1472 {
1473   int j, k, hard_regno, cl, last_hard_regno, count;
1474 
1475   for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
1476     {
1477       temp_hard_regset = reg_class_contents[cl] & ~no_unit_alloc_regs;
1478       for (j = 0; j < NUM_MACHINE_MODES; j++)
1479 	{
1480 	  count = 0;
1481 	  last_hard_regno = -1;
1482 	  CLEAR_HARD_REG_SET (ira_prohibited_class_mode_regs[cl][j]);
1483 	  for (k = ira_class_hard_regs_num[cl] - 1; k >= 0; k--)
1484 	    {
1485 	      hard_regno = ira_class_hard_regs[cl][k];
1486 	      if (!targetm.hard_regno_mode_ok (hard_regno, (machine_mode) j))
1487 		SET_HARD_REG_BIT (ira_prohibited_class_mode_regs[cl][j],
1488 				  hard_regno);
1489 	      else if (in_hard_reg_set_p (temp_hard_regset,
1490 					  (machine_mode) j, hard_regno))
1491 		{
1492 		  last_hard_regno = hard_regno;
1493 		  count++;
1494 		}
1495 	    }
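	  /* Remember the hard register if it is the only allocatable
	     register of CL that can hold mode J (-1 otherwise).  */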
1496 	  ira_class_singleton[cl][j] = (count == 1 ? last_hard_regno : -1);
1497 	}
1498     }
1499 }
1500 
1501 /* Clarify IRA_PROHIBITED_CLASS_MODE_REGS by excluding hard registers
1502    spanning from one register pressure class to another one.  It is
1503    called after defining the pressure classes.  */
1504 static void
1505 clarify_prohibited_class_mode_regs (void)
1506 {
1507   int j, k, hard_regno, cl, pclass, nregs;
1508 
1509   for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
1510     for (j = 0; j < NUM_MACHINE_MODES; j++)
1511       {
1512 	CLEAR_HARD_REG_SET (ira_useful_class_mode_regs[cl][j]);
1513 	for (k = ira_class_hard_regs_num[cl] - 1; k >= 0; k--)
1514 	  {
1515 	    hard_regno = ira_class_hard_regs[cl][k];
1516 	    if (TEST_HARD_REG_BIT (ira_prohibited_class_mode_regs[cl][j], hard_regno))
1517 	      continue;
1518 	    nregs = hard_regno_nregs (hard_regno, (machine_mode) j);
1519 	    if (hard_regno + nregs > FIRST_PSEUDO_REGISTER)
1520 	      {
1521 		SET_HARD_REG_BIT (ira_prohibited_class_mode_regs[cl][j],
1522 				  hard_regno);
1523 		 continue;
1524 	      }
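	    /* Prohibit HARD_REGNO for mode J if the hard registers it
	       would occupy belong to different pressure classes; see the
	       comment before this function.  */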
1525 	    pclass = ira_pressure_class_translate[REGNO_REG_CLASS (hard_regno)];
1526 	    for (nregs--; nregs >= 0; nregs--)
1527 	      if (((enum reg_class) pclass
1528 		   != ira_pressure_class_translate[REGNO_REG_CLASS
1529 						   (hard_regno + nregs)]))
1530 		{
1531 		  SET_HARD_REG_BIT (ira_prohibited_class_mode_regs[cl][j],
1532 				    hard_regno);
1533 		  break;
1534 		}
1535 	    if (!TEST_HARD_REG_BIT (ira_prohibited_class_mode_regs[cl][j],
1536 				    hard_regno))
1537 	      add_to_hard_reg_set (&ira_useful_class_mode_regs[cl][j],
1538 				   (machine_mode) j, hard_regno);
1539 	  }
1540       }
1541 }
1542 
1543 /* Allocate and initialize IRA_REGISTER_MOVE_COST, IRA_MAY_MOVE_IN_COST
1544    and IRA_MAY_MOVE_OUT_COST for MODE.  */
1545 void
1546 ira_init_register_move_cost (machine_mode mode)
1547 {
1548   static unsigned short last_move_cost[N_REG_CLASSES][N_REG_CLASSES];
1549   bool all_match = true;
1550   unsigned int i, cl1, cl2;
1551   HARD_REG_SET ok_regs;
1552 
1553   ira_assert (ira_register_move_cost[mode] == NULL
1554 	      && ira_may_move_in_cost[mode] == NULL
1555 	      && ira_may_move_out_cost[mode] == NULL);
1556   CLEAR_HARD_REG_SET (ok_regs);
1557   for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1558     if (targetm.hard_regno_mode_ok (i, mode))
1559       SET_HARD_REG_BIT (ok_regs, i);
1560 
1561   /* Note that we might be asked about the move costs of modes that
1562      cannot be stored in any hard register, for example if an inline
1563      asm tries to create a register operand with an impossible mode.
1564      We therefore can't assert have_regs_of_mode[mode] here.  */
1565   for (cl1 = 0; cl1 < N_REG_CLASSES; cl1++)
1566     for (cl2 = 0; cl2 < N_REG_CLASSES; cl2++)
1567       {
1568 	int cost;
1569 	if (!hard_reg_set_intersect_p (ok_regs, reg_class_contents[cl1])
1570 	    || !hard_reg_set_intersect_p (ok_regs, reg_class_contents[cl2]))
1571 	  {
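	    /* No hard register of CL1 or CL2 can hold MODE, so a direct
	       register-register move is impossible.  Use 65535 as an
	       effectively infinite cost when a class does not even have
	       enough allocatable registers for the mode; otherwise
	       approximate the move as going through memory (a store plus
	       a load), scaled by 2.  */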
1572 	    if ((ira_reg_class_max_nregs[cl1][mode]
1573 		 > ira_class_hard_regs_num[cl1])
1574 		|| (ira_reg_class_max_nregs[cl2][mode]
1575 		    > ira_class_hard_regs_num[cl2]))
1576 	      cost = 65535;
1577 	    else
1578 	      cost = (ira_memory_move_cost[mode][cl1][0]
1579 		      + ira_memory_move_cost[mode][cl2][1]) * 2;
1580 	  }
1581 	else
1582 	  {
1583 	    cost = register_move_cost (mode, (enum reg_class) cl1,
1584 				       (enum reg_class) cl2);
1585 	    ira_assert (cost < 65535);
1586 	  }
1587 	all_match &= (last_move_cost[cl1][cl2] == cost);
1588 	last_move_cost[cl1][cl2] = cost;
1589       }
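  /* If the costs computed for this mode are identical to those of the
     previously initialized mode, share that mode's tables instead of
     allocating new ones; free_register_move_costs frees such shared
     tables only once.  */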
1590   if (all_match && last_mode_for_init_move_cost != -1)
1591     {
1592       ira_register_move_cost[mode]
1593 	= ira_register_move_cost[last_mode_for_init_move_cost];
1594       ira_may_move_in_cost[mode]
1595 	= ira_may_move_in_cost[last_mode_for_init_move_cost];
1596       ira_may_move_out_cost[mode]
1597 	= ira_may_move_out_cost[last_mode_for_init_move_cost];
1598       return;
1599     }
1600   last_mode_for_init_move_cost = mode;
1601   ira_register_move_cost[mode] = XNEWVEC (move_table, N_REG_CLASSES);
1602   ira_may_move_in_cost[mode] = XNEWVEC (move_table, N_REG_CLASSES);
1603   ira_may_move_out_cost[mode] = XNEWVEC (move_table, N_REG_CLASSES);
1604   for (cl1 = 0; cl1 < N_REG_CLASSES; cl1++)
1605     for (cl2 = 0; cl2 < N_REG_CLASSES; cl2++)
1606       {
1607 	int cost;
1608 	enum reg_class *p1, *p2;
1609 
1610 	if (last_move_cost[cl1][cl2] == 65535)
1611 	  {
1612 	    ira_register_move_cost[mode][cl1][cl2] = 65535;
1613 	    ira_may_move_in_cost[mode][cl1][cl2] = 65535;
1614 	    ira_may_move_out_cost[mode][cl1][cl2] = 65535;
1615 	  }
1616 	else
1617 	  {
1618 	    cost = last_move_cost[cl1][cl2];
1619 
1620 	    for (p2 = &reg_class_subclasses[cl2][0];
1621 		 *p2 != LIM_REG_CLASSES; p2++)
1622 	      if (ira_class_hard_regs_num[*p2] > 0
1623 		  && (ira_reg_class_max_nregs[*p2][mode]
1624 		      <= ira_class_hard_regs_num[*p2]))
1625 		cost = MAX (cost, ira_register_move_cost[mode][cl1][*p2]);
1626 
1627 	    for (p1 = &reg_class_subclasses[cl1][0];
1628 		 *p1 != LIM_REG_CLASSES; p1++)
1629 	      if (ira_class_hard_regs_num[*p1] > 0
1630 		  && (ira_reg_class_max_nregs[*p1][mode]
1631 		      <= ira_class_hard_regs_num[*p1]))
1632 		cost = MAX (cost, ira_register_move_cost[mode][*p1][cl2]);
1633 
1634 	    ira_assert (cost <= 65535);
1635 	    ira_register_move_cost[mode][cl1][cl2] = cost;
1636 
1637 	    if (ira_class_subset_p[cl1][cl2])
1638 	      ira_may_move_in_cost[mode][cl1][cl2] = 0;
1639 	    else
1640 	      ira_may_move_in_cost[mode][cl1][cl2] = cost;
1641 
1642 	    if (ira_class_subset_p[cl2][cl1])
1643 	      ira_may_move_out_cost[mode][cl1][cl2] = 0;
1644 	    else
1645 	      ira_may_move_out_cost[mode][cl1][cl2] = cost;
1646 	  }
1647       }
1648 }
1649 
1650 
1651 
1652 /* This is called once per compiler run.  It sets up
1653    various arrays whose values don't depend on the function
1654    being compiled.  */
1655 void
ira_init_once(void)1656 ira_init_once (void)
1657 {
1658   ira_init_costs_once ();
1659   lra_init_once ();
1660 
1661   ira_use_lra_p = targetm.lra_p ();
1662 }
1663 
1664 /* Free ira_register_move_cost, ira_may_move_in_cost and
1665    ira_may_move_out_cost for each mode.  */
1666 void
1667 target_ira_int::free_register_move_costs (void)
1668 {
1669   int mode, i;
1670 
1671   /* Reset move_cost and friends, making sure we only free shared
1672      table entries once.  */
1673   for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
1674     if (x_ira_register_move_cost[mode])
1675       {
1676 	for (i = 0;
1677 	     i < mode && (x_ira_register_move_cost[i]
1678 			  != x_ira_register_move_cost[mode]);
1679 	     i++)
1680 	  ;
1681 	if (i == mode)
1682 	  {
1683 	    free (x_ira_register_move_cost[mode]);
1684 	    free (x_ira_may_move_in_cost[mode]);
1685 	    free (x_ira_may_move_out_cost[mode]);
1686 	  }
1687       }
1688   memset (x_ira_register_move_cost, 0, sizeof x_ira_register_move_cost);
1689   memset (x_ira_may_move_in_cost, 0, sizeof x_ira_may_move_in_cost);
1690   memset (x_ira_may_move_out_cost, 0, sizeof x_ira_may_move_out_cost);
1691   last_mode_for_init_move_cost = -1;
1692 }
1693 
1694 target_ira_int::~target_ira_int ()
1695 {
1696   free_ira_costs ();
1697   free_register_move_costs ();
1698 }
1699 
1700 /* This is called every time the register-related information
1701    changes.  */
1702 void
1703 ira_init (void)
1704 {
1705   this_target_ira_int->free_register_move_costs ();
1706   setup_reg_mode_hard_regset ();
1707   setup_alloc_regs (flag_omit_frame_pointer != 0);
1708   setup_class_subset_and_memory_move_costs ();
1709   setup_reg_class_nregs ();
1710   setup_prohibited_class_mode_regs ();
1711   find_reg_classes ();
1712   clarify_prohibited_class_mode_regs ();
1713   setup_hard_regno_aclass ();
1714   ira_init_costs ();
1715 }
1716 
1717 
1718 #define ira_prohibited_mode_move_regs_initialized_p \
1719   (this_target_ira_int->x_ira_prohibited_mode_move_regs_initialized_p)
1720 
1721 /* Set up IRA_PROHIBITED_MODE_MOVE_REGS.  */
1722 static void
1723 setup_prohibited_mode_move_regs (void)
1724 {
1725   int i, j;
1726   rtx test_reg1, test_reg2, move_pat;
1727   rtx_insn *move_insn;
1728 
1729   if (ira_prohibited_mode_move_regs_initialized_p)
1730     return;
1731   ira_prohibited_mode_move_regs_initialized_p = true;
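  /* Build a dummy word_mode reg-reg move insn once; the loop below
     reuses it for every mode/hard-register pair by changing the mode
     and regno of the two test registers and re-running recog.  */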
1732   test_reg1 = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
1733   test_reg2 = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 2);
1734   move_pat = gen_rtx_SET (test_reg1, test_reg2);
1735   move_insn = gen_rtx_INSN (VOIDmode, 0, 0, 0, move_pat, 0, -1, 0);
1736   for (i = 0; i < NUM_MACHINE_MODES; i++)
1737     {
1738       SET_HARD_REG_SET (ira_prohibited_mode_move_regs[i]);
1739       for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
1740 	{
1741 	  if (!targetm.hard_regno_mode_ok (j, (machine_mode) i))
1742 	    continue;
1743 	  set_mode_and_regno (test_reg1, (machine_mode) i, j);
1744 	  set_mode_and_regno (test_reg2, (machine_mode) i, j);
1745 	  INSN_CODE (move_insn) = -1;
1746 	  recog_memoized (move_insn);
1747 	  if (INSN_CODE (move_insn) < 0)
1748 	    continue;
1749 	  extract_insn (move_insn);
1750 	  /* We don't know whether the move will be in code that is optimized
1751 	     for size or speed, so consider all enabled alternatives.  */
1752 	  if (! constrain_operands (1, get_enabled_alternatives (move_insn)))
1753 	    continue;
1754 	  CLEAR_HARD_REG_BIT (ira_prohibited_mode_move_regs[i], j);
1755 	}
1756     }
1757 }
1758 
1759 
1760 
1761 /* Extract INSN and return the set of alternatives that we should consider.
1762    This excludes any alternatives whose constraints are obviously impossible
1763    to meet (e.g. because the constraint requires a constant and the operand
1764    is nonconstant).  It also excludes alternatives that are bound to need
1765    a spill or reload, as long as we have other alternatives that match
1766    exactly.  */
1767 alternative_mask
1768 ira_setup_alts (rtx_insn *insn)
1769 {
1770   int nop, nalt;
1771   bool curr_swapped;
1772   const char *p;
1773   int commutative = -1;
1774 
1775   extract_insn (insn);
1776   preprocess_constraints (insn);
1777   alternative_mask preferred = get_preferred_alternatives (insn);
1778   alternative_mask alts = 0;
1779   alternative_mask exact_alts = 0;
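  /* ALTS collects the alternatives that can be made to match, possibly
     at the cost of extra reloads; EXACT_ALTS collects only those that
     match as-is.  The exact alternatives are preferred when returning.  */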
1780   /* Check that the hard reg set is big enough to hold all
1781      alternatives.  It is hard to imagine a situation in which the
1782      assertion would fail.  */
1783   ira_assert (recog_data.n_alternatives
1784 	      <= (int) MAX (sizeof (HARD_REG_ELT_TYPE) * CHAR_BIT,
1785 			    FIRST_PSEUDO_REGISTER));
1786   for (nop = 0; nop < recog_data.n_operands; nop++)
1787     if (recog_data.constraints[nop][0] == '%')
1788       {
1789 	commutative = nop;
1790 	break;
1791       }
1792   for (curr_swapped = false;; curr_swapped = true)
1793     {
1794       for (nalt = 0; nalt < recog_data.n_alternatives; nalt++)
1795 	{
1796 	  if (!TEST_BIT (preferred, nalt) || TEST_BIT (exact_alts, nalt))
1797 	    continue;
1798 
1799 	  const operand_alternative *op_alt
1800 	    = &recog_op_alt[nalt * recog_data.n_operands];
1801 	  int this_reject = 0;
1802 	  for (nop = 0; nop < recog_data.n_operands; nop++)
1803 	    {
1804 	      int c, len;
1805 
1806 	      this_reject += op_alt[nop].reject;
1807 
1808 	      rtx op = recog_data.operand[nop];
1809 	      p = op_alt[nop].constraint;
1810 	      if (*p == 0 || *p == ',')
1811 		continue;
1812 
1813 	      bool win_p = false;
1814 	      do
1815 		switch (c = *p, len = CONSTRAINT_LEN (c, p), c)
1816 		  {
1817 		  case '#':
1818 		  case ',':
1819 		    c = '\0';
1820 		    /* FALLTHRU */
1821 		  case '\0':
1822 		    len = 0;
1823 		    break;
1824 
1825 		  case '%':
1826 		    /* The commutative modifier is handled above.  */
1827 		    break;
1828 
1829 		  case '0':  case '1':  case '2':  case '3':  case '4':
1830 		  case '5':  case '6':  case '7':  case '8':  case '9':
1831 		    {
1832 		      rtx other = recog_data.operand[c - '0'];
1833 		      if (MEM_P (other)
1834 			  ? rtx_equal_p (other, op)
1835 			  : REG_P (op) || SUBREG_P (op))
1836 			goto op_success;
1837 		      win_p = true;
1838 		    }
1839 		    break;
1840 
1841 		  case 'g':
1842 		    goto op_success;
1843 		    break;
1844 
1845 		  default:
1846 		    {
1847 		      enum constraint_num cn = lookup_constraint (p);
1848 		      switch (get_constraint_type (cn))
1849 			{
1850 			case CT_REGISTER:
1851 			  if (reg_class_for_constraint (cn) != NO_REGS)
1852 			    {
1853 			      if (REG_P (op) || SUBREG_P (op))
1854 				goto op_success;
1855 			      win_p = true;
1856 			    }
1857 			  break;
1858 
1859 			case CT_CONST_INT:
1860 			  if (CONST_INT_P (op)
1861 			      && (insn_const_int_ok_for_constraint
1862 				  (INTVAL (op), cn)))
1863 			    goto op_success;
1864 			  break;
1865 
1866 			case CT_ADDRESS:
1867 			  goto op_success;
1868 
1869 			case CT_MEMORY:
1870 			case CT_SPECIAL_MEMORY:
1871 			  if (MEM_P (op))
1872 			    goto op_success;
1873 			  win_p = true;
1874 			  break;
1875 
1876 			case CT_FIXED_FORM:
1877 			  if (constraint_satisfied_p (op, cn))
1878 			    goto op_success;
1879 			  break;
1880 			}
1881 		      break;
1882 		    }
1883 		  }
1884 	      while (p += len, c);
1885 	      if (!win_p)
1886 		break;
1887 	      /* We can make the alternative match by spilling a register
1888 		 to memory or loading something into a register.  Count a
1889 		 cost of one reload (the equivalent of the '?' constraint).  */
1890 	      this_reject += 6;
1891 	    op_success:
1892 	      ;
1893 	    }
1894 
1895 	  if (nop >= recog_data.n_operands)
1896 	    {
1897 	      alts |= ALTERNATIVE_BIT (nalt);
1898 	      if (this_reject == 0)
1899 		exact_alts |= ALTERNATIVE_BIT (nalt);
1900 	    }
1901 	}
1902       if (commutative < 0)
1903 	break;
1904       /* Swap the operands back and forth so recog_data ends up unchanged.  */
1905       std::swap (recog_data.operand[commutative],
1906 		 recog_data.operand[commutative + 1]);
1907       if (curr_swapped)
1908 	break;
1909     }
1910   return exact_alts ? exact_alts : alts;
1911 }
1912 
1913 /* Return the number of the output non-early-clobber operand which
1914    must in any case be the same as the operand with number OP_NUM (or
1915    a negative value if there is no such operand).  ALTS is the mask
1916    of alternatives that we should consider.  */
1917 int
1918 ira_get_dup_out_num (int op_num, alternative_mask alts)
1919 {
1920   int curr_alt, c, original, dup;
1921   bool ignore_p, use_commut_op_p;
1922   const char *str;
1923 
1924   if (op_num < 0 || recog_data.n_alternatives == 0)
1925     return -1;
1926   /* We should find duplications only for input operands.  */
1927   if (recog_data.operand_type[op_num] != OP_IN)
1928     return -1;
1929   str = recog_data.constraints[op_num];
1930   use_commut_op_p = false;
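  /* Look through the constraints of OP_NUM for a matching-operand digit
     in the alternatives allowed by ALTS; a constraint that lets the
     operand stand on its own (e.g. 'g' or a register class which is not
     likely spilled) means there is no forced duplicate.  If nothing is
     found, retry once with the constraints of the commutative partner
     operand.  */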
1931   for (;;)
1932     {
1933       rtx op = recog_data.operand[op_num];
1934 
1935       for (curr_alt = 0, ignore_p = !TEST_BIT (alts, curr_alt),
1936 	   original = -1;;)
1937 	{
1938 	  c = *str;
1939 	  if (c == '\0')
1940 	    break;
1941 	  if (c == '#')
1942 	    ignore_p = true;
1943 	  else if (c == ',')
1944 	    {
1945 	      curr_alt++;
1946 	      ignore_p = !TEST_BIT (alts, curr_alt);
1947 	    }
1948 	  else if (! ignore_p)
1949 	    switch (c)
1950 	      {
1951 	      case 'g':
1952 		goto fail;
1953 	      default:
1954 		{
1955 		  enum constraint_num cn = lookup_constraint (str);
1956 		  enum reg_class cl = reg_class_for_constraint (cn);
1957 		  if (cl != NO_REGS
1958 		      && !targetm.class_likely_spilled_p (cl))
1959 		    goto fail;
1960 		  if (constraint_satisfied_p (op, cn))
1961 		    goto fail;
1962 		  break;
1963 		}
1964 
1965 	      case '0': case '1': case '2': case '3': case '4':
1966 	      case '5': case '6': case '7': case '8': case '9':
1967 		if (original != -1 && original != c)
1968 		  goto fail;
1969 		original = c;
1970 		break;
1971 	      }
1972 	  str += CONSTRAINT_LEN (c, str);
1973 	}
1974       if (original == -1)
1975 	goto fail;
1976       dup = original - '0';
1977       if (recog_data.operand_type[dup] == OP_OUT)
1978 	return dup;
1979     fail:
1980       if (use_commut_op_p)
1981 	break;
1982       use_commut_op_p = true;
1983       if (recog_data.constraints[op_num][0] == '%')
1984 	str = recog_data.constraints[op_num + 1];
1985       else if (op_num > 0 && recog_data.constraints[op_num - 1][0] == '%')
1986 	str = recog_data.constraints[op_num - 1];
1987       else
1988 	break;
1989     }
1990   return -1;
1991 }
1992 
1993 
1994 
1995 /* Search forward to see if the source register of a copy insn dies
1996    before either it or the destination register is modified, but don't
1997    scan past the end of the basic block.  If so, we can replace the
1998    source with the destination and let the source die in the copy
1999    insn.
2000 
2001    This reduces the number of registers live in that range and may
2002    enable coalescing of the destination and the source, thus often saving
2003    one register in addition to eliminating the register-register copy.  */
2004 
2005 static void
2006 decrease_live_ranges_number (void)
2007 {
2008   basic_block bb;
2009   rtx_insn *insn;
2010   rtx set, src, dest, dest_death, note;
2011   rtx_insn *p, *q;
2012   int sregno, dregno;
2013 
2014   if (! flag_expensive_optimizations)
2015     return;
2016 
2017   if (ira_dump_file)
2018     fprintf (ira_dump_file, "Starting decreasing number of live ranges...\n");
2019 
2020   FOR_EACH_BB_FN (bb, cfun)
2021     FOR_BB_INSNS (bb, insn)
2022       {
2023 	set = single_set (insn);
2024 	if (! set)
2025 	  continue;
2026 	src = SET_SRC (set);
2027 	dest = SET_DEST (set);
2028 	if (! REG_P (src) || ! REG_P (dest)
2029 	    || find_reg_note (insn, REG_DEAD, src))
2030 	  continue;
2031 	sregno = REGNO (src);
2032 	dregno = REGNO (dest);
2033 
2034 	/* We don't want to mess with hard regs if register classes
2035 	   are small.  */
2036 	if (sregno == dregno
2037 	    || (targetm.small_register_classes_for_mode_p (GET_MODE (src))
2038 		&& (sregno < FIRST_PSEUDO_REGISTER
2039 		    || dregno < FIRST_PSEUDO_REGISTER))
2040 	    /* We don't see all updates to SP if they are in an
2041 	       auto-inc memory reference, so we must disallow this
2042 	       optimization on them.  */
2043 	    || sregno == STACK_POINTER_REGNUM
2044 	    || dregno == STACK_POINTER_REGNUM)
2045 	  continue;
2046 
2047 	dest_death = NULL_RTX;
2048 
2049 	for (p = NEXT_INSN (insn); p; p = NEXT_INSN (p))
2050 	  {
2051 	    if (! INSN_P (p))
2052 	      continue;
2053 	    if (BLOCK_FOR_INSN (p) != bb)
2054 	      break;
2055 
2056 	    if (reg_set_p (src, p) || reg_set_p (dest, p)
2057 		/* If SRC is an asm-declared register, it must not be
2058 		   replaced in any asm.  Unfortunately, the REG_EXPR
2059 		   tree for the asm variable may be absent in the SRC
2060 		   rtx, so we can't check the actual register
2061 		   declaration easily (the asm operand will have it,
2062 		   though).  To avoid complicating the test for a rare
2063 		   case, we just don't perform register replacement
2064 		   for a hard reg mentioned in an asm.  */
2065 		|| (sregno < FIRST_PSEUDO_REGISTER
2066 		    && asm_noperands (PATTERN (p)) >= 0
2067 		    && reg_overlap_mentioned_p (src, PATTERN (p)))
2068 		/* Don't change hard registers used by a call.  */
2069 		|| (CALL_P (p) && sregno < FIRST_PSEUDO_REGISTER
2070 		    && find_reg_fusage (p, USE, src))
2071 		/* Don't change a USE of a register.  */
2072 		|| (GET_CODE (PATTERN (p)) == USE
2073 		    && reg_overlap_mentioned_p (src, XEXP (PATTERN (p), 0))))
2074 	      break;
2075 
2076 	    /* See if all of SRC dies in P.  This test is slightly
2077 	       more conservative than it needs to be.  */
2078 	    if ((note = find_regno_note (p, REG_DEAD, sregno))
2079 		&& GET_MODE (XEXP (note, 0)) == GET_MODE (src))
2080 	      {
2081 		int failed = 0;
2082 
2083 		/* We can do the optimization.  Scan forward from INSN
2084 		   again, replacing regs as we go.  Set FAILED if a
2085 		   replacement can't be done.  In that case, we can't
2086 		   move the death note for SRC.  This should be
2087 		   rare.  */
2088 
2089 		/* Set to stop at next insn.  */
2090 		for (q = next_real_insn (insn);
2091 		     q != next_real_insn (p);
2092 		     q = next_real_insn (q))
2093 		  {
2094 		    if (reg_overlap_mentioned_p (src, PATTERN (q)))
2095 		      {
2096 			/* If SRC is a hard register, we might miss
2097 			   some overlapping registers with
2098 			   validate_replace_rtx, so we would have to
2099 			   undo it.  We can't if DEST is present in
2100 			   the insn, so fail in that combination of
2101 			   cases.  */
2102 			if (sregno < FIRST_PSEUDO_REGISTER
2103 			    && reg_mentioned_p (dest, PATTERN (q)))
2104 			  failed = 1;
2105 
2106 			/* Attempt to replace all uses.  */
2107 			else if (!validate_replace_rtx (src, dest, q))
2108 			  failed = 1;
2109 
2110 			/* If this succeeded, but some part of the
2111 			   register is still present, undo the
2112 			   replacement.  */
2113 			else if (sregno < FIRST_PSEUDO_REGISTER
2114 				 && reg_overlap_mentioned_p (src, PATTERN (q)))
2115 			  {
2116 			    validate_replace_rtx (dest, src, q);
2117 			    failed = 1;
2118 			  }
2119 		      }
2120 
2121 		    /* If DEST dies here, remove the death note and
2122 		       save it for later.  Make sure ALL of DEST dies
2123 		       here; again, this is overly conservative.  */
2124 		    if (! dest_death
2125 			&& (dest_death = find_regno_note (q, REG_DEAD, dregno)))
2126 		      {
2127 			if (GET_MODE (XEXP (dest_death, 0)) == GET_MODE (dest))
2128 			  remove_note (q, dest_death);
2129 			else
2130 			  {
2131 			    failed = 1;
2132 			    dest_death = 0;
2133 			  }
2134 		      }
2135 		  }
2136 
2137 		if (! failed)
2138 		  {
2139 		    /* Move death note of SRC from P to INSN.  */
2140 		    remove_note (p, note);
2141 		    XEXP (note, 1) = REG_NOTES (insn);
2142 		    REG_NOTES (insn) = note;
2143 		  }
2144 
2145 		/* DEST is also dead if INSN has a REG_UNUSED note for
2146 		   DEST.  */
2147 		if (! dest_death
2148 		    && (dest_death
2149 			= find_regno_note (insn, REG_UNUSED, dregno)))
2150 		  {
2151 		    PUT_REG_NOTE_KIND (dest_death, REG_DEAD);
2152 		    remove_note (insn, dest_death);
2153 		  }
2154 
2155 		/* Put death note of DEST on P if we saw it die.  */
2156 		if (dest_death)
2157 		  {
2158 		    XEXP (dest_death, 1) = REG_NOTES (p);
2159 		    REG_NOTES (p) = dest_death;
2160 		  }
2161 		break;
2162 	      }
2163 
2164 	    /* If SRC is a hard register which is set or killed in
2165 	       some other way, we can't do this optimization.  */
2166 	    else if (sregno < FIRST_PSEUDO_REGISTER && dead_or_set_p (p, src))
2167 	      break;
2168 	  }
2169       }
2170 }
2171 
2172 
2173 
2174 /* Return nonzero if REGNO is a particularly bad choice for reloading X.  */
2175 static bool
2176 ira_bad_reload_regno_1 (int regno, rtx x)
2177 {
2178   int x_regno, n, i;
2179   ira_allocno_t a;
2180   enum reg_class pref;
2181 
2182   /* We only deal with pseudo regs.  */
2183   if (! x || GET_CODE (x) != REG)
2184     return false;
2185 
2186   x_regno = REGNO (x);
2187   if (x_regno < FIRST_PSEUDO_REGISTER)
2188     return false;
2189 
2190   /* If the pseudo prefers REGNO explicitly, then do not consider
2191      REGNO a bad spill choice.  */
2192   pref = reg_preferred_class (x_regno);
2193   if (reg_class_size[pref] == 1)
2194     return !TEST_HARD_REG_BIT (reg_class_contents[pref], regno);
2195 
2196   /* If the pseudo conflicts with REGNO, then we consider REGNO a
2197      poor choice for a reload regno.  */
2198   a = ira_regno_allocno_map[x_regno];
2199   n = ALLOCNO_NUM_OBJECTS (a);
2200   for (i = 0; i < n; i++)
2201     {
2202       ira_object_t obj = ALLOCNO_OBJECT (a, i);
2203       if (TEST_HARD_REG_BIT (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), regno))
2204 	return true;
2205     }
2206   return false;
2207 }
2208 
2209 /* Return nonzero if REGNO is a particularly bad choice for reloading
2210    IN or OUT.  */
2211 bool
2212 ira_bad_reload_regno (int regno, rtx in, rtx out)
2213 {
2214   return (ira_bad_reload_regno_1 (regno, in)
2215 	  || ira_bad_reload_regno_1 (regno, out));
2216 }
2217 
2218 /* Add register clobbers from asm statements.  */
2219 static void
2220 compute_regs_asm_clobbered (void)
2221 {
2222   basic_block bb;
2223 
2224   FOR_EACH_BB_FN (bb, cfun)
2225     {
2226       rtx_insn *insn;
2227       FOR_BB_INSNS_REVERSE (bb, insn)
2228 	{
2229 	  df_ref def;
2230 
2231 	  if (NONDEBUG_INSN_P (insn) && asm_noperands (PATTERN (insn)) >= 0)
2232 	    FOR_EACH_INSN_DEF (def, insn)
2233 	      {
2234 		unsigned int dregno = DF_REF_REGNO (def);
2235 		if (HARD_REGISTER_NUM_P (dregno))
2236 		  add_to_hard_reg_set (&crtl->asm_clobbers,
2237 				       GET_MODE (DF_REF_REAL_REG (def)),
2238 				       dregno);
2239 	      }
2240 	}
2241     }
2242 }
2243 
2244 
2245 /* Set up ELIMINABLE_REGSET, IRA_NO_ALLOC_REGS, and
2246    REGS_EVER_LIVE.  */
2247 void
2248 ira_setup_eliminable_regset (void)
2249 {
2250   int i;
2251   static const struct {const int from, to; } eliminables[] = ELIMINABLE_REGS;
2252   int fp_reg_count = hard_regno_nregs (HARD_FRAME_POINTER_REGNUM, Pmode);
2253 
2254   /* Setup is_leaf as frame_pointer_required may use it.  This function
2255      is called by sched_init before ira if scheduling is enabled.  */
2256   crtl->is_leaf = leaf_function_p ();
2257 
2258   /* FIXME: If EXIT_IGNORE_STACK is set, we will not save and restore
2259      sp for alloca.  So we can't eliminate the frame pointer in that
2260      case.  At some point, we should improve this by emitting the
2261      sp-adjusting insns for this case.  */
2262   frame_pointer_needed
2263     = (! flag_omit_frame_pointer
2264        || (cfun->calls_alloca && EXIT_IGNORE_STACK)
2265        /* We need the frame pointer to catch stack overflow exceptions if
2266 	  the stack pointer is moving (as for the alloca case just above).  */
2267        || (STACK_CHECK_MOVING_SP
2268 	   && flag_stack_check
2269 	   && flag_exceptions
2270 	   && cfun->can_throw_non_call_exceptions)
2271        || crtl->accesses_prior_frames
2272        || (SUPPORTS_STACK_ALIGNMENT && crtl->stack_realign_needed)
2273        || targetm.frame_pointer_required ());
2274 
2275   /* The chance that FRAME_POINTER_NEEDED will change after inspecting
2276      the RTL is very small.  So if we use the frame pointer for RA and the
2277      RTL actually prevents this, we will spill the pseudos assigned to the
2278      frame pointer in LRA.  */
2279 
2280   if (frame_pointer_needed)
2281     for (i = 0; i < fp_reg_count; i++)
2282       df_set_regs_ever_live (HARD_FRAME_POINTER_REGNUM + i, true);
2283 
2284   ira_no_alloc_regs = no_unit_alloc_regs;
2285   CLEAR_HARD_REG_SET (eliminable_regset);
2286 
2287   compute_regs_asm_clobbered ();
2288 
2289   /* Build the regset of all eliminable registers and show we can't
2290      use those that we already know won't be eliminated.  */
2291   for (i = 0; i < (int) ARRAY_SIZE (eliminables); i++)
2292     {
2293       bool cannot_elim
2294 	= (! targetm.can_eliminate (eliminables[i].from, eliminables[i].to)
2295 	   || (eliminables[i].to == STACK_POINTER_REGNUM && frame_pointer_needed));
2296 
2297       if (!TEST_HARD_REG_BIT (crtl->asm_clobbers, eliminables[i].from))
2298 	{
2299 	    SET_HARD_REG_BIT (eliminable_regset, eliminables[i].from);
2300 
2301 	    if (cannot_elim)
2302 	      SET_HARD_REG_BIT (ira_no_alloc_regs, eliminables[i].from);
2303 	}
2304       else if (cannot_elim)
2305 	error ("%s cannot be used in %<asm%> here",
2306 	       reg_names[eliminables[i].from]);
2307       else
2308 	df_set_regs_ever_live (eliminables[i].from, true);
2309     }
2310   if (!HARD_FRAME_POINTER_IS_FRAME_POINTER)
2311     {
2312       for (i = 0; i < fp_reg_count; i++)
2313 	if (global_regs[HARD_FRAME_POINTER_REGNUM + i])
2314 	  /* Nothing to do: the register is already treated as live
2315 	     where appropriate, and cannot be eliminated.  */
2316 	  ;
2317 	else if (!TEST_HARD_REG_BIT (crtl->asm_clobbers,
2318 				     HARD_FRAME_POINTER_REGNUM + i))
2319 	  {
2320 	    SET_HARD_REG_BIT (eliminable_regset,
2321 			      HARD_FRAME_POINTER_REGNUM + i);
2322 	    if (frame_pointer_needed)
2323 	      SET_HARD_REG_BIT (ira_no_alloc_regs,
2324 				HARD_FRAME_POINTER_REGNUM + i);
2325 	  }
2326 	else if (frame_pointer_needed)
2327 	  error ("%s cannot be used in %<asm%> here",
2328 		 reg_names[HARD_FRAME_POINTER_REGNUM + i]);
2329 	else
2330 	  df_set_regs_ever_live (HARD_FRAME_POINTER_REGNUM + i, true);
2331     }
2332 }
2333 
2334 
2335 
2336 /* Vector of substitutions of register numbers,
2337    used to map pseudo regs into hardware regs.
2338    This is set up as a result of register allocation.
2339    Element N is the hard reg assigned to pseudo reg N,
2340    or is -1 if no hard reg was assigned.
2341    If N is a hard reg number, element N is N.  */
2342 short *reg_renumber;
2343 
2344 /* Set up REG_RENUMBER and CALLER_SAVE_NEEDED (used by reload) from
2345    the allocation found by IRA.  */
2346 static void
2347 setup_reg_renumber (void)
2348 {
2349   int regno, hard_regno;
2350   ira_allocno_t a;
2351   ira_allocno_iterator ai;
2352 
2353   caller_save_needed = 0;
2354   FOR_EACH_ALLOCNO (a, ai)
2355     {
2356       if (ira_use_lra_p && ALLOCNO_CAP_MEMBER (a) != NULL)
2357 	continue;
2358       /* There are no caps at this point.  */
2359       ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
2360       if (! ALLOCNO_ASSIGNED_P (a))
2361 	/* It can happen if A is not referenced but partially anticipated
2362 	   somewhere in a region.  */
2363 	ALLOCNO_ASSIGNED_P (a) = true;
2364       ira_free_allocno_updated_costs (a);
2365       hard_regno = ALLOCNO_HARD_REGNO (a);
2366       regno = ALLOCNO_REGNO (a);
2367       reg_renumber[regno] = (hard_regno < 0 ? -1 : hard_regno);
2368       if (hard_regno >= 0)
2369 	{
2370 	  int i, nwords;
2371 	  enum reg_class pclass;
2372 	  ira_object_t obj;
2373 
2374 	  pclass = ira_pressure_class_translate[REGNO_REG_CLASS (hard_regno)];
2375 	  nwords = ALLOCNO_NUM_OBJECTS (a);
2376 	  for (i = 0; i < nwords; i++)
2377 	    {
2378 	      obj = ALLOCNO_OBJECT (a, i);
2379 	      OBJECT_TOTAL_CONFLICT_HARD_REGS (obj)
2380 		|= ~reg_class_contents[pclass];
2381 	    }
2382 	  if (ira_need_caller_save_p (a, hard_regno))
2383 	    {
2384 	      ira_assert (!optimize || flag_caller_saves
2385 			  || (ALLOCNO_CALLS_CROSSED_NUM (a)
2386 			      == ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a))
2387 			  || regno >= ira_reg_equiv_len
2388 			  || ira_equiv_no_lvalue_p (regno));
2389 	      caller_save_needed = 1;
2390 	    }
2391 	}
2392     }
2393 }
2394 
2395 /* Set up allocno assignment flags for further allocation
2396    improvements.  */
2397 static void
2398 setup_allocno_assignment_flags (void)
2399 {
2400   int hard_regno;
2401   ira_allocno_t a;
2402   ira_allocno_iterator ai;
2403 
2404   FOR_EACH_ALLOCNO (a, ai)
2405     {
2406       if (! ALLOCNO_ASSIGNED_P (a))
2407 	/* It can happen if A is not referenced but partially anticipated
2408 	   somewhere in a region.  */
2409 	ira_free_allocno_updated_costs (a);
2410       hard_regno = ALLOCNO_HARD_REGNO (a);
2411       /* Don't assign hard registers to allocnos which are the destination
2412 	 of a store removed at the end of a loop.  It makes no sense to keep
2413 	 the same value in different hard registers.  It is also
2414 	 impossible to assign hard registers correctly to such
2415 	 allocnos because the cost info and the info about intersected
2416 	 calls are incorrect for them.  */
2417       ALLOCNO_ASSIGNED_P (a) = (hard_regno >= 0
2418 				|| ALLOCNO_EMIT_DATA (a)->mem_optimized_dest_p
2419 				|| (ALLOCNO_MEMORY_COST (a)
2420 				    - ALLOCNO_CLASS_COST (a)) < 0);
2421       ira_assert
2422 	(hard_regno < 0
2423 	 || ira_hard_reg_in_set_p (hard_regno, ALLOCNO_MODE (a),
2424 				   reg_class_contents[ALLOCNO_CLASS (a)]));
2425     }
2426 }
2427 
2428 /* Evaluate overall allocation cost and the costs for using hard
2429    registers and memory for allocnos.  */
2430 static void
2431 calculate_allocation_cost (void)
2432 {
2433   int hard_regno, cost;
2434   ira_allocno_t a;
2435   ira_allocno_iterator ai;
2436 
2437   ira_overall_cost = ira_reg_cost = ira_mem_cost = 0;
2438   FOR_EACH_ALLOCNO (a, ai)
2439     {
2440       hard_regno = ALLOCNO_HARD_REGNO (a);
2441       ira_assert (hard_regno < 0
2442 		  || (ira_hard_reg_in_set_p
2443 		      (hard_regno, ALLOCNO_MODE (a),
2444 		       reg_class_contents[ALLOCNO_CLASS (a)])));
2445       if (hard_regno < 0)
2446 	{
2447 	  cost = ALLOCNO_MEMORY_COST (a);
2448 	  ira_mem_cost += cost;
2449 	}
2450       else if (ALLOCNO_HARD_REG_COSTS (a) != NULL)
2451 	{
2452 	  cost = (ALLOCNO_HARD_REG_COSTS (a)
2453 		  [ira_class_hard_reg_index
2454 		   [ALLOCNO_CLASS (a)][hard_regno]]);
2455 	  ira_reg_cost += cost;
2456 	}
2457       else
2458 	{
2459 	  cost = ALLOCNO_CLASS_COST (a);
2460 	  ira_reg_cost += cost;
2461 	}
2462       ira_overall_cost += cost;
2463     }
2464 
2465   if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
2466     {
2467       fprintf (ira_dump_file,
2468 	       "+++Costs: overall %" PRId64
2469 	       ", reg %" PRId64
2470 	       ", mem %" PRId64
2471 	       ", ld %" PRId64
2472 	       ", st %" PRId64
2473 	       ", move %" PRId64,
2474 	       ira_overall_cost, ira_reg_cost, ira_mem_cost,
2475 	       ira_load_cost, ira_store_cost, ira_shuffle_cost);
2476       fprintf (ira_dump_file, "\n+++       move loops %d, new jumps %d\n",
2477 	       ira_move_loops_num, ira_additional_jumps_num);
2478     }
2479 
2480 }
2481 
2482 #ifdef ENABLE_IRA_CHECKING
2483 /* Check the correctness of the allocation.  We do need this because
2484    of the complicated code that transforms the multi-region internal
2485    representation into a one-region representation.  */
2486 static void
2487 check_allocation (void)
2488 {
2489   ira_allocno_t a;
2490   int hard_regno, nregs, conflict_nregs;
2491   ira_allocno_iterator ai;
2492 
2493   FOR_EACH_ALLOCNO (a, ai)
2494     {
2495       int n = ALLOCNO_NUM_OBJECTS (a);
2496       int i;
2497 
2498       if (ALLOCNO_CAP_MEMBER (a) != NULL
2499 	  || (hard_regno = ALLOCNO_HARD_REGNO (a)) < 0)
2500 	continue;
2501       nregs = hard_regno_nregs (hard_regno, ALLOCNO_MODE (a));
2502       if (nregs == 1)
2503 	/* We allocated a single hard register.  */
2504 	n = 1;
2505       else if (n > 1)
2506 	/* We allocated multiple hard registers, and we will test
2507 	   conflicts at the granularity of single hard regs.  */
2508 	nregs = 1;
2509 
2510       for (i = 0; i < n; i++)
2511 	{
2512 	  ira_object_t obj = ALLOCNO_OBJECT (a, i);
2513 	  ira_object_t conflict_obj;
2514 	  ira_object_conflict_iterator oci;
2515 	  int this_regno = hard_regno;
2516 	  if (n > 1)
2517 	    {
2518 	      if (REG_WORDS_BIG_ENDIAN)
2519 		this_regno += n - i - 1;
2520 	      else
2521 		this_regno += i;
2522 	    }
2523 	  FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
2524 	    {
2525 	      ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
2526 	      int conflict_hard_regno = ALLOCNO_HARD_REGNO (conflict_a);
2527 	      if (conflict_hard_regno < 0)
2528 		continue;
2529 
2530 	      conflict_nregs = hard_regno_nregs (conflict_hard_regno,
2531 						 ALLOCNO_MODE (conflict_a));
2532 
2533 	      if (ALLOCNO_NUM_OBJECTS (conflict_a) > 1
2534 		  && conflict_nregs == ALLOCNO_NUM_OBJECTS (conflict_a))
2535 		{
2536 		  if (REG_WORDS_BIG_ENDIAN)
2537 		    conflict_hard_regno += (ALLOCNO_NUM_OBJECTS (conflict_a)
2538 					    - OBJECT_SUBWORD (conflict_obj) - 1);
2539 		  else
2540 		    conflict_hard_regno += OBJECT_SUBWORD (conflict_obj);
2541 		  conflict_nregs = 1;
2542 		}
2543 
2544 	      if ((conflict_hard_regno <= this_regno
2545 		 && this_regno < conflict_hard_regno + conflict_nregs)
2546 		|| (this_regno <= conflict_hard_regno
2547 		    && conflict_hard_regno < this_regno + nregs))
2548 		{
2549 		  fprintf (stderr, "bad allocation for %d and %d\n",
2550 			   ALLOCNO_REGNO (a), ALLOCNO_REGNO (conflict_a));
2551 		  gcc_unreachable ();
2552 		}
2553 	    }
2554 	}
2555     }
2556 }
2557 #endif
2558 
2559 /* Allocate REG_EQUIV_INIT.  Set it up from IRA_REG_EQUIV, which should
2560    already be calculated.  */
2561 static void
2562 setup_reg_equiv_init (void)
2563 {
2564   int i;
2565   int max_regno = max_reg_num ();
2566 
2567   for (i = 0; i < max_regno; i++)
2568     reg_equiv_init (i) = ira_reg_equiv[i].init_insns;
2569 }
2570 
2571 /* Update the equivalence info after the value of FROM_REGNO has been
2572    moved to TO_REGNO.  INSNS are the insns which were generated for the
2573    move.  It is assumed that FROM_REGNO and TO_REGNO always have the
2574    same value at the point of any move containing such registers.  This
2575    function is used to update equiv info for register shuffles on region
2576    borders and for caller save/restore insns.  */
2577 void
2578 ira_update_equiv_info_by_shuffle_insn (int to_regno, int from_regno, rtx_insn *insns)
2579 {
2580   rtx_insn *insn;
2581   rtx x, note;
2582 
2583   if (! ira_reg_equiv[from_regno].defined_p
2584       && (! ira_reg_equiv[to_regno].defined_p
2585 	  || ((x = ira_reg_equiv[to_regno].memory) != NULL_RTX
2586 	      && ! MEM_READONLY_P (x))))
2587     return;
2588   insn = insns;
2589   if (NEXT_INSN (insn) != NULL_RTX)
2590     {
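      /* More than one insn was generated for the move, so the value of
	 TO_REGNO cannot be described by a single equivalence; invalidate
	 any equivalence it already has instead of trying to keep it.  */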
2591       if (! ira_reg_equiv[to_regno].defined_p)
2592 	{
2593 	  ira_assert (ira_reg_equiv[to_regno].init_insns == NULL_RTX);
2594 	  return;
2595 	}
2596       ira_reg_equiv[to_regno].defined_p = false;
2597       ira_reg_equiv[to_regno].memory
2598 	= ira_reg_equiv[to_regno].constant
2599 	= ira_reg_equiv[to_regno].invariant
2600 	= ira_reg_equiv[to_regno].init_insns = NULL;
2601       if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2602 	fprintf (ira_dump_file,
2603 		 "      Invalidating equiv info for reg %d\n", to_regno);
2604       return;
2605     }
2606   /* It is possible that FROM_REGNO still has no equivalence because,
2607      in shuffles to_regno<-from_regno and from_regno<-to_regno, the
2608      second insn has not been processed yet.  */
2609   if (ira_reg_equiv[from_regno].defined_p)
2610     {
2611       ira_reg_equiv[to_regno].defined_p = true;
2612       if ((x = ira_reg_equiv[from_regno].memory) != NULL_RTX)
2613 	{
2614 	  ira_assert (ira_reg_equiv[from_regno].invariant == NULL_RTX
2615 		      && ira_reg_equiv[from_regno].constant == NULL_RTX);
2616 	  ira_assert (ira_reg_equiv[to_regno].memory == NULL_RTX
2617 		      || rtx_equal_p (ira_reg_equiv[to_regno].memory, x));
2618 	  ira_reg_equiv[to_regno].memory = x;
2619 	  if (! MEM_READONLY_P (x))
2620 	    /* We don't add the insn to the init insn list because a memory
2621 	       equivalence only says which memory is better to use
2622 	       when the pseudo is spilled.  */
2623 	    return;
2624 	}
2625       else if ((x = ira_reg_equiv[from_regno].constant) != NULL_RTX)
2626 	{
2627 	  ira_assert (ira_reg_equiv[from_regno].invariant == NULL_RTX);
2628 	  ira_assert (ira_reg_equiv[to_regno].constant == NULL_RTX
2629 		      || rtx_equal_p (ira_reg_equiv[to_regno].constant, x));
2630 	  ira_reg_equiv[to_regno].constant = x;
2631 	}
2632       else
2633 	{
2634 	  x = ira_reg_equiv[from_regno].invariant;
2635 	  ira_assert (x != NULL_RTX);
2636 	  ira_assert (ira_reg_equiv[to_regno].invariant == NULL_RTX
2637 		      || rtx_equal_p (ira_reg_equiv[to_regno].invariant, x));
2638 	  ira_reg_equiv[to_regno].invariant = x;
2639 	}
2640       if (find_reg_note (insn, REG_EQUIV, x) == NULL_RTX)
2641 	{
2642 	  note = set_unique_reg_note (insn, REG_EQUIV, copy_rtx (x));
2643 	  gcc_assert (note != NULL_RTX);
2644 	  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2645 	    {
2646 	      fprintf (ira_dump_file,
2647 		       "      Adding equiv note to insn %u for reg %d ",
2648 		       INSN_UID (insn), to_regno);
2649 	      dump_value_slim (ira_dump_file, x, 1);
2650 	      fprintf (ira_dump_file, "\n");
2651 	    }
2652 	}
2653     }
2654   ira_reg_equiv[to_regno].init_insns
2655     = gen_rtx_INSN_LIST (VOIDmode, insn,
2656 			 ira_reg_equiv[to_regno].init_insns);
2657   if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2658     fprintf (ira_dump_file,
2659 	     "      Adding equiv init move insn %u to reg %d\n",
2660 	     INSN_UID (insn), to_regno);
2661 }
2662 
2663 /* Fix values of array REG_EQUIV_INIT after live range splitting done
2664    by IRA.  */
2665 static void
2666 fix_reg_equiv_init (void)
2667 {
2668   int max_regno = max_reg_num ();
2669   int i, new_regno, max;
2670   rtx set;
2671   rtx_insn_list *x, *next, *prev;
2672   rtx_insn *insn;
2673 
2674   if (max_regno_before_ira < max_regno)
2675     {
2676       max = vec_safe_length (reg_equivs);
2677       grow_reg_equivs ();
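      /* Live range splitting may have renamed the pseudo set or used by
	 an equivalencing insn; move each init-insn list element to the
	 list of the pseudo the insn actually mentions now.  */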
2678       for (i = FIRST_PSEUDO_REGISTER; i < max; i++)
2679 	for (prev = NULL, x = reg_equiv_init (i);
2680 	     x != NULL_RTX;
2681 	     x = next)
2682 	  {
2683 	    next = x->next ();
2684 	    insn = x->insn ();
2685 	    set = single_set (insn);
2686 	    ira_assert (set != NULL_RTX
2687 			&& (REG_P (SET_DEST (set)) || REG_P (SET_SRC (set))));
2688 	    if (REG_P (SET_DEST (set))
2689 		&& ((int) REGNO (SET_DEST (set)) == i
2690 		    || (int) ORIGINAL_REGNO (SET_DEST (set)) == i))
2691 	      new_regno = REGNO (SET_DEST (set));
2692 	    else if (REG_P (SET_SRC (set))
2693 		     && ((int) REGNO (SET_SRC (set)) == i
2694 			 || (int) ORIGINAL_REGNO (SET_SRC (set)) == i))
2695 	      new_regno = REGNO (SET_SRC (set));
2696 	    else
2697  	      gcc_unreachable ();
2698 	    if (new_regno == i)
2699 	      prev = x;
2700 	    else
2701 	      {
2702 		/* Remove the wrong list element.  */
2703 		if (prev == NULL_RTX)
2704 		  reg_equiv_init (i) = next;
2705 		else
2706 		  XEXP (prev, 1) = next;
2707 		XEXP (x, 1) = reg_equiv_init (new_regno);
2708 		reg_equiv_init (new_regno) = x;
2709 	      }
2710 	  }
2711     }
2712 }
2713 
2714 #ifdef ENABLE_IRA_CHECKING
2715 /* Print redundant memory-memory copies.  */
2716 static void
2717 print_redundant_copies (void)
2718 {
2719   int hard_regno;
2720   ira_allocno_t a;
2721   ira_copy_t cp, next_cp;
2722   ira_allocno_iterator ai;
2723 
2724   FOR_EACH_ALLOCNO (a, ai)
2725     {
2726       if (ALLOCNO_CAP_MEMBER (a) != NULL)
2727 	/* It is a cap.  */
2728 	continue;
2729       hard_regno = ALLOCNO_HARD_REGNO (a);
2730       if (hard_regno >= 0)
2731 	continue;
2732       for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
2733 	if (cp->first == a)
2734 	  next_cp = cp->next_first_allocno_copy;
2735 	else
2736 	  {
2737 	    next_cp = cp->next_second_allocno_copy;
2738 	    if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL
2739 		&& cp->insn != NULL_RTX
2740 		&& ALLOCNO_HARD_REGNO (cp->first) == hard_regno)
2741 	      fprintf (ira_dump_file,
2742 		       "        Redundant move from %d(freq %d):%d\n",
2743 		       INSN_UID (cp->insn), cp->freq, hard_regno);
2744 	  }
2745     }
2746 }
2747 #endif
2748 
2749 /* Setup preferred and alternative classes for new pseudo-registers
2750    created by IRA starting with START.  */
2751 static void
2752 setup_preferred_alternate_classes_for_new_pseudos (int start)
2753 {
2754   int i, old_regno;
2755   int max_regno = max_reg_num ();
2756 
2757   for (i = start; i < max_regno; i++)
2758     {
2759       old_regno = ORIGINAL_REGNO (regno_reg_rtx[i]);
2760       ira_assert (i != old_regno);
2761       setup_reg_classes (i, reg_preferred_class (old_regno),
2762 			 reg_alternate_class (old_regno),
2763 			 reg_allocno_class (old_regno));
2764       if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
2765 	fprintf (ira_dump_file,
2766 		 "    New r%d: setting preferred %s, alternative %s\n",
2767 		 i, reg_class_names[reg_preferred_class (old_regno)],
2768 		 reg_class_names[reg_alternate_class (old_regno)]);
2769     }
2770 }
2771 
2772 
2773 /* The number of entries allocated in reg_info.  */
2774 static int allocated_reg_info_size;
2775 
2776 /* Regional allocation can create new pseudo-registers.  This function
2777    expands some arrays for pseudo-registers.  */
2778 static void
2779 expand_reg_info (void)
2780 {
2781   int i;
2782   int size = max_reg_num ();
2783 
2784   resize_reg_info ();
2785   for (i = allocated_reg_info_size; i < size; i++)
2786     setup_reg_classes (i, GENERAL_REGS, ALL_REGS, GENERAL_REGS);
2787   setup_preferred_alternate_classes_for_new_pseudos (allocated_reg_info_size);
2788   allocated_reg_info_size = size;
2789 }
2790 
2791 /* Return TRUE if the register pressure in the function is too high.
2792    It is used to decide when stack slot sharing is worth doing.  */
2793 static bool
2794 too_high_register_pressure_p (void)
2795 {
2796   int i;
2797   enum reg_class pclass;
2798 
2799   for (i = 0; i < ira_pressure_classes_num; i++)
2800     {
2801       pclass = ira_pressure_classes[i];
2802       if (ira_loop_tree_root->reg_pressure[pclass] > 10000)
2803 	return true;
2804     }
2805   return false;
2806 }
2807 
2808 
2809 
2810 /* Indicate that hard register number FROM was eliminated and replaced with
2811    an offset from hard register number TO.  The status of hard registers live
2812    at the start of a basic block is updated by replacing a use of FROM with
2813    a use of TO.  */
2814 
2815 void
2816 mark_elimination (int from, int to)
2817 {
2818   basic_block bb;
2819   bitmap r;
2820 
2821   FOR_EACH_BB_FN (bb, cfun)
2822     {
2823       r = DF_LR_IN (bb);
2824       if (bitmap_bit_p (r, from))
2825 	{
2826 	  bitmap_clear_bit (r, from);
2827 	  bitmap_set_bit (r, to);
2828 	}
2829       if (! df_live)
2830         continue;
2831       r = DF_LIVE_IN (bb);
2832       if (bitmap_bit_p (r, from))
2833 	{
2834 	  bitmap_clear_bit (r, from);
2835 	  bitmap_set_bit (r, to);
2836 	}
2837     }
2838 }
2839 
2840 
2841 
2842 /* The length of the following array.  */
2843 int ira_reg_equiv_len;
2844 
2845 /* Equivalence info for each register.  */
2846 struct ira_reg_equiv_s *ira_reg_equiv;
2847 
2848 /* Expand ira_reg_equiv if necessary.  */
2849 void
2850 ira_expand_reg_equiv (void)
2851 {
2852   int old = ira_reg_equiv_len;
2853 
2854   if (ira_reg_equiv_len > max_reg_num ())
2855     return;
2856   ira_reg_equiv_len = max_reg_num () * 3 / 2 + 1;
2857   ira_reg_equiv
2858     = (struct ira_reg_equiv_s *) xrealloc (ira_reg_equiv,
2859 					 ira_reg_equiv_len
2860 					 * sizeof (struct ira_reg_equiv_s));
2861   gcc_assert (old < ira_reg_equiv_len);
2862   memset (ira_reg_equiv + old, 0,
2863 	  sizeof (struct ira_reg_equiv_s) * (ira_reg_equiv_len - old));
2864 }
2865 
2866 static void
2867 init_reg_equiv (void)
2868 {
2869   ira_reg_equiv_len = 0;
2870   ira_reg_equiv = NULL;
2871   ira_expand_reg_equiv ();
2872 }
2873 
2874 static void
2875 finish_reg_equiv (void)
2876 {
2877   free (ira_reg_equiv);
2878 }
2879 
2880 
2881 
2882 struct equivalence
2883 {
2884   /* Set when a REG_EQUIV note is found or created.  Used to
2885      keep track of what memory accesses might be created later,
2886      e.g. by reload.  */
2887   rtx replacement;
2888   rtx *src_p;
2889 
2890   /* The list of each instruction which initializes this register.
2891 
2892      NULL indicates we know nothing about this register's equivalence
2893      properties.
2894 
2895      An INSN_LIST with a NULL insn indicates this pseudo is already
2896      known to not have a valid equivalence.  */
2897   rtx_insn_list *init_insns;
2898 
2899   /* Loop depth is used to recognize equivalences which appear
2900      to be present within the same loop (or in an inner loop).  */
2901   short loop_depth;
2902   /* Nonzero if this had a preexisting REG_EQUIV note.  */
2903   unsigned char is_arg_equivalence : 1;
2904   /* Set when an attempt should be made to replace a register
2905      with the associated src_p entry.  */
2906   unsigned char replace : 1;
2907   /* Set if this register has no known equivalence.  */
2908   unsigned char no_equiv : 1;
2909   /* Set if this register is mentioned in a paradoxical subreg.  */
2910   unsigned char pdx_subregs : 1;
2911 };
2912 
2913 /* reg_equiv[N] (where N is a pseudo reg number) is the equivalence
2914    structure for that register.  */
2915 static struct equivalence *reg_equiv;
2916 
2917 /* Used for communication between the following two functions.  */
2918 struct equiv_mem_data
2919 {
2920   /* A MEM that we wish to ensure remains unchanged.  */
2921   rtx equiv_mem;
2922 
2923   /* Set true if EQUIV_MEM is modified.  */
2924   bool equiv_mem_modified;
2925 };
2926 
2927 /* If EQUIV_MEM is modified by modifying DEST, indicate that it is modified.
2928    Called via note_stores.  */
2929 static void
2930 validate_equiv_mem_from_store (rtx dest, const_rtx set ATTRIBUTE_UNUSED,
2931 			       void *data)
2932 {
2933   struct equiv_mem_data *info = (struct equiv_mem_data *) data;
2934 
2935   if ((REG_P (dest)
2936        && reg_overlap_mentioned_p (dest, info->equiv_mem))
2937       || (MEM_P (dest)
2938 	  && anti_dependence (info->equiv_mem, dest)))
2939     info->equiv_mem_modified = true;
2940 }
2941 
2942 enum valid_equiv { valid_none, valid_combine, valid_reload };
2943 
2944 /* Verify that no store between START and the death of REG invalidates
2945    MEMREF.  MEMREF is invalidated by modifying a register used in MEMREF,
2946    by storing into an overlapping memory location, or by a non-const
2947    CALL_INSN.
2948 
2949    Return VALID_RELOAD if MEMREF remains valid for both reload and
2950    combine_and_move_insns, VALID_COMBINE if only valid for
2951    combine_and_move_insns, and VALID_NONE otherwise.  */
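/* For instance, a non-const call between START and the death of REG
   downgrades the result to VALID_COMBINE at best: combine_and_move_insns
   may still use a read-only MEM across the call, but no REG_EQUIV note
   is set up for reload (see the comment in the CALL_P case below).  */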
2952 static enum valid_equiv
2953 validate_equiv_mem (rtx_insn *start, rtx reg, rtx memref)
2954 {
2955   rtx_insn *insn;
2956   rtx note;
2957   struct equiv_mem_data info = { memref, false };
2958   enum valid_equiv ret = valid_reload;
2959 
2960   /* If the memory reference has side effects or is volatile, it isn't a
2961      valid equivalence.  */
2962   if (side_effects_p (memref))
2963     return valid_none;
2964 
2965   for (insn = start; insn; insn = NEXT_INSN (insn))
2966     {
2967       if (!INSN_P (insn))
2968 	continue;
2969 
2970       if (find_reg_note (insn, REG_DEAD, reg))
2971 	return ret;
2972 
2973       if (CALL_P (insn))
2974 	{
2975 	  /* We can combine a reg def from one insn into a reg use in
2976 	     another over a call if the memory is readonly or the call
2977 	     const/pure.  However, we can't set reg_equiv notes up for
2978 	     reload over any call.  The problem is the equivalent form
2979 	     may reference a pseudo which gets assigned a call
2980 	     clobbered hard reg.  When we later replace REG with its
2981 	     equivalent form, the value in the call-clobbered reg has
2982 	     been changed and all hell breaks loose.  */
2983 	  ret = valid_combine;
2984 	  if (!MEM_READONLY_P (memref)
2985 	      && !RTL_CONST_OR_PURE_CALL_P (insn))
2986 	    return valid_none;
2987 	}
2988 
2989       note_stores (insn, validate_equiv_mem_from_store, &info);
2990       if (info.equiv_mem_modified)
2991 	return valid_none;
2992 
2993       /* If a register mentioned in MEMREF is modified via an
2994 	 auto-increment, we lose the equivalence.  Do the same if one
2995 	 dies; although we could extend the life, it doesn't seem worth
2996 	 the trouble.  */
2997 
2998       for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
2999 	if ((REG_NOTE_KIND (note) == REG_INC
3000 	     || REG_NOTE_KIND (note) == REG_DEAD)
3001 	    && REG_P (XEXP (note, 0))
3002 	    && reg_overlap_mentioned_p (XEXP (note, 0), memref))
3003 	  return valid_none;
3004     }
3005 
3006   return valid_none;
3007 }
3008 
3009 /* Return 1 if X may vary; 0 if X is known to be invariant.  */
3010 static int
3011 equiv_init_varies_p (rtx x)
3012 {
3013   RTX_CODE code = GET_CODE (x);
3014   int i;
3015   const char *fmt;
3016 
3017   switch (code)
3018     {
3019     case MEM:
3020       return !MEM_READONLY_P (x) || equiv_init_varies_p (XEXP (x, 0));
3021 
3022     case CONST:
3023     CASE_CONST_ANY:
3024     case SYMBOL_REF:
3025     case LABEL_REF:
3026       return 0;
3027 
3028     case REG:
3029       return reg_equiv[REGNO (x)].replace == 0 && rtx_varies_p (x, 0);
3030 
3031     case ASM_OPERANDS:
3032       if (MEM_VOLATILE_P (x))
3033 	return 1;
3034 
3035       /* Fall through.  */
3036 
3037     default:
3038       break;
3039     }
3040 
3041   fmt = GET_RTX_FORMAT (code);
3042   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3043     if (fmt[i] == 'e')
3044       {
3045 	if (equiv_init_varies_p (XEXP (x, i)))
3046 	  return 1;
3047       }
3048     else if (fmt[i] == 'E')
3049       {
3050 	int j;
3051 	for (j = 0; j < XVECLEN (x, i); j++)
3052 	  if (equiv_init_varies_p (XVECEXP (x, i, j)))
3053 	    return 1;
3054       }
3055 
3056   return 0;
3057 }
3058 
3059 /* Returns nonzero if X (used to initialize register REGNO) is movable.
3060    X is only movable if the registers it uses have equivalent initializations
3061    which appear to be within the same loop (or in an inner loop) and movable,
3062    or if they are not candidates for local_alloc and don't vary.  */
3063 static int
3064 equiv_init_movable_p (rtx x, int regno)
3065 {
3066   int i, j;
3067   const char *fmt;
3068   enum rtx_code code = GET_CODE (x);
3069 
3070   switch (code)
3071     {
3072     case SET:
3073       return equiv_init_movable_p (SET_SRC (x), regno);
3074 
3075     case CC0:
3076     case CLOBBER:
3077       return 0;
3078 
3079     case PRE_INC:
3080     case PRE_DEC:
3081     case POST_INC:
3082     case POST_DEC:
3083     case PRE_MODIFY:
3084     case POST_MODIFY:
3085       return 0;
3086 
3087     case REG:
3088       return ((reg_equiv[REGNO (x)].loop_depth >= reg_equiv[regno].loop_depth
3089 	       && reg_equiv[REGNO (x)].replace)
3090 	      || (REG_BASIC_BLOCK (REGNO (x)) < NUM_FIXED_BLOCKS
3091 		  && ! rtx_varies_p (x, 0)));
3092 
3093     case UNSPEC_VOLATILE:
3094       return 0;
3095 
3096     case ASM_OPERANDS:
3097       if (MEM_VOLATILE_P (x))
3098 	return 0;
3099 
3100       /* Fall through.  */
3101 
3102     default:
3103       break;
3104     }
3105 
3106   fmt = GET_RTX_FORMAT (code);
3107   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3108     switch (fmt[i])
3109       {
3110       case 'e':
3111 	if (! equiv_init_movable_p (XEXP (x, i), regno))
3112 	  return 0;
3113 	break;
3114       case 'E':
3115 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3116 	  if (! equiv_init_movable_p (XVECEXP (x, i, j), regno))
3117 	    return 0;
3118 	break;
3119       }
3120 
3121   return 1;
3122 }
3123 
3124 static bool memref_referenced_p (rtx memref, rtx x, bool read_p);
3125 
3126 /* Auxiliary function for memref_referenced_p.  Return true if setting the
3127    location X references memory that would be affected by a store to MEMREF.  */
3128 static bool
3129 process_set_for_memref_referenced_p (rtx memref, rtx x)
3130 {
3131   /* If we are setting a MEM, it doesn't count (its address does), but any
3132      other SET_DEST that has a MEM in it is referencing the MEM.  */
3133   if (MEM_P (x))
3134     {
3135       if (memref_referenced_p (memref, XEXP (x, 0), true))
3136 	return true;
3137     }
3138   else if (memref_referenced_p (memref, x, false))
3139     return true;
3140 
3141   return false;
3142 }
3143 
3144 /* TRUE if X references a memory location (as a read if READ_P) that
3145    would be affected by a store to MEMREF.  */
3146 static bool
3147 memref_referenced_p (rtx memref, rtx x, bool read_p)
3148 {
3149   int i, j;
3150   const char *fmt;
3151   enum rtx_code code = GET_CODE (x);
3152 
3153   switch (code)
3154     {
3155     case CONST:
3156     case LABEL_REF:
3157     case SYMBOL_REF:
3158     CASE_CONST_ANY:
3159     case PC:
3160     case CC0:
3161     case HIGH:
3162     case LO_SUM:
3163       return false;
3164 
3165     case REG:
3166       return (reg_equiv[REGNO (x)].replacement
3167 	      && memref_referenced_p (memref,
3168 				      reg_equiv[REGNO (x)].replacement, read_p));
3169 
3170     case MEM:
3171       /* Memory X might have another effective type than MEMREF.  */
3172       if (read_p || true_dependence (memref, VOIDmode, x))
3173 	return true;
3174       break;
3175 
3176     case SET:
3177       if (process_set_for_memref_referenced_p (memref, SET_DEST (x)))
3178 	return true;
3179 
3180       return memref_referenced_p (memref, SET_SRC (x), true);
3181 
3182     case CLOBBER:
3183       if (process_set_for_memref_referenced_p (memref, XEXP (x, 0)))
3184 	return true;
3185 
3186       return false;
3187 
3188     case PRE_DEC:
3189     case POST_DEC:
3190     case PRE_INC:
3191     case POST_INC:
3192       if (process_set_for_memref_referenced_p (memref, XEXP (x, 0)))
3193 	return true;
3194 
3195       return memref_referenced_p (memref, XEXP (x, 0), true);
3196 
3197     case POST_MODIFY:
3198     case PRE_MODIFY:
3199       /* op0 = op0 + op1 */
3200       if (process_set_for_memref_referenced_p (memref, XEXP (x, 0)))
3201 	return true;
3202 
3203       if (memref_referenced_p (memref, XEXP (x, 0), true))
3204 	return true;
3205 
3206       return memref_referenced_p (memref, XEXP (x, 1), true);
3207 
3208     default:
3209       break;
3210     }
3211 
3212   fmt = GET_RTX_FORMAT (code);
3213   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3214     switch (fmt[i])
3215       {
3216       case 'e':
3217 	if (memref_referenced_p (memref, XEXP (x, i), read_p))
3218 	  return true;
3219 	break;
3220       case 'E':
3221 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3222 	  if (memref_referenced_p (memref, XVECEXP (x, i, j), read_p))
3223 	    return true;
3224 	break;
3225       }
3226 
3227   return false;
3228 }
3229 
3230 /* TRUE if some insn in the range (START, END] references a memory location
3231    that would be affected by a store to MEMREF.
3232 
3233    Callers should not call this routine if START is after END in the
3234    RTL chain.  */
3235 
3236 static int
3237 memref_used_between_p (rtx memref, rtx_insn *start, rtx_insn *end)
3238 {
3239   rtx_insn *insn;
3240 
3241   for (insn = NEXT_INSN (start);
3242        insn && insn != NEXT_INSN (end);
3243        insn = NEXT_INSN (insn))
3244     {
3245       if (!NONDEBUG_INSN_P (insn))
3246 	continue;
3247 
3248       if (memref_referenced_p (memref, PATTERN (insn), false))
3249 	return 1;
3250 
3251       /* Nonconst functions may access memory.  */
3252       if (CALL_P (insn) && (! RTL_CONST_CALL_P (insn)))
3253 	return 1;
3254     }
3255 
3256   gcc_assert (insn == NEXT_INSN (end));
3257   return 0;
3258 }
3259 
3260 /* Mark REG as having no known equivalence.
3261    Some instructions might have been processed before and furnished
3262    with REG_EQUIV notes for this register; these notes will have to be
3263    removed.
3264    STORE is the piece of RTL that does the non-constant / conflicting
3265    assignment - a SET, CLOBBER or REG_INC note.  It is currently not used,
3266    but needs to be there because this function is called from note_stores.  */
3267 static void
3268 no_equiv (rtx reg, const_rtx store ATTRIBUTE_UNUSED,
3269 	  void *data ATTRIBUTE_UNUSED)
3270 {
3271   int regno;
3272   rtx_insn_list *list;
3273 
3274   if (!REG_P (reg))
3275     return;
3276   regno = REGNO (reg);
3277   reg_equiv[regno].no_equiv = 1;
3278   list = reg_equiv[regno].init_insns;
3279   if (list && list->insn () == NULL)
3280     return;
3281   reg_equiv[regno].init_insns = gen_rtx_INSN_LIST (VOIDmode, NULL_RTX, NULL);
3282   reg_equiv[regno].replacement = NULL_RTX;
3283   /* This doesn't matter for equivalences made for argument registers, we
3284      should keep their initialization insns.  */
3285   if (reg_equiv[regno].is_arg_equivalence)
3286     return;
3287   ira_reg_equiv[regno].defined_p = false;
3288   ira_reg_equiv[regno].init_insns = NULL;
3289   for (; list; list = list->next ())
3290     {
3291       rtx_insn *insn = list->insn ();
3292       remove_note (insn, find_reg_note (insn, REG_EQUIV, NULL_RTX));
3293     }
3294 }
3295 
3296 /* Scan INSN for paradoxical subregs and mark each register mentioned
3297    in one by setting its reg_equiv pdx_subregs flag.  */
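/* E.g. (subreg:DI (reg:SI 100) 0) is paradoxical: the outer mode is wider
   than the inner register.  Pseudos flagged this way are later kept from
   being made equivalent to a MEM (see update_equiv_regs).  */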
3298 
3299 static void
3300 set_paradoxical_subreg (rtx_insn *insn)
3301 {
3302   subrtx_iterator::array_type array;
3303   FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
3304     {
3305       const_rtx subreg = *iter;
3306       if (GET_CODE (subreg) == SUBREG)
3307 	{
3308 	  const_rtx reg = SUBREG_REG (subreg);
3309 	  if (REG_P (reg) && paradoxical_subreg_p (subreg))
3310 	    reg_equiv[REGNO (reg)].pdx_subregs = true;
3311 	}
3312     }
3313 }
3314 
3315 /* In DEBUG_INSN location adjust REGs from CLEARED_REGS bitmap to the
3316    equivalent replacement.  */
3317 
3318 static rtx
3319 adjust_cleared_regs (rtx loc, const_rtx old_rtx ATTRIBUTE_UNUSED, void *data)
3320 {
3321   if (REG_P (loc))
3322     {
3323       bitmap cleared_regs = (bitmap) data;
3324       if (bitmap_bit_p (cleared_regs, REGNO (loc)))
3325 	return simplify_replace_fn_rtx (copy_rtx (*reg_equiv[REGNO (loc)].src_p),
3326 					NULL_RTX, adjust_cleared_regs, data);
3327     }
3328   return NULL_RTX;
3329 }
3330 
3331 /* Given register REGNO is set only once, return true if the defining
3332    insn dominates all uses.  */
3333 
3334 static bool
3335 def_dominates_uses (int regno)
3336 {
3337   df_ref def = DF_REG_DEF_CHAIN (regno);
3338 
3339   struct df_insn_info *def_info = DF_REF_INSN_INFO (def);
3340   /* If this is an artificial def (eh handler regs, hard frame pointer
3341      for non-local goto, regs defined on function entry) then def_info
3342      is NULL and the reg is always live before any use.  We might
3343      reasonably return true in that case, but since the only call
3344      of this function is currently here in ira.c when we are looking
3345      at a defining insn we can't have an artificial def as that would
3346      bump DF_REG_DEF_COUNT.  */
3347   gcc_assert (DF_REG_DEF_COUNT (regno) == 1 && def_info != NULL);
3348 
3349   rtx_insn *def_insn = DF_REF_INSN (def);
3350   basic_block def_bb = BLOCK_FOR_INSN (def_insn);
3351 
3352   for (df_ref use = DF_REG_USE_CHAIN (regno);
3353        use;
3354        use = DF_REF_NEXT_REG (use))
3355     {
3356       struct df_insn_info *use_info = DF_REF_INSN_INFO (use);
3357       /* Only check real uses, not artificial ones.  */
3358       if (use_info)
3359 	{
3360 	  rtx_insn *use_insn = DF_REF_INSN (use);
3361 	  if (!DEBUG_INSN_P (use_insn))
3362 	    {
3363 	      basic_block use_bb = BLOCK_FOR_INSN (use_insn);
3364 	      if (use_bb != def_bb
3365 		  ? !dominated_by_p (CDI_DOMINATORS, use_bb, def_bb)
3366 		  : DF_INSN_INFO_LUID (use_info) < DF_INSN_INFO_LUID (def_info))
3367 		return false;
3368 	    }
3369 	}
3370     }
3371   return true;
3372 }
3373 
3374 /* Scan the instructions before update_equiv_regs.  Record which registers
3375    are referenced as paradoxical subregs.  Also check for cases in which
3376    the current function needs to save a register that one of its call
3377    instructions clobbers.
3378 
3379    These things are logically unrelated, but it's more efficient to do
3380    them together.  */
3381 
3382 static void
3383 update_equiv_regs_prescan (void)
3384 {
3385   basic_block bb;
3386   rtx_insn *insn;
3387   function_abi_aggregator callee_abis;
3388 
3389   FOR_EACH_BB_FN (bb, cfun)
3390     FOR_BB_INSNS (bb, insn)
3391       if (NONDEBUG_INSN_P (insn))
3392 	{
3393 	  set_paradoxical_subreg (insn);
3394 	  if (CALL_P (insn))
3395 	    callee_abis.note_callee_abi (insn_callee_abi (insn));
3396 	}
3397 
3398   HARD_REG_SET extra_caller_saves = callee_abis.caller_save_regs (*crtl->abi);
3399   if (!hard_reg_set_empty_p (extra_caller_saves))
3400     for (unsigned int regno = 0; regno < FIRST_PSEUDO_REGISTER; ++regno)
3401       if (TEST_HARD_REG_BIT (extra_caller_saves, regno))
3402 	df_set_regs_ever_live (regno, true);
3403 }
3404 
3405 /* Find registers that are equivalent to a single value throughout the
3406    compilation (either because they can be referenced in memory or are
3407    set once from a single constant).  Lower their priority for a
3408    register.
3409 
3410    If such a register is only referenced once, try substituting its
3411    value into the using insn.  If it succeeds, we can eliminate the
3412    register completely.
3413 
3414    Initialize init_insns in ira_reg_equiv array.  */
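/* Schematically (illustrative RTL, not an actual dump): a pseudo whose
   only definition is a load of a function invariant, e.g.

       (insn 5 ... (set (reg:SI 100) (symbol_ref:SI "table")))

   gets a REG_EQUIV note attached to insn 5, so the allocator and reload
   may later use the equivalent value directly instead of spilling the
   pseudo.  */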
3415 static void
3416 update_equiv_regs (void)
3417 {
3418   rtx_insn *insn;
3419   basic_block bb;
3420 
3421   /* Scan the insns and find which registers have equivalences.  Do this
3422      in a separate scan of the insns because (due to -fcse-follow-jumps)
3423      a register can be set below its use.  */
3424   bitmap setjmp_crosses = regstat_get_setjmp_crosses ();
3425   FOR_EACH_BB_FN (bb, cfun)
3426     {
3427       int loop_depth = bb_loop_depth (bb);
3428 
3429       for (insn = BB_HEAD (bb);
3430 	   insn != NEXT_INSN (BB_END (bb));
3431 	   insn = NEXT_INSN (insn))
3432 	{
3433 	  rtx note;
3434 	  rtx set;
3435 	  rtx dest, src;
3436 	  int regno;
3437 
3438 	  if (! INSN_P (insn))
3439 	    continue;
3440 
3441 	  for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
3442 	    if (REG_NOTE_KIND (note) == REG_INC)
3443 	      no_equiv (XEXP (note, 0), note, NULL);
3444 
3445 	  set = single_set (insn);
3446 
3447 	  /* If this insn contains more (or less) than a single SET,
3448 	     only mark all destinations as having no known equivalence.  */
3449 	  if (set == NULL_RTX
3450 	      || side_effects_p (SET_SRC (set)))
3451 	    {
3452 	      note_pattern_stores (PATTERN (insn), no_equiv, NULL);
3453 	      continue;
3454 	    }
3455 	  else if (GET_CODE (PATTERN (insn)) == PARALLEL)
3456 	    {
3457 	      int i;
3458 
3459 	      for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
3460 		{
3461 		  rtx part = XVECEXP (PATTERN (insn), 0, i);
3462 		  if (part != set)
3463 		    note_pattern_stores (part, no_equiv, NULL);
3464 		}
3465 	    }
3466 
3467 	  dest = SET_DEST (set);
3468 	  src = SET_SRC (set);
3469 
3470 	  /* See if this is setting up the equivalence between an argument
3471 	     register and its stack slot.  */
3472 	  note = find_reg_note (insn, REG_EQUIV, NULL_RTX);
3473 	  if (note)
3474 	    {
3475 	      gcc_assert (REG_P (dest));
3476 	      regno = REGNO (dest);
3477 
3478 	      /* Note that we don't want to clear init_insns in
3479 		 ira_reg_equiv even if there are multiple sets of this
3480 		 register.  */
3481 	      reg_equiv[regno].is_arg_equivalence = 1;
3482 
3483 	      /* The insn result can have an equivalent memory location
3484 		 even though the equivalence is not set up by the insn
3485 		 itself.  We add this insn to the init insns as a flag
3486 		 that regno has an equivalence; the insn will be removed
3487 		 from the init insn list later.  */
3488 	      if (rtx_equal_p (src, XEXP (note, 0)) || MEM_P (XEXP (note, 0)))
3489 		ira_reg_equiv[regno].init_insns
3490 		  = gen_rtx_INSN_LIST (VOIDmode, insn,
3491 				       ira_reg_equiv[regno].init_insns);
3492 
3493 	      /* Continue normally in case this is a candidate for
3494 		 replacements.  */
3495 	    }
3496 
3497 	  if (!optimize)
3498 	    continue;
3499 
3500 	  /* We only handle the case of a pseudo register being set
3501 	     once, or always to the same value.  */
3502 	  /* ??? The mn10200 port breaks if we add equivalences for
3503 	     values that need an ADDRESS_REGS register and set them equivalent
3504 	     to a MEM of a pseudo.  The actual problem is in the over-conservative
3505 	     handling of INPADDR_ADDRESS / INPUT_ADDRESS / INPUT triples in
3506 	     calculate_needs, but we traditionally work around this problem
3507 	     here by rejecting equivalences when the destination is in a register
3508 	     that's likely spilled.  This is fragile, of course, since the
3509 	     preferred class of a pseudo depends on all instructions that set
3510 	     or use it.  */
3511 
3512 	  if (!REG_P (dest)
3513 	      || (regno = REGNO (dest)) < FIRST_PSEUDO_REGISTER
3514 	      || (reg_equiv[regno].init_insns
3515 		  && reg_equiv[regno].init_insns->insn () == NULL)
3516 	      || (targetm.class_likely_spilled_p (reg_preferred_class (regno))
3517 		  && MEM_P (src) && ! reg_equiv[regno].is_arg_equivalence))
3518 	    {
3519 	      /* This might be setting a SUBREG of a pseudo, a pseudo that is
3520 		 also set somewhere else to a constant.  */
3521 	      note_pattern_stores (set, no_equiv, NULL);
3522 	      continue;
3523 	    }
3524 
3525 	  /* Don't set reg mentioned in a paradoxical subreg
3526 	     equivalent to a mem.  */
3527 	  if (MEM_P (src) && reg_equiv[regno].pdx_subregs)
3528 	    {
3529 	      note_pattern_stores (set, no_equiv, NULL);
3530 	      continue;
3531 	    }
3532 
3533 	  note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
3534 
3535 	  /* cse sometimes generates function invariants, but doesn't put a
3536 	     REG_EQUAL note on the insn.  Since this note would be redundant,
3537 	     there's no point creating it earlier than here.  */
3538 	  if (! note && ! rtx_varies_p (src, 0))
3539 	    note = set_unique_reg_note (insn, REG_EQUAL, copy_rtx (src));
3540 
3541 	  /* Don't bother considering a REG_EQUAL note containing an EXPR_LIST
3542 	     since it represents a function call.  */
3543 	  if (note && GET_CODE (XEXP (note, 0)) == EXPR_LIST)
3544 	    note = NULL_RTX;
3545 
3546 	  if (DF_REG_DEF_COUNT (regno) != 1)
3547 	    {
3548 	      bool equal_p = true;
3549 	      rtx_insn_list *list;
3550 
3551 	      /* If we have already processed this pseudo and determined it
3552 		 cannot have an equivalence, then honor that decision.  */
3553 	      if (reg_equiv[regno].no_equiv)
3554 		continue;
3555 
3556 	      if (! note
3557 		  || rtx_varies_p (XEXP (note, 0), 0)
3558 		  || (reg_equiv[regno].replacement
3559 		      && ! rtx_equal_p (XEXP (note, 0),
3560 					reg_equiv[regno].replacement)))
3561 		{
3562 		  no_equiv (dest, set, NULL);
3563 		  continue;
3564 		}
3565 
3566 	      list = reg_equiv[regno].init_insns;
3567 	      for (; list; list = list->next ())
3568 		{
3569 		  rtx note_tmp;
3570 		  rtx_insn *insn_tmp;
3571 
3572 		  insn_tmp = list->insn ();
3573 		  note_tmp = find_reg_note (insn_tmp, REG_EQUAL, NULL_RTX);
3574 		  gcc_assert (note_tmp);
3575 		  if (! rtx_equal_p (XEXP (note, 0), XEXP (note_tmp, 0)))
3576 		    {
3577 		      equal_p = false;
3578 		      break;
3579 		    }
3580 		}
3581 
3582 	      if (! equal_p)
3583 		{
3584 		  no_equiv (dest, set, NULL);
3585 		  continue;
3586 		}
3587 	    }
3588 
3589 	  /* Record this insn as initializing this register.  */
3590 	  reg_equiv[regno].init_insns
3591 	    = gen_rtx_INSN_LIST (VOIDmode, insn, reg_equiv[regno].init_insns);
3592 
3593 	  /* If this register is known to be equal to a constant, record that
3594 	     it is always equivalent to the constant.
3595 	     Note that it is possible to have a register use before
3596 	     the def in loops (see gcc.c-torture/execute/pr79286.c)
3597 	     where the reg is undefined on first use.  If the def insn
3598 	     won't trap we can use it as an equivalence, effectively
3599 	     choosing the "undefined" value for the reg to be the
3600 	     same as the value set by the def.  */
3601 	  if (DF_REG_DEF_COUNT (regno) == 1
3602 	      && note
3603 	      && !rtx_varies_p (XEXP (note, 0), 0)
3604 	      && (!may_trap_or_fault_p (XEXP (note, 0))
3605 		  || def_dominates_uses (regno)))
3606 	    {
3607 	      rtx note_value = XEXP (note, 0);
3608 	      remove_note (insn, note);
3609 	      set_unique_reg_note (insn, REG_EQUIV, note_value);
3610 	    }
3611 
3612 	  /* If this insn introduces a "constant" register, decrease the priority
3613 	     of that register.  Record this insn if the register is only used once
3614 	     more and the equivalence value is the same as our source.
3615 
3616 	     The latter condition is checked for two reasons:  First, it is an
3617 	     indication that it may be more efficient to actually emit the insn
3618 	     as written (if no registers are available, reload will substitute
3619 	     the equivalence).  Secondly, it avoids problems with any registers
3620 	     dying in this insn whose death notes would be missed.
3621 
3622 	     If we don't have a REG_EQUIV note, see if this insn is loading
3623 	     a register used only in one basic block from a MEM.  If so, and the
3624 	     MEM remains unchanged for the life of the register, add a REG_EQUIV
3625 	     note.  */
3626 	  note = find_reg_note (insn, REG_EQUIV, NULL_RTX);
3627 
3628 	  rtx replacement = NULL_RTX;
3629 	  if (note)
3630 	    replacement = XEXP (note, 0);
3631 	  else if (REG_BASIC_BLOCK (regno) >= NUM_FIXED_BLOCKS
3632 		   && MEM_P (SET_SRC (set)))
3633 	    {
3634 	      enum valid_equiv validity;
3635 	      validity = validate_equiv_mem (insn, dest, SET_SRC (set));
3636 	      if (validity != valid_none)
3637 		{
3638 		  replacement = copy_rtx (SET_SRC (set));
3639 		  if (validity == valid_reload)
3640 		    note = set_unique_reg_note (insn, REG_EQUIV, replacement);
3641 		}
3642 	    }
3643 
3644 	  /* If we haven't done so, record for reload that this is an
3645 	     equivalencing insn.  */
3646 	  if (note && !reg_equiv[regno].is_arg_equivalence)
3647 	    ira_reg_equiv[regno].init_insns
3648 	      = gen_rtx_INSN_LIST (VOIDmode, insn,
3649 				   ira_reg_equiv[regno].init_insns);
3650 
3651 	  if (replacement)
3652 	    {
3653 	      reg_equiv[regno].replacement = replacement;
3654 	      reg_equiv[regno].src_p = &SET_SRC (set);
3655 	      reg_equiv[regno].loop_depth = (short) loop_depth;
3656 
3657 	      /* Don't mess with things live during setjmp.  */
3658 	      if (optimize && !bitmap_bit_p (setjmp_crosses, regno))
3659 		{
3660 		  /* If the register is referenced exactly twice, meaning it is
3661 		     set once and used once, indicate that the reference may be
3662 		     replaced by the equivalence we computed above.  Do this
3663 		     even if the register is only used in one block so that
3664 		     dependencies can be handled where the last register is
3665 		     used in a different block (i.e. HIGH / LO_SUM sequences)
3666 		     and to reduce the number of registers alive across
3667 		     calls.  */
3668 
3669 		  if (REG_N_REFS (regno) == 2
3670 		      && (rtx_equal_p (replacement, src)
3671 			  || ! equiv_init_varies_p (src))
3672 		      && NONJUMP_INSN_P (insn)
3673 		      && equiv_init_movable_p (PATTERN (insn), regno))
3674 		    reg_equiv[regno].replace = 1;
3675 		}
3676 	    }
3677 	}
3678     }
3679 }
3680 
3681 /* For insns that set a MEM to the contents of a REG that is only used
3682    in a single basic block, see if the register is always equivalent
3683    to that memory location and if moving the store from INSN to the
3684    insn that sets REG is safe.  If so, put a REG_EQUIV note on the
3685    initializing insn.  */
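/* Schematically (illustrative RTL): given

       (insn 10 ... (set (reg:SI 100) ...))           ; sole def of reg 100
       ...
       (insn 20 ... (set (mem:SI ...) (reg:SI 100)))

   where reg 100 is used only in this block, the MEM stays unchanged and
   is not referenced between the two insns, insn 10 gets a REG_EQUIV note
   for the MEM.  */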
3686 static void
3687 add_store_equivs (void)
3688 {
3689   auto_bitmap seen_insns;
3690 
3691   for (rtx_insn *insn = get_insns (); insn; insn = NEXT_INSN (insn))
3692     {
3693       rtx set, src, dest;
3694       unsigned regno;
3695       rtx_insn *init_insn;
3696 
3697       bitmap_set_bit (seen_insns, INSN_UID (insn));
3698 
3699       if (! INSN_P (insn))
3700 	continue;
3701 
3702       set = single_set (insn);
3703       if (! set)
3704 	continue;
3705 
3706       dest = SET_DEST (set);
3707       src = SET_SRC (set);
3708 
3709       /* Don't add a REG_EQUIV note if the insn already has one.  The existing
3710 	 REG_EQUIV is likely more useful than the one we are adding.  */
3711       if (MEM_P (dest) && REG_P (src)
3712 	  && (regno = REGNO (src)) >= FIRST_PSEUDO_REGISTER
3713 	  && REG_BASIC_BLOCK (regno) >= NUM_FIXED_BLOCKS
3714 	  && DF_REG_DEF_COUNT (regno) == 1
3715 	  && ! reg_equiv[regno].pdx_subregs
3716 	  && reg_equiv[regno].init_insns != NULL
3717 	  && (init_insn = reg_equiv[regno].init_insns->insn ()) != 0
3718 	  && bitmap_bit_p (seen_insns, INSN_UID (init_insn))
3719 	  && ! find_reg_note (init_insn, REG_EQUIV, NULL_RTX)
3720 	  && validate_equiv_mem (init_insn, src, dest) == valid_reload
3721 	  && ! memref_used_between_p (dest, init_insn, insn)
3722 	  /* Attaching a REG_EQUIV note will fail if INIT_INSN has
3723 	     multiple sets.  */
3724 	  && set_unique_reg_note (init_insn, REG_EQUIV, copy_rtx (dest)))
3725 	{
3726 	  /* This insn makes the equivalence, not the one initializing
3727 	     the register.  */
3728 	  ira_reg_equiv[regno].init_insns
3729 	    = gen_rtx_INSN_LIST (VOIDmode, insn, NULL_RTX);
3730 	  df_notes_rescan (init_insn);
3731 	  if (dump_file)
3732 	    fprintf (dump_file,
3733 		     "Adding REG_EQUIV to insn %d for source of insn %d\n",
3734 		     INSN_UID (init_insn),
3735 		     INSN_UID (insn));
3736 	}
3737     }
3738 }
3739 
3740 /* Scan all regs killed in an insn to see if any of them are registers
3741    only used that once.  If so, see if we can replace the reference
3742    with the equivalent form.  If we can, delete the initializing
3743    reference and this register will go away.  If we can't replace the
3744    reference, and the initializing reference is within the same loop
3745    (or in an inner loop), then move the register initialization just
3746    before the use, so that they are in the same basic block.  */
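/* For example (illustrative RTL): with

       (insn 10 ... (set (reg:SI 100) (mem/u:SI (symbol_ref "c"))))
       ...
       (insn 30 ... (set (reg:SI 101) (plus:SI (reg:SI 101) (reg:SI 100))))

   where reg 100 is set once and used once, the MEM is substituted into
   insn 30 and insn 10 deleted if the result is recognized; otherwise
   insn 10 is moved to just before insn 30 (neither transformation is
   done when the use is in a deeper loop than the def).  */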
3747 static void
3748 combine_and_move_insns (void)
3749 {
3750   auto_bitmap cleared_regs;
3751   int max = max_reg_num ();
3752 
3753   for (int regno = FIRST_PSEUDO_REGISTER; regno < max; regno++)
3754     {
3755       if (!reg_equiv[regno].replace)
3756 	continue;
3757 
3758       rtx_insn *use_insn = 0;
3759       for (df_ref use = DF_REG_USE_CHAIN (regno);
3760 	   use;
3761 	   use = DF_REF_NEXT_REG (use))
3762 	if (DF_REF_INSN_INFO (use))
3763 	  {
3764 	    if (DEBUG_INSN_P (DF_REF_INSN (use)))
3765 	      continue;
3766 	    gcc_assert (!use_insn);
3767 	    use_insn = DF_REF_INSN (use);
3768 	  }
3769       gcc_assert (use_insn);
3770 
3771       /* Don't substitute into jumps.  indirect_jump_optimize does
3772 	 this for anything we are prepared to handle.  */
3773       if (JUMP_P (use_insn))
3774 	continue;
3775 
3776       /* Also don't substitute into a conditional trap insn -- it can become
3777 	 an unconditional trap, and that is a flow control insn.  */
3778       if (GET_CODE (PATTERN (use_insn)) == TRAP_IF)
3779 	continue;
3780 
3781       df_ref def = DF_REG_DEF_CHAIN (regno);
3782       gcc_assert (DF_REG_DEF_COUNT (regno) == 1 && DF_REF_INSN_INFO (def));
3783       rtx_insn *def_insn = DF_REF_INSN (def);
3784 
3785       /* We may not move instructions that can throw, since that
3786 	 changes basic block boundaries and we are not prepared to
3787 	 adjust the CFG to match.  */
3788       if (can_throw_internal (def_insn))
3789 	continue;
3790 
3791       /* Instructions with multiple sets can only be moved if DF analysis is
3792 	 performed for all of the registers set.  See PR91052.  */
3793       if (multiple_sets (def_insn))
3794 	continue;
3795 
3796       basic_block use_bb = BLOCK_FOR_INSN (use_insn);
3797       basic_block def_bb = BLOCK_FOR_INSN (def_insn);
3798       if (bb_loop_depth (use_bb) > bb_loop_depth (def_bb))
3799 	continue;
3800 
3801       if (asm_noperands (PATTERN (def_insn)) < 0
3802 	  && validate_replace_rtx (regno_reg_rtx[regno],
3803 				   *reg_equiv[regno].src_p, use_insn))
3804 	{
3805 	  rtx link;
3806 	  /* Append the REG_DEAD notes from def_insn.  */
3807 	  for (rtx *p = &REG_NOTES (def_insn); (link = *p) != 0; )
3808 	    {
3809 	      if (REG_NOTE_KIND (XEXP (link, 0)) == REG_DEAD)
3810 		{
3811 		  *p = XEXP (link, 1);
3812 		  XEXP (link, 1) = REG_NOTES (use_insn);
3813 		  REG_NOTES (use_insn) = link;
3814 		}
3815 	      else
3816 		p = &XEXP (link, 1);
3817 	    }
3818 
3819 	  remove_death (regno, use_insn);
3820 	  SET_REG_N_REFS (regno, 0);
3821 	  REG_FREQ (regno) = 0;
3822 	  df_ref use;
3823 	  FOR_EACH_INSN_USE (use, def_insn)
3824 	    {
3825 	      unsigned int use_regno = DF_REF_REGNO (use);
3826 	      if (!HARD_REGISTER_NUM_P (use_regno))
3827 		reg_equiv[use_regno].replace = 0;
3828 	    }
3829 
3830 	  delete_insn (def_insn);
3831 
3832 	  reg_equiv[regno].init_insns = NULL;
3833 	  ira_reg_equiv[regno].init_insns = NULL;
3834 	  bitmap_set_bit (cleared_regs, regno);
3835 	}
3836 
3837       /* Move the initialization of the register to just before
3838 	 USE_INSN.  Update the flow information.  */
3839       else if (prev_nondebug_insn (use_insn) != def_insn)
3840 	{
3841 	  rtx_insn *new_insn;
3842 
3843 	  new_insn = emit_insn_before (PATTERN (def_insn), use_insn);
3844 	  REG_NOTES (new_insn) = REG_NOTES (def_insn);
3845 	  REG_NOTES (def_insn) = 0;
3846 	  /* Rescan it to process the notes.  */
3847 	  df_insn_rescan (new_insn);
3848 
3849 	  /* Make sure this insn is recognized before reload begins,
3850 	     otherwise eliminate_regs_in_insn will die.  */
3851 	  INSN_CODE (new_insn) = INSN_CODE (def_insn);
3852 
3853 	  delete_insn (def_insn);
3854 
3855 	  XEXP (reg_equiv[regno].init_insns, 0) = new_insn;
3856 
3857 	  REG_BASIC_BLOCK (regno) = use_bb->index;
3858 	  REG_N_CALLS_CROSSED (regno) = 0;
3859 
3860 	  if (use_insn == BB_HEAD (use_bb))
3861 	    BB_HEAD (use_bb) = new_insn;
3862 
3863 	  /* We know regno dies in use_insn, but inside a loop
3864 	     REG_DEAD notes might be missing when def_insn was in
3865 	     another basic block.  However, when we move def_insn into
3866 	     this bb we'll definitely get a REG_DEAD note and reload
3867 	     will see the death.  It's possible that update_equiv_regs
3868 	     set up an equivalence referencing regno for a reg set by
3869 	     use_insn, when regno was seen as non-local.  Now that
3870 	     regno is local to this block, and dies, such an
3871 	     equivalence is invalid.  */
3872 	  if (find_reg_note (use_insn, REG_EQUIV, regno_reg_rtx[regno]))
3873 	    {
3874 	      rtx set = single_set (use_insn);
3875 	      if (set && REG_P (SET_DEST (set)))
3876 		no_equiv (SET_DEST (set), set, NULL);
3877 	    }
3878 
3879 	  ira_reg_equiv[regno].init_insns
3880 	    = gen_rtx_INSN_LIST (VOIDmode, new_insn, NULL_RTX);
3881 	  bitmap_set_bit (cleared_regs, regno);
3882 	}
3883     }
3884 
3885   if (!bitmap_empty_p (cleared_regs))
3886     {
3887       basic_block bb;
3888 
3889       FOR_EACH_BB_FN (bb, cfun)
3890 	{
3891 	  bitmap_and_compl_into (DF_LR_IN (bb), cleared_regs);
3892 	  bitmap_and_compl_into (DF_LR_OUT (bb), cleared_regs);
3893 	  if (!df_live)
3894 	    continue;
3895 	  bitmap_and_compl_into (DF_LIVE_IN (bb), cleared_regs);
3896 	  bitmap_and_compl_into (DF_LIVE_OUT (bb), cleared_regs);
3897 	}
3898 
3899       /* Last pass - adjust debug insns referencing cleared regs.  */
3900       if (MAY_HAVE_DEBUG_BIND_INSNS)
3901 	for (rtx_insn *insn = get_insns (); insn; insn = NEXT_INSN (insn))
3902 	  if (DEBUG_BIND_INSN_P (insn))
3903 	    {
3904 	      rtx old_loc = INSN_VAR_LOCATION_LOC (insn);
3905 	      INSN_VAR_LOCATION_LOC (insn)
3906 		= simplify_replace_fn_rtx (old_loc, NULL_RTX,
3907 					   adjust_cleared_regs,
3908 					   (void *) cleared_regs);
3909 	      if (old_loc != INSN_VAR_LOCATION_LOC (insn))
3910 		df_insn_rescan (insn);
3911 	    }
3912     }
3913 }
3914 
3915 /* A pass over indirect jumps, converting simple cases to direct jumps.
3916    Combine does this optimization too, but only within a basic block.  */
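/* E.g. when the only definition of reg 100 is

       (insn 7 ... (set (reg:DI 100) (label_ref 23)))

   an indirect jump (set (pc) (reg:DI 100)) ending a block is rewritten
   into a direct jump to label 23; jump labels are then rebuilt and any
   blocks that became unreachable are removed.  */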
3917 static void
3918 indirect_jump_optimize (void)
3919 {
3920   basic_block bb;
3921   bool rebuild_p = false;
3922 
3923   FOR_EACH_BB_REVERSE_FN (bb, cfun)
3924     {
3925       rtx_insn *insn = BB_END (bb);
3926       if (!JUMP_P (insn)
3927 	  || find_reg_note (insn, REG_NON_LOCAL_GOTO, NULL_RTX))
3928 	continue;
3929 
3930       rtx x = pc_set (insn);
3931       if (!x || !REG_P (SET_SRC (x)))
3932 	continue;
3933 
3934       int regno = REGNO (SET_SRC (x));
3935       if (DF_REG_DEF_COUNT (regno) == 1)
3936 	{
3937 	  df_ref def = DF_REG_DEF_CHAIN (regno);
3938 	  if (!DF_REF_IS_ARTIFICIAL (def))
3939 	    {
3940 	      rtx_insn *def_insn = DF_REF_INSN (def);
3941 	      rtx lab = NULL_RTX;
3942 	      rtx set = single_set (def_insn);
3943 	      if (set && GET_CODE (SET_SRC (set)) == LABEL_REF)
3944 		lab = SET_SRC (set);
3945 	      else
3946 		{
3947 		  rtx eqnote = find_reg_note (def_insn, REG_EQUAL, NULL_RTX);
3948 		  if (eqnote && GET_CODE (XEXP (eqnote, 0)) == LABEL_REF)
3949 		    lab = XEXP (eqnote, 0);
3950 		}
3951 	      if (lab && validate_replace_rtx (SET_SRC (x), lab, insn))
3952 		rebuild_p = true;
3953 	    }
3954 	}
3955     }
3956 
3957   if (rebuild_p)
3958     {
3959       timevar_push (TV_JUMP);
3960       rebuild_jump_labels (get_insns ());
3961       if (purge_all_dead_edges ())
3962 	delete_unreachable_blocks ();
3963       timevar_pop (TV_JUMP);
3964     }
3965 }
3966 
3967 /* Set up fields memory, constant, and invariant from init_insns in
3968    the structures of array ira_reg_equiv.  */
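/* Roughly: a legitimate MEM from a REG_EQUIV note fills in the memory
   field; a PLUS of the frame pointer and a constant (or the frame/arg
   pointer itself) fills in the invariant field; a legitimate constant
   fills in the constant field; other function invariants are forced into
   the constant pool, and anything else drops the equivalence.  */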
3969 static void
3970 setup_reg_equiv (void)
3971 {
3972   int i;
3973   rtx_insn_list *elem, *prev_elem, *next_elem;
3974   rtx_insn *insn;
3975   rtx set, x;
3976 
3977   for (i = FIRST_PSEUDO_REGISTER; i < ira_reg_equiv_len; i++)
3978     for (prev_elem = NULL, elem = ira_reg_equiv[i].init_insns;
3979 	 elem;
3980 	 prev_elem = elem, elem = next_elem)
3981       {
3982 	next_elem = elem->next ();
3983 	insn = elem->insn ();
3984 	set = single_set (insn);
3985 
3986 	/* Init insns can set up equivalence when the reg is a destination or
3987 	   a source (in this case the destination is memory).  */
3988 	if (set != 0 && (REG_P (SET_DEST (set)) || REG_P (SET_SRC (set))))
3989 	  {
3990 	    if ((x = find_reg_note (insn, REG_EQUIV, NULL_RTX)) != NULL)
3991 	      {
3992 		x = XEXP (x, 0);
3993 		if (REG_P (SET_DEST (set))
3994 		    && REGNO (SET_DEST (set)) == (unsigned int) i
3995 		    && ! rtx_equal_p (SET_SRC (set), x) && MEM_P (x))
3996 		  {
3997 		    /* This insn reports the equivalence but does
3998 		       not actually set it.  Remove it from the
3999 		       list.  */
4000 		    if (prev_elem == NULL)
4001 		      ira_reg_equiv[i].init_insns = next_elem;
4002 		    else
4003 		      XEXP (prev_elem, 1) = next_elem;
4004 		    elem = prev_elem;
4005 		  }
4006 	      }
4007 	    else if (REG_P (SET_DEST (set))
4008 		     && REGNO (SET_DEST (set)) == (unsigned int) i)
4009 	      x = SET_SRC (set);
4010 	    else
4011 	      {
4012 		gcc_assert (REG_P (SET_SRC (set))
4013 			    && REGNO (SET_SRC (set)) == (unsigned int) i);
4014 		x = SET_DEST (set);
4015 	      }
4016 	    if (! function_invariant_p (x)
4017 		|| ! flag_pic
4018 		/* A function invariant is often CONSTANT_P but may
4019 		   include a register.  We promise to only pass
4020 		   CONSTANT_P objects to LEGITIMATE_PIC_OPERAND_P.  */
4021 		|| (CONSTANT_P (x) && LEGITIMATE_PIC_OPERAND_P (x)))
4022 	      {
4023 		/* It can happen that a REG_EQUIV note contains a MEM
4024 		   that is not a legitimate memory operand.  As later
4025 		   stages of reload assume that all addresses found in
4026 		   the lra_regno_equiv_* arrays were originally
4027 		   legitimate, we ignore such REG_EQUIV notes.  */
4028 		if (memory_operand (x, VOIDmode))
4029 		  {
4030 		    ira_reg_equiv[i].defined_p = true;
4031 		    ira_reg_equiv[i].memory = x;
4032 		    continue;
4033 		  }
4034 		else if (function_invariant_p (x))
4035 		  {
4036 		    machine_mode mode;
4037 
4038 		    mode = GET_MODE (SET_DEST (set));
4039 		    if (GET_CODE (x) == PLUS
4040 			|| x == frame_pointer_rtx || x == arg_pointer_rtx)
4041 		      /* This is PLUS of frame pointer and a constant,
4042 			 or fp, or argp.  */
4043 		      ira_reg_equiv[i].invariant = x;
4044 		    else if (targetm.legitimate_constant_p (mode, x))
4045 		      ira_reg_equiv[i].constant = x;
4046 		    else
4047 		      {
4048 			ira_reg_equiv[i].memory = force_const_mem (mode, x);
4049 			if (ira_reg_equiv[i].memory == NULL_RTX)
4050 			  {
4051 			    ira_reg_equiv[i].defined_p = false;
4052 			    ira_reg_equiv[i].init_insns = NULL;
4053 			    break;
4054 			  }
4055 		      }
4056 		    ira_reg_equiv[i].defined_p = true;
4057 		    continue;
4058 		  }
4059 	      }
4060 	  }
4061 	ira_reg_equiv[i].defined_p = false;
4062 	ira_reg_equiv[i].init_insns = NULL;
4063 	break;
4064       }
4065 }
4066 
4067 
4068 
4069 /* Print chain C to FILE.  */
4070 static void
4071 print_insn_chain (FILE *file, class insn_chain *c)
4072 {
4073   fprintf (file, "insn=%d, ", INSN_UID (c->insn));
4074   bitmap_print (file, &c->live_throughout, "live_throughout: ", ", ");
4075   bitmap_print (file, &c->dead_or_set, "dead_or_set: ", "\n");
4076 }
4077 
4078 
4079 /* Print all reload_insn_chains to FILE.  */
4080 static void
4081 print_insn_chains (FILE *file)
4082 {
4083   class insn_chain *c;
4084   for (c = reload_insn_chain; c ; c = c->next)
4085     print_insn_chain (file, c);
4086 }
4087 
4088 /* Return true if pseudo REGNO should be added to set live_throughout
4089    or dead_or_set of the insn chains for reload consideration.  */
4090 static bool
4091 pseudo_for_reload_consideration_p (int regno)
4092 {
4093   /* Consider spilled pseudos too for IRA because they still have a
4094      chance to get hard registers during reload when IRA is used.  */
4095   return (reg_renumber[regno] >= 0 || ira_conflicts_p);
4096 }
4097 
4098 /* Return true if we can track the individual bytes of subreg X.
4099    When returning true, set *OUTER_SIZE to the number of bytes in
4100    X itself, *INNER_SIZE to the number of bytes in the inner register
4101    and *START to the offset of the first byte.  */
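/* E.g. for (subreg:SI (reg:DI 100) 4) this would set *OUTER_SIZE to 4,
   *INNER_SIZE to 8 and *START to 4; it fails only when one of the sizes
   or the offset is not a compile-time constant (variable-length modes).  */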
4102 static bool
4103 get_subreg_tracking_sizes (rtx x, HOST_WIDE_INT *outer_size,
4104 			   HOST_WIDE_INT *inner_size, HOST_WIDE_INT *start)
4105 {
4106   rtx reg = regno_reg_rtx[REGNO (SUBREG_REG (x))];
4107   return (GET_MODE_SIZE (GET_MODE (x)).is_constant (outer_size)
4108 	  && GET_MODE_SIZE (GET_MODE (reg)).is_constant (inner_size)
4109 	  && SUBREG_BYTE (x).is_constant (start));
4110 }
4111 
4112 /* Init LIVE_SUBREGS[ALLOCNUM] and LIVE_SUBREGS_USED[ALLOCNUM] for
4113    a register with SIZE bytes, making the register live if INIT_VALUE.  */
4114 static void
4115 init_live_subregs (bool init_value, sbitmap *live_subregs,
4116 		   bitmap live_subregs_used, int allocnum, int size)
4117 {
4118   gcc_assert (size > 0);
4119 
4120   /* Been there, done that.  */
4121   if (bitmap_bit_p (live_subregs_used, allocnum))
4122     return;
4123 
4124   /* Create a new one.  */
4125   if (live_subregs[allocnum] == NULL)
4126     live_subregs[allocnum] = sbitmap_alloc (size);
4127 
4128   /* If the entire reg was live before blasting into subregs, we need
4129      to init all of the subregs to ones; otherwise init them to 0.  */
4130   if (init_value)
4131     bitmap_ones (live_subregs[allocnum]);
4132   else
4133     bitmap_clear (live_subregs[allocnum]);
4134 
4135   bitmap_set_bit (live_subregs_used, allocnum);
4136 }
4137 
4138 /* Walk the insns of the current function and build reload_insn_chain,
4139    and record register life information.  */
4140 static void
4141 build_insn_chain (void)
4142 {
4143   unsigned int i;
4144   class insn_chain **p = &reload_insn_chain;
4145   basic_block bb;
4146   class insn_chain *c = NULL;
4147   class insn_chain *next = NULL;
4148   auto_bitmap live_relevant_regs;
4149   auto_bitmap elim_regset;
4150   /* live_subregs is a vector used to keep accurate information about
4151      which hardregs are live in multiword pseudos.  live_subregs and
4152      live_subregs_used are indexed by pseudo number.  The live_subreg
4153      entry for a particular pseudo is only used if the corresponding
4154      element is non zero in live_subregs_used.  The sbitmap size of
4155      live_subreg[allocno] is number of bytes that the pseudo can
4156      occupy.  */
4157   sbitmap *live_subregs = XCNEWVEC (sbitmap, max_regno);
4158   auto_bitmap live_subregs_used;
4159 
4160   for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4161     if (TEST_HARD_REG_BIT (eliminable_regset, i))
4162       bitmap_set_bit (elim_regset, i);
4163   FOR_EACH_BB_REVERSE_FN (bb, cfun)
4164     {
4165       bitmap_iterator bi;
4166       rtx_insn *insn;
4167 
4168       CLEAR_REG_SET (live_relevant_regs);
4169       bitmap_clear (live_subregs_used);
4170 
4171       EXECUTE_IF_SET_IN_BITMAP (df_get_live_out (bb), 0, i, bi)
4172 	{
4173 	  if (i >= FIRST_PSEUDO_REGISTER)
4174 	    break;
4175 	  bitmap_set_bit (live_relevant_regs, i);
4176 	}
4177 
4178       EXECUTE_IF_SET_IN_BITMAP (df_get_live_out (bb),
4179 				FIRST_PSEUDO_REGISTER, i, bi)
4180 	{
4181 	  if (pseudo_for_reload_consideration_p (i))
4182 	    bitmap_set_bit (live_relevant_regs, i);
4183 	}
4184 
4185       FOR_BB_INSNS_REVERSE (bb, insn)
4186 	{
4187 	  if (!NOTE_P (insn) && !BARRIER_P (insn))
4188 	    {
4189 	      struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
4190 	      df_ref def, use;
4191 
4192 	      c = new_insn_chain ();
4193 	      c->next = next;
4194 	      next = c;
4195 	      *p = c;
4196 	      p = &c->prev;
4197 
4198 	      c->insn = insn;
4199 	      c->block = bb->index;
4200 
4201 	      if (NONDEBUG_INSN_P (insn))
4202 		FOR_EACH_INSN_INFO_DEF (def, insn_info)
4203 		  {
4204 		    unsigned int regno = DF_REF_REGNO (def);
4205 
4206 		    /* Ignore may clobbers because these are generated
4207 		       from calls. However, every other kind of def is
4208 		       added to dead_or_set.  */
4209 		    if (!DF_REF_FLAGS_IS_SET (def, DF_REF_MAY_CLOBBER))
4210 		      {
4211 			if (regno < FIRST_PSEUDO_REGISTER)
4212 			  {
4213 			    if (!fixed_regs[regno])
4214 			      bitmap_set_bit (&c->dead_or_set, regno);
4215 			  }
4216 			else if (pseudo_for_reload_consideration_p (regno))
4217 			  bitmap_set_bit (&c->dead_or_set, regno);
4218 		      }
4219 
4220 		    if ((regno < FIRST_PSEUDO_REGISTER
4221 			 || reg_renumber[regno] >= 0
4222 			 || ira_conflicts_p)
4223 			&& (!DF_REF_FLAGS_IS_SET (def, DF_REF_CONDITIONAL)))
4224 		      {
4225 			rtx reg = DF_REF_REG (def);
4226 			HOST_WIDE_INT outer_size, inner_size, start;
4227 
4228 			/* We can usually track the liveness of individual
4229 			   bytes within a subreg.  The only exceptions are
4230 			   subregs wrapped in ZERO_EXTRACTs and subregs whose
4231 			   size is not known; in those cases we need to be
4232 			   conservative and treat the definition as a partial
4233 			   definition of the full register rather than a full
4234 			   definition of a specific part of the register.  */
4235 			if (GET_CODE (reg) == SUBREG
4236 			    && !DF_REF_FLAGS_IS_SET (def, DF_REF_ZERO_EXTRACT)
4237 			    && get_subreg_tracking_sizes (reg, &outer_size,
4238 							  &inner_size, &start))
4239 			  {
4240 			    HOST_WIDE_INT last = start + outer_size;
4241 
4242 			    init_live_subregs
4243 			      (bitmap_bit_p (live_relevant_regs, regno),
4244 			       live_subregs, live_subregs_used, regno,
4245 			       inner_size);
4246 
4247 			    if (!DF_REF_FLAGS_IS_SET
4248 				(def, DF_REF_STRICT_LOW_PART))
4249 			      {
4250 				/* Expand the range to cover entire words.
4251 				   Bytes added here are "don't care".  */
4252 				start
4253 				  = start / UNITS_PER_WORD * UNITS_PER_WORD;
4254 				last = ((last + UNITS_PER_WORD - 1)
4255 					/ UNITS_PER_WORD * UNITS_PER_WORD);
4256 			      }
4257 
4258 			    /* Ignore the paradoxical bits.  */
4259 			    if (last > SBITMAP_SIZE (live_subregs[regno]))
4260 			      last = SBITMAP_SIZE (live_subregs[regno]);
4261 
4262 			    while (start < last)
4263 			      {
4264 				bitmap_clear_bit (live_subregs[regno], start);
4265 				start++;
4266 			      }
4267 
4268 			    if (bitmap_empty_p (live_subregs[regno]))
4269 			      {
4270 				bitmap_clear_bit (live_subregs_used, regno);
4271 				bitmap_clear_bit (live_relevant_regs, regno);
4272 			      }
4273 			    else
4274 			      /* Set live_relevant_regs here because
4275 				 that bit has to be true to get us to
4276 				 look at the live_subregs fields.  */
4277 			      bitmap_set_bit (live_relevant_regs, regno);
4278 			  }
4279 			else
4280 			  {
4281 			    /* DF_REF_PARTIAL is generated for
4282 			       subregs, STRICT_LOW_PART, and
4283 			       ZERO_EXTRACT.  We handle the subreg
4284 			       case above so here we have to keep from
4285 			       modeling the def as a killing def.  */
4286 			    if (!DF_REF_FLAGS_IS_SET (def, DF_REF_PARTIAL))
4287 			      {
4288 				bitmap_clear_bit (live_subregs_used, regno);
4289 				bitmap_clear_bit (live_relevant_regs, regno);
4290 			      }
4291 			  }
4292 		      }
4293 		  }
4294 
4295 	      bitmap_and_compl_into (live_relevant_regs, elim_regset);
4296 	      bitmap_copy (&c->live_throughout, live_relevant_regs);
4297 
4298 	      if (NONDEBUG_INSN_P (insn))
4299 		FOR_EACH_INSN_INFO_USE (use, insn_info)
4300 		  {
4301 		    unsigned int regno = DF_REF_REGNO (use);
4302 		    rtx reg = DF_REF_REG (use);
4303 
4304 		    /* DF_REF_READ_WRITE on a use means that this use
4305 		       is fabricated from a def that is a partial set
4306 		       to a multiword reg.  Here, we only model the
4307 		       subreg case that is not wrapped in ZERO_EXTRACT
4308 		       precisely so we do not need to look at the
4309 		       fabricated use.  */
4310 		    if (DF_REF_FLAGS_IS_SET (use, DF_REF_READ_WRITE)
4311 			&& !DF_REF_FLAGS_IS_SET (use, DF_REF_ZERO_EXTRACT)
4312 			&& DF_REF_FLAGS_IS_SET (use, DF_REF_SUBREG))
4313 		      continue;
4314 
4315 		    /* Add the last use of each var to dead_or_set.  */
4316 		    if (!bitmap_bit_p (live_relevant_regs, regno))
4317 		      {
4318 			if (regno < FIRST_PSEUDO_REGISTER)
4319 			  {
4320 			    if (!fixed_regs[regno])
4321 			      bitmap_set_bit (&c->dead_or_set, regno);
4322 			  }
4323 			else if (pseudo_for_reload_consideration_p (regno))
4324 			  bitmap_set_bit (&c->dead_or_set, regno);
4325 		      }
4326 
4327 		    if (regno < FIRST_PSEUDO_REGISTER
4328 			|| pseudo_for_reload_consideration_p (regno))
4329 		      {
4330 			HOST_WIDE_INT outer_size, inner_size, start;
4331 			if (GET_CODE (reg) == SUBREG
4332 			    && !DF_REF_FLAGS_IS_SET (use,
4333 						     DF_REF_SIGN_EXTRACT
4334 						     | DF_REF_ZERO_EXTRACT)
4335 			    && get_subreg_tracking_sizes (reg, &outer_size,
4336 							  &inner_size, &start))
4337 			  {
4338 			    HOST_WIDE_INT last = start + outer_size;
4339 
4340 			    init_live_subregs
4341 			      (bitmap_bit_p (live_relevant_regs, regno),
4342 			       live_subregs, live_subregs_used, regno,
4343 			       inner_size);
4344 
4345 			    /* Ignore the paradoxical bits.  */
4346 			    if (last > SBITMAP_SIZE (live_subregs[regno]))
4347 			      last = SBITMAP_SIZE (live_subregs[regno]);
4348 
4349 			    while (start < last)
4350 			      {
4351 				bitmap_set_bit (live_subregs[regno], start);
4352 				start++;
4353 			      }
4354 			  }
4355 			else
4356 			  /* Resetting the live_subregs_used is
4357 			     effectively saying do not use the subregs
4358 			     because we are reading the whole
4359 			     pseudo.  */
4360 			  bitmap_clear_bit (live_subregs_used, regno);
4361 			bitmap_set_bit (live_relevant_regs, regno);
4362 		      }
4363 		  }
4364 	    }
4365 	}
4366 
4367       /* FIXME!! The following code is a disaster.  Reload needs to see the
4368 	 labels and jump tables that are just hanging out in between
4369 	 the basic blocks.  See pr33676.  */
4370       insn = BB_HEAD (bb);
4371 
4372       /* Skip over the barriers and cruft.  */
4373       while (insn && (BARRIER_P (insn) || NOTE_P (insn)
4374 		      || BLOCK_FOR_INSN (insn) == bb))
4375 	insn = PREV_INSN (insn);
4376 
4377       /* While we add anything except barriers and notes, the focus is
4378 	 to get the labels and jump tables into the
4379 	 reload_insn_chain.  */
4380       while (insn)
4381 	{
4382 	  if (!NOTE_P (insn) && !BARRIER_P (insn))
4383 	    {
4384 	      if (BLOCK_FOR_INSN (insn))
4385 		break;
4386 
4387 	      c = new_insn_chain ();
4388 	      c->next = next;
4389 	      next = c;
4390 	      *p = c;
4391 	      p = &c->prev;
4392 
4393 	      /* The block makes no sense here, but it is what the old
4394 		 code did.  */
4395 	      c->block = bb->index;
4396 	      c->insn = insn;
4397 	      bitmap_copy (&c->live_throughout, live_relevant_regs);
4398 	    }
4399 	  insn = PREV_INSN (insn);
4400 	}
4401     }
4402 
4403   reload_insn_chain = c;
4404   *p = NULL;
4405 
4406   for (i = 0; i < (unsigned int) max_regno; i++)
4407     if (live_subregs[i] != NULL)
4408       sbitmap_free (live_subregs[i]);
4409   free (live_subregs);
4410 
4411   if (dump_file)
4412     print_insn_chains (dump_file);
4413 }
4414 
4415 /* Examine the rtx found in *LOC, which is read or written to as determined
4416    by TYPE.  Return false if we find a reason why an insn containing this
4417    rtx should not be moved (such as accesses to non-constant memory), true
4418    otherwise.  */
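/* For instance, (set (reg 100) (plus (reg 101) (const_int 8))) is
   moveable, while anything that writes memory, reads a non-constant MEM,
   mentions a hard register other than the frame pointer, or contains a
   volatile asm or UNSPEC_VOLATILE is not.  */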
4419 static bool
4420 rtx_moveable_p (rtx *loc, enum op_type type)
4421 {
4422   const char *fmt;
4423   rtx x = *loc;
4424   int i, j;
4425 
4426   enum rtx_code code = GET_CODE (x);
4427   switch (code)
4428     {
4429     case CONST:
4430     CASE_CONST_ANY:
4431     case SYMBOL_REF:
4432     case LABEL_REF:
4433       return true;
4434 
4435     case PC:
4436       return type == OP_IN;
4437 
4438     case CC0:
4439       return false;
4440 
4441     case REG:
4442       if (x == frame_pointer_rtx)
4443 	return true;
4444       if (HARD_REGISTER_P (x))
4445 	return false;
4446 
4447       return true;
4448 
4449     case MEM:
4450       if (type == OP_IN && MEM_READONLY_P (x))
4451 	return rtx_moveable_p (&XEXP (x, 0), OP_IN);
4452       return false;
4453 
4454     case SET:
4455       return (rtx_moveable_p (&SET_SRC (x), OP_IN)
4456 	      && rtx_moveable_p (&SET_DEST (x), OP_OUT));
4457 
4458     case STRICT_LOW_PART:
4459       return rtx_moveable_p (&XEXP (x, 0), OP_OUT);
4460 
4461     case ZERO_EXTRACT:
4462     case SIGN_EXTRACT:
4463       return (rtx_moveable_p (&XEXP (x, 0), type)
4464 	      && rtx_moveable_p (&XEXP (x, 1), OP_IN)
4465 	      && rtx_moveable_p (&XEXP (x, 2), OP_IN));
4466 
4467     case CLOBBER:
4468       return rtx_moveable_p (&SET_DEST (x), OP_OUT);
4469 
4470     case UNSPEC_VOLATILE:
4471       /* It is a bad idea to consider insns with such rtl
4472 	 as moveable ones.  The insn scheduler also considers them
4473 	 as barriers for a reason.  */
4474       return false;
4475 
4476     case ASM_OPERANDS:
4477       /* The same is true for volatile asm: it has unknown side effects, it
4478          cannot be moved at will.  */
4479       if (MEM_VOLATILE_P (x))
4480 	return false;
4481 
4482     default:
4483       break;
4484     }
4485 
4486   fmt = GET_RTX_FORMAT (code);
4487   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4488     {
4489       if (fmt[i] == 'e')
4490 	{
4491 	  if (!rtx_moveable_p (&XEXP (x, i), type))
4492 	    return false;
4493 	}
4494       else if (fmt[i] == 'E')
4495 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4496 	  {
4497 	    if (!rtx_moveable_p (&XVECEXP (x, i, j), type))
4498 	      return false;
4499 	  }
4500     }
4501   return true;
4502 }
4503 
4504 /* A wrapper around dominated_by_p, which uses the information in UID_LUID
4505    to give dominance relationships between two insns I1 and I2.  */
4506 static bool
4507 insn_dominated_by_p (rtx i1, rtx i2, int *uid_luid)
4508 {
4509   basic_block bb1 = BLOCK_FOR_INSN (i1);
4510   basic_block bb2 = BLOCK_FOR_INSN (i2);
4511 
4512   if (bb1 == bb2)
4513     return uid_luid[INSN_UID (i2)] < uid_luid[INSN_UID (i1)];
4514   return dominated_by_p (CDI_DOMINATORS, bb1, bb2);
4515 }
4516 
4517 /* Record the range of register numbers added by find_moveable_pseudos.  */
4518 int first_moveable_pseudo, last_moveable_pseudo;
4519 
4520 /* This vector holds data for every register added by
4521    find_moveable_pseudos, with index 0 holding data for the
4522    first_moveable_pseudo.  */
4523 /* The original home register.  */
4524 static vec<rtx> pseudo_replaced_reg;
4525 
4526 /* Look for instances where we have an instruction that is known to increase
4527    register pressure, and whose result is not used immediately.  If it is
4528    possible to move the instruction downwards to just before its first use,
4529    split its lifetime into two ranges.  We create a new pseudo to compute the
4530    value, and emit a move instruction just before the first use.  If, after
4531    register allocation, the new pseudo remains unallocated, the function
4532    move_unallocated_pseudos then deletes the move instruction and places
4533    the computation just before the first use.
4534 
4535    Such a move is safe and profitable if all the input registers remain live
4536    and unchanged between the original computation and its first use.  In such
4537    a situation, the computation is known to increase register pressure, and
4538    moving it is known to at least not worsen it.
4539 
4540    We restrict moves to only those cases where a register remains unallocated,
4541    in order to avoid interfering too much with the instruction schedule.  As
4542    an exception, we may move insns which only modify their input register
4543    (typically induction variables), as this increases the freedom for our
4544    intended transformation, and does not limit the second instruction
4545    scheduler pass.  */
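
/* A hedged sketch of the transformation (all register numbers are
   illustrative only).  Starting from

       (set (reg 100) (plus ...))
       ... many unrelated insns ...
       (insn using (reg 100))

   find_moveable_pseudos rewrites this as

       (set (reg 200) (plus ...))
       ... many unrelated insns ...
       (set (reg 100) (reg 200))
       (insn using (reg 100))

   and, if reg 200 is still unallocated once coloring is done,
   move_unallocated_pseudos deletes the move and re-emits the computation
   in its place, with reg 100 restored as the destination.  */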
4546 
4547 static void
4548 find_moveable_pseudos (void)
4549 {
4550   unsigned i;
4551   int max_regs = max_reg_num ();
4552   int max_uid = get_max_uid ();
4553   basic_block bb;
4554   int *uid_luid = XNEWVEC (int, max_uid);
4555   rtx_insn **closest_uses = XNEWVEC (rtx_insn *, max_regs);
4556   /* A set of registers which are live but not modified throughout a block.  */
4557   bitmap_head *bb_transp_live = XNEWVEC (bitmap_head,
4558 					 last_basic_block_for_fn (cfun));
4559   /* A set of registers which only exist in a given basic block.  */
4560   bitmap_head *bb_local = XNEWVEC (bitmap_head,
4561 				   last_basic_block_for_fn (cfun));
4562   /* A set of registers which are set once, in an instruction that can be
4563      moved freely downwards, but are otherwise transparent to a block.  */
4564   bitmap_head *bb_moveable_reg_sets = XNEWVEC (bitmap_head,
4565 					       last_basic_block_for_fn (cfun));
4566   auto_bitmap live, used, set, interesting, unusable_as_input;
4567   bitmap_iterator bi;
4568 
4569   first_moveable_pseudo = max_regs;
4570   pseudo_replaced_reg.release ();
4571   pseudo_replaced_reg.safe_grow_cleared (max_regs);
4572 
4573   df_analyze ();
4574   calculate_dominance_info (CDI_DOMINATORS);
4575 
4576   i = 0;
4577   FOR_EACH_BB_FN (bb, cfun)
4578     {
4579       rtx_insn *insn;
4580       bitmap transp = bb_transp_live + bb->index;
4581       bitmap moveable = bb_moveable_reg_sets + bb->index;
4582       bitmap local = bb_local + bb->index;
4583 
4584       bitmap_initialize (local, 0);
4585       bitmap_initialize (transp, 0);
4586       bitmap_initialize (moveable, 0);
4587       bitmap_copy (live, df_get_live_out (bb));
4588       bitmap_and_into (live, df_get_live_in (bb));
4589       bitmap_copy (transp, live);
4590       bitmap_clear (moveable);
4591       bitmap_clear (live);
4592       bitmap_clear (used);
4593       bitmap_clear (set);
4594       FOR_BB_INSNS (bb, insn)
4595 	if (NONDEBUG_INSN_P (insn))
4596 	  {
4597 	    df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
4598 	    df_ref def, use;
4599 
4600 	    uid_luid[INSN_UID (insn)] = i++;
4601 
4602 	    def = df_single_def (insn_info);
4603 	    use = df_single_use (insn_info);
4604 	    if (use
4605 		&& def
4606 		&& DF_REF_REGNO (use) == DF_REF_REGNO (def)
4607 		&& !bitmap_bit_p (set, DF_REF_REGNO (use))
4608 		&& rtx_moveable_p (&PATTERN (insn), OP_IN))
4609 	      {
4610 		unsigned regno = DF_REF_REGNO (use);
4611 		bitmap_set_bit (moveable, regno);
4612 		bitmap_set_bit (set, regno);
4613 		bitmap_set_bit (used, regno);
4614 		bitmap_clear_bit (transp, regno);
4615 		continue;
4616 	      }
4617 	    FOR_EACH_INSN_INFO_USE (use, insn_info)
4618 	      {
4619 		unsigned regno = DF_REF_REGNO (use);
4620 		bitmap_set_bit (used, regno);
4621 		if (bitmap_clear_bit (moveable, regno))
4622 		  bitmap_clear_bit (transp, regno);
4623 	      }
4624 
4625 	    FOR_EACH_INSN_INFO_DEF (def, insn_info)
4626 	      {
4627 		unsigned regno = DF_REF_REGNO (def);
4628 		bitmap_set_bit (set, regno);
4629 		bitmap_clear_bit (transp, regno);
4630 		bitmap_clear_bit (moveable, regno);
4631 	      }
4632 	  }
4633     }
4634 
4635   FOR_EACH_BB_FN (bb, cfun)
4636     {
4637       bitmap local = bb_local + bb->index;
4638       rtx_insn *insn;
4639 
4640       FOR_BB_INSNS (bb, insn)
4641 	if (NONDEBUG_INSN_P (insn))
4642 	  {
4643 	    df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
4644 	    rtx_insn *def_insn;
4645 	    rtx closest_use, note;
4646 	    df_ref def, use;
4647 	    unsigned regno;
4648 	    bool all_dominated, all_local;
4649 	    machine_mode mode;
4650 
4651 	    def = df_single_def (insn_info);
4652 	    /* There must be exactly one def in this insn.  */
4653 	    if (!def || !single_set (insn))
4654 	      continue;
4655 	    /* This must be the only definition of the reg.  We also limit
4656 	       which modes we deal with so that we can assume we can generate
4657 	       move instructions.  */
4658 	    regno = DF_REF_REGNO (def);
4659 	    mode = GET_MODE (DF_REF_REG (def));
4660 	    if (DF_REG_DEF_COUNT (regno) != 1
4661 		|| !DF_REF_INSN_INFO (def)
4662 		|| HARD_REGISTER_NUM_P (regno)
4663 		|| DF_REG_EQ_USE_COUNT (regno) > 0
4664 		|| (!INTEGRAL_MODE_P (mode) && !FLOAT_MODE_P (mode)))
4665 	      continue;
4666 	    def_insn = DF_REF_INSN (def);
4667 
4668 	    for (note = REG_NOTES (def_insn); note; note = XEXP (note, 1))
4669 	      if (REG_NOTE_KIND (note) == REG_EQUIV && MEM_P (XEXP (note, 0)))
4670 		break;
4671 
4672 	    if (note)
4673 	      {
4674 		if (dump_file)
4675 		  fprintf (dump_file, "Ignoring reg %d, has equiv memory\n",
4676 			   regno);
4677 		bitmap_set_bit (unusable_as_input, regno);
4678 		continue;
4679 	      }
4680 
4681 	    use = DF_REG_USE_CHAIN (regno);
4682 	    all_dominated = true;
4683 	    all_local = true;
4684 	    closest_use = NULL_RTX;
4685 	    for (; use; use = DF_REF_NEXT_REG (use))
4686 	      {
4687 		rtx_insn *insn;
4688 		if (!DF_REF_INSN_INFO (use))
4689 		  {
4690 		    all_dominated = false;
4691 		    all_local = false;
4692 		    break;
4693 		  }
4694 		insn = DF_REF_INSN (use);
4695 		if (DEBUG_INSN_P (insn))
4696 		  continue;
4697 		if (BLOCK_FOR_INSN (insn) != BLOCK_FOR_INSN (def_insn))
4698 		  all_local = false;
4699 		if (!insn_dominated_by_p (insn, def_insn, uid_luid))
4700 		  all_dominated = false;
4701 		if (closest_use != insn && closest_use != const0_rtx)
4702 		  {
4703 		    if (closest_use == NULL_RTX)
4704 		      closest_use = insn;
4705 		    else if (insn_dominated_by_p (closest_use, insn, uid_luid))
4706 		      closest_use = insn;
4707 		    else if (!insn_dominated_by_p (insn, closest_use, uid_luid))
4708 		      closest_use = const0_rtx;
4709 		  }
4710 	      }
4711 	    if (!all_dominated)
4712 	      {
4713 		if (dump_file)
4714 		  fprintf (dump_file, "Reg %d not all uses dominated by set\n",
4715 			   regno);
4716 		continue;
4717 	      }
4718 	    if (all_local)
4719 	      bitmap_set_bit (local, regno);
4720 	    if (closest_use == const0_rtx || closest_use == NULL
4721 		|| next_nonnote_nondebug_insn (def_insn) == closest_use)
4722 	      {
4723 		if (dump_file)
4724 		  fprintf (dump_file, "Reg %d uninteresting%s\n", regno,
4725 			   closest_use == const0_rtx || closest_use == NULL
4726 			   ? " (no unique first use)" : "");
4727 		continue;
4728 	      }
4729 	    if (HAVE_cc0 && reg_referenced_p (cc0_rtx, PATTERN (closest_use)))
4730 	      {
4731 		if (dump_file)
4732 		  fprintf (dump_file, "Reg %d: closest user uses cc0\n",
4733 			   regno);
4734 		continue;
4735 	      }
4736 
4737 	    bitmap_set_bit (interesting, regno);
4738 	    /* If we get here, we know closest_use is a non-NULL insn
4739 	       (as opposed to const0_rtx).  */
4740 	    closest_uses[regno] = as_a <rtx_insn *> (closest_use);
4741 
4742 	    if (dump_file && (all_local || all_dominated))
4743 	      {
4744 		fprintf (dump_file, "Reg %u:", regno);
4745 		if (all_local)
4746 		  fprintf (dump_file, " local to bb %d", bb->index);
4747 		if (all_dominated)
4748 		  fprintf (dump_file, " def dominates all uses");
4749 		if (closest_use != const0_rtx)
4750 		  fprintf (dump_file, " has unique first use");
4751 		fputs ("\n", dump_file);
4752 	      }
4753 	  }
4754     }
4755 
4756   EXECUTE_IF_SET_IN_BITMAP (interesting, 0, i, bi)
4757     {
4758       df_ref def = DF_REG_DEF_CHAIN (i);
4759       rtx_insn *def_insn = DF_REF_INSN (def);
4760       basic_block def_block = BLOCK_FOR_INSN (def_insn);
4761       bitmap def_bb_local = bb_local + def_block->index;
4762       bitmap def_bb_moveable = bb_moveable_reg_sets + def_block->index;
4763       bitmap def_bb_transp = bb_transp_live + def_block->index;
4764       bool local_to_bb_p = bitmap_bit_p (def_bb_local, i);
4765       rtx_insn *use_insn = closest_uses[i];
4766       df_ref use;
4767       bool all_ok = true;
4768       bool all_transp = true;
4769 
4770       if (!REG_P (DF_REF_REG (def)))
4771 	continue;
4772 
4773       if (!local_to_bb_p)
4774 	{
4775 	  if (dump_file)
4776 	    fprintf (dump_file, "Reg %u not local to one basic block\n",
4777 		     i);
4778 	  continue;
4779 	}
4780       if (reg_equiv_init (i) != NULL_RTX)
4781 	{
4782 	  if (dump_file)
4783 	    fprintf (dump_file, "Ignoring reg %u with equiv init insn\n",
4784 		     i);
4785 	  continue;
4786 	}
4787       if (!rtx_moveable_p (&PATTERN (def_insn), OP_IN))
4788 	{
4789 	  if (dump_file)
4790 	    fprintf (dump_file, "Found def insn %d for %d to be not moveable\n",
4791 		     INSN_UID (def_insn), i);
4792 	  continue;
4793 	}
4794       if (dump_file)
4795 	fprintf (dump_file, "Examining insn %d, def for %d\n",
4796 		 INSN_UID (def_insn), i);
4797       FOR_EACH_INSN_USE (use, def_insn)
4798 	{
4799 	  unsigned regno = DF_REF_REGNO (use);
4800 	  if (bitmap_bit_p (unusable_as_input, regno))
4801 	    {
4802 	      all_ok = false;
4803 	      if (dump_file)
4804 		fprintf (dump_file, "  found unusable input reg %u.\n", regno);
4805 	      break;
4806 	    }
4807 	  if (!bitmap_bit_p (def_bb_transp, regno))
4808 	    {
4809 	      if (bitmap_bit_p (def_bb_moveable, regno)
4810 		  && !control_flow_insn_p (use_insn)
4811 		  && (!HAVE_cc0 || !sets_cc0_p (use_insn)))
4812 		{
4813 		  if (modified_between_p (DF_REF_REG (use), def_insn, use_insn))
4814 		    {
4815 		      rtx_insn *x = NEXT_INSN (def_insn);
4816 		      while (!modified_in_p (DF_REF_REG (use), x))
4817 			{
4818 			  gcc_assert (x != use_insn);
4819 			  x = NEXT_INSN (x);
4820 			}
4821 		      if (dump_file)
4822 			fprintf (dump_file, "  input reg %u modified but insn %d moveable\n",
4823 				 regno, INSN_UID (x));
4824 		      emit_insn_after (PATTERN (x), use_insn);
4825 		      set_insn_deleted (x);
4826 		    }
4827 		  else
4828 		    {
4829 		      if (dump_file)
4830 			fprintf (dump_file, "  input reg %u modified between def and use\n",
4831 				 regno);
4832 		      all_transp = false;
4833 		    }
4834 		}
4835 	      else
4836 		all_transp = false;
4837 	    }
4838 	}
4839       if (!all_ok)
4840 	continue;
4841       if (!dbg_cnt (ira_move))
4842 	break;
4843       if (dump_file)
4844 	fprintf (dump_file, "  all ok%s\n", all_transp ? " and transp" : "");
4845 
4846       if (all_transp)
4847 	{
4848 	  rtx def_reg = DF_REF_REG (def);
4849 	  rtx newreg = ira_create_new_reg (def_reg);
4850 	  if (validate_change (def_insn, DF_REF_REAL_LOC (def), newreg, 0))
4851 	    {
4852 	      unsigned nregno = REGNO (newreg);
4853 	      emit_insn_before (gen_move_insn (def_reg, newreg), use_insn);
4854 	      nregno -= max_regs;
4855 	      pseudo_replaced_reg[nregno] = def_reg;
4856 	    }
4857 	}
4858     }
4859 
4860   FOR_EACH_BB_FN (bb, cfun)
4861     {
4862       bitmap_clear (bb_local + bb->index);
4863       bitmap_clear (bb_transp_live + bb->index);
4864       bitmap_clear (bb_moveable_reg_sets + bb->index);
4865     }
4866   free (uid_luid);
4867   free (closest_uses);
4868   free (bb_local);
4869   free (bb_transp_live);
4870   free (bb_moveable_reg_sets);
4871 
4872   last_moveable_pseudo = max_reg_num ();
4873 
4874   fix_reg_equiv_init ();
4875   expand_reg_info ();
4876   regstat_free_n_sets_and_refs ();
4877   regstat_free_ri ();
4878   regstat_init_n_sets_and_refs ();
4879   regstat_compute_ri ();
4880   free_dominance_info (CDI_DOMINATORS);
4881 }
4882 
4883 /* If the SET pattern SET is an assignment from a hard register to a pseudo
4884    which is live at CALL_DOM (if non-NULL, otherwise this check is omitted),
4885    return the destination.  Otherwise return NULL.  */
4886 
4887 static rtx
4888 interesting_dest_for_shprep_1 (rtx set, basic_block call_dom)
4889 {
4890   rtx src = SET_SRC (set);
4891   rtx dest = SET_DEST (set);
4892   if (!REG_P (src) || !HARD_REGISTER_P (src)
4893       || !REG_P (dest) || HARD_REGISTER_P (dest)
4894       || (call_dom && !bitmap_bit_p (df_get_live_in (call_dom), REGNO (dest))))
4895     return NULL;
4896   return dest;
4897 }
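
/* A hedged example of the kind of SET the function above accepts (the
   registers and mode are made up, x86-flavored):

       (set (reg:DI 130 [ param ]) (reg:DI 5 di))

   i.e. an incoming parameter copied from its hard register into a
   pseudo; reg 130 would be returned as the interesting destination,
   provided it is live at CALL_DOM when CALL_DOM is given.  */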
4898 
4899 /* If INSN is interesting for parameter range-splitting shrink-wrapping
4900    preparation, i.e. it is a single set from a hard register to a pseudo
4901    which is live at CALL_DOM (if non-NULL, otherwise this check is omitted),
4902    or a parallel containing exactly one such set, return the destination.
4903    Otherwise return NULL.  */
4904 
4905 static rtx
4906 interesting_dest_for_shprep (rtx_insn *insn, basic_block call_dom)
4907 {
4908   if (!INSN_P (insn))
4909     return NULL;
4910   rtx pat = PATTERN (insn);
4911   if (GET_CODE (pat) == SET)
4912     return interesting_dest_for_shprep_1 (pat, call_dom);
4913 
4914   if (GET_CODE (pat) != PARALLEL)
4915     return NULL;
4916   rtx ret = NULL;
4917   for (int i = 0; i < XVECLEN (pat, 0); i++)
4918     {
4919       rtx sub = XVECEXP (pat, 0, i);
4920       if (GET_CODE (sub) == USE || GET_CODE (sub) == CLOBBER)
4921 	continue;
4922       if (GET_CODE (sub) != SET
4923 	  || side_effects_p (sub))
4924 	return NULL;
4925       rtx dest = interesting_dest_for_shprep_1 (sub, call_dom);
4926       if (dest && ret)
4927 	return NULL;
4928       if (dest)
4929 	ret = dest;
4930     }
4931   return ret;
4932 }
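
/* An illustrative PARALLEL that the function above would accept (made-up,
   x86-flavored operands):

       (parallel [(set (reg:DI 131) (reg:DI 4 si))
		  (clobber (reg:CC 17 flags))])

   The CLOBBER is skipped and the single qualifying SET supplies the
   returned destination; a second qualifying SET would make the insn
   uninteresting.  */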
4933 
4934 /* Split the live ranges of pseudos that are loaded from hard registers in
4935    the first BB, at a BB that dominates all non-sibling calls, if such a BB
4936    can be found and is not in a loop.  Return true if the function has made
4937    any changes.  */
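
/* An illustrative sketch of the effect (register numbers made up): if the
   first BB contains (set (reg 130) (reg:DI 5 di)) and every non-sibling
   call is dominated by some block CALL_DOM that is not in a loop, the
   pass emits (set (reg 200) (reg 130)) right after CALL_DOM's bb_note and
   rewrites the uses of reg 130 dominated by CALL_DOM to use reg 200, so
   that the live range of reg 130 no longer has to extend beyond
   CALL_DOM.  */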
4938 
4939 static bool
4940 split_live_ranges_for_shrink_wrap (void)
4941 {
4942   basic_block bb, call_dom = NULL;
4943   basic_block first = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
4944   rtx_insn *insn, *last_interesting_insn = NULL;
4945   auto_bitmap need_new, reachable;
4946   vec<basic_block> queue;
4947 
4948   if (!SHRINK_WRAPPING_ENABLED)
4949     return false;
4950 
4951   queue.create (n_basic_blocks_for_fn (cfun));
4952 
4953   FOR_EACH_BB_FN (bb, cfun)
4954     FOR_BB_INSNS (bb, insn)
4955       if (CALL_P (insn) && !SIBLING_CALL_P (insn))
4956 	{
4957 	  if (bb == first)
4958 	    {
4959 	      queue.release ();
4960 	      return false;
4961 	    }
4962 
4963 	  bitmap_set_bit (need_new, bb->index);
4964 	  bitmap_set_bit (reachable, bb->index);
4965 	  queue.quick_push (bb);
4966 	  break;
4967 	}
4968 
4969   if (queue.is_empty ())
4970     {
4971       queue.release ();
4972       return false;
4973     }
4974 
4975   while (!queue.is_empty ())
4976     {
4977       edge e;
4978       edge_iterator ei;
4979 
4980       bb = queue.pop ();
4981       FOR_EACH_EDGE (e, ei, bb->succs)
4982 	if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
4983 	    && bitmap_set_bit (reachable, e->dest->index))
4984 	  queue.quick_push (e->dest);
4985     }
4986   queue.release ();
4987 
4988   FOR_BB_INSNS (first, insn)
4989     {
4990       rtx dest = interesting_dest_for_shprep (insn, NULL);
4991       if (!dest)
4992 	continue;
4993 
4994       if (DF_REG_DEF_COUNT (REGNO (dest)) > 1)
4995 	return false;
4996 
4997       for (df_ref use = DF_REG_USE_CHAIN (REGNO (dest));
4998 	   use;
4999 	   use = DF_REF_NEXT_REG (use))
5000 	{
5001 	  int ubbi = DF_REF_BB (use)->index;
5002 	  if (bitmap_bit_p (reachable, ubbi))
5003 	    bitmap_set_bit (need_new, ubbi);
5004 	}
5005       last_interesting_insn = insn;
5006     }
5007 
5008   if (!last_interesting_insn)
5009     return false;
5010 
5011   call_dom = nearest_common_dominator_for_set (CDI_DOMINATORS, need_new);
5012   if (call_dom == first)
5013     return false;
5014 
5015   loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
5016   while (bb_loop_depth (call_dom) > 0)
5017     call_dom = get_immediate_dominator (CDI_DOMINATORS, call_dom);
5018   loop_optimizer_finalize ();
5019 
5020   if (call_dom == first)
5021     return false;
5022 
5023   calculate_dominance_info (CDI_POST_DOMINATORS);
5024   if (dominated_by_p (CDI_POST_DOMINATORS, first, call_dom))
5025     {
5026       free_dominance_info (CDI_POST_DOMINATORS);
5027       return false;
5028     }
5029   free_dominance_info (CDI_POST_DOMINATORS);
5030 
5031   if (dump_file)
5032     fprintf (dump_file, "Will split live ranges of parameters at BB %i\n",
5033 	     call_dom->index);
5034 
5035   bool ret = false;
5036   FOR_BB_INSNS (first, insn)
5037     {
5038       rtx dest = interesting_dest_for_shprep (insn, call_dom);
5039       if (!dest || dest == pic_offset_table_rtx)
5040 	continue;
5041 
5042       bool need_newreg = false;
5043       df_ref use, next;
5044       for (use = DF_REG_USE_CHAIN (REGNO (dest)); use; use = next)
5045 	{
5046 	  rtx_insn *uin = DF_REF_INSN (use);
5047 	  next = DF_REF_NEXT_REG (use);
5048 
5049 	  if (DEBUG_INSN_P (uin))
5050 	    continue;
5051 
5052 	  basic_block ubb = BLOCK_FOR_INSN (uin);
5053 	  if (ubb == call_dom
5054 	      || dominated_by_p (CDI_DOMINATORS, ubb, call_dom))
5055 	    {
5056 	      need_newreg = true;
5057 	      break;
5058 	    }
5059 	}
5060 
5061       if (need_newreg)
5062 	{
5063 	  rtx newreg = ira_create_new_reg (dest);
5064 
5065 	  for (use = DF_REG_USE_CHAIN (REGNO (dest)); use; use = next)
5066 	    {
5067 	      rtx_insn *uin = DF_REF_INSN (use);
5068 	      next = DF_REF_NEXT_REG (use);
5069 
5070 	      basic_block ubb = BLOCK_FOR_INSN (uin);
5071 	      if (ubb == call_dom
5072 		  || dominated_by_p (CDI_DOMINATORS, ubb, call_dom))
5073 		validate_change (uin, DF_REF_REAL_LOC (use), newreg, true);
5074 	    }
5075 
5076 	  rtx_insn *new_move = gen_move_insn (newreg, dest);
5077 	  emit_insn_after (new_move, bb_note (call_dom));
5078 	  if (dump_file)
5079 	    {
5080 	      fprintf (dump_file, "Split live-range of register ");
5081 	      print_rtl_single (dump_file, dest);
5082 	    }
5083 	  ret = true;
5084 	}
5085 
5086       if (insn == last_interesting_insn)
5087 	break;
5088     }
5089   apply_change_group ();
5090   return ret;
5091 }
5092 
5093 /* Perform the second half of the transformation started in
5094    find_moveable_pseudos.  We look for instances where the newly introduced
5095    pseudo remains unallocated, and remove it by moving the definition to
5096    just before its use, replacing the move instruction generated by
5097    find_moveable_pseudos.  */
5098 static void
5099 move_unallocated_pseudos (void)
5100 {
5101   int i;
5102   for (i = first_moveable_pseudo; i < last_moveable_pseudo; i++)
5103     if (reg_renumber[i] < 0)
5104       {
5105 	int idx = i - first_moveable_pseudo;
5106 	rtx other_reg = pseudo_replaced_reg[idx];
5107 	rtx_insn *def_insn = DF_REF_INSN (DF_REG_DEF_CHAIN (i));
5108 	/* The use must follow all definitions of OTHER_REG, so we can
5109 	   insert the new definition immediately after any of them.  */
5110 	df_ref other_def = DF_REG_DEF_CHAIN (REGNO (other_reg));
5111 	rtx_insn *move_insn = DF_REF_INSN (other_def);
5112 	rtx_insn *newinsn = emit_insn_after (PATTERN (def_insn), move_insn);
5113 	rtx set;
5114 	int success;
5115 
5116 	if (dump_file)
5117 	  fprintf (dump_file, "moving def of %d (insn %d now) ",
5118 		   REGNO (other_reg), INSN_UID (def_insn));
5119 
5120 	delete_insn (move_insn);
5121 	while ((other_def = DF_REG_DEF_CHAIN (REGNO (other_reg))))
5122 	  delete_insn (DF_REF_INSN (other_def));
5123 	delete_insn (def_insn);
5124 
5125 	set = single_set (newinsn);
5126 	success = validate_change (newinsn, &SET_DEST (set), other_reg, 0);
5127 	gcc_assert (success);
5128 	if (dump_file)
5129 	  fprintf (dump_file, " %d) rather than keep unallocated replacement %d\n",
5130 		   INSN_UID (newinsn), i);
5131 	SET_REG_N_REFS (i, 0);
5132       }
5133 }
5134 
5135 /* If the backend knows where to allocate pseudos for hard
5136    register initial values, register these allocations now.  */
5137 static void
5138 allocate_initial_values (void)
5139 {
5140   if (targetm.allocate_initial_value)
5141     {
5142       rtx hreg, preg, x;
5143       int i, regno;
5144 
5145       for (i = 0; HARD_REGISTER_NUM_P (i); i++)
5146 	{
5147 	  if (! initial_value_entry (i, &hreg, &preg))
5148 	    break;
5149 
5150 	  x = targetm.allocate_initial_value (hreg);
5151 	  regno = REGNO (preg);
5152 	  if (x && REG_N_SETS (regno) <= 1)
5153 	    {
5154 	      if (MEM_P (x))
5155 		reg_equiv_memory_loc (regno) = x;
5156 	      else
5157 		{
5158 		  basic_block bb;
5159 		  int new_regno;
5160 
5161 		  gcc_assert (REG_P (x));
5162 		  new_regno = REGNO (x);
5163 		  reg_renumber[regno] = new_regno;
5164 		  /* Poke the regno right into regno_reg_rtx so that even
5165 		     fixed regs are accepted.  */
5166 		  SET_REGNO (preg, new_regno);
5167 		  /* Update global register liveness information.  */
5168 		  FOR_EACH_BB_FN (bb, cfun)
5169 		    {
5170 		      if (REGNO_REG_SET_P (df_get_live_in (bb), regno))
5171 			SET_REGNO_REG_SET (df_get_live_in (bb), new_regno);
5172 		      if (REGNO_REG_SET_P (df_get_live_out (bb), regno))
5173 			SET_REGNO_REG_SET (df_get_live_out (bb), new_regno);
5174 		    }
5175 		}
5176 	    }
5177 	}
5178 
5179       gcc_checking_assert (! initial_value_entry (FIRST_PSEUDO_REGISTER,
5180 						  &hreg, &preg));
5181     }
5182 }
5183 
5184 
5185 /* True when we use LRA instead of reload pass for the current
5186    function.  */
5187 bool ira_use_lra_p;
5188 
5189 /* True if we have allocno conflicts.  It is false for non-optimized
5190    mode or when the conflict table is too big.  */
5191 bool ira_conflicts_p;
5192 
5193 /* Saved between IRA and reload.  */
5194 static int saved_flag_ira_share_spill_slots;
5195 
5196 /* This is the main entry of IRA.  */
5197 static void
5198 ira (FILE *f)
5199 {
5200   bool loops_p;
5201   int ira_max_point_before_emit;
5202   bool saved_flag_caller_saves = flag_caller_saves;
5203   enum ira_region saved_flag_ira_region = flag_ira_region;
5204 
5205   clear_bb_flags ();
5206 
5207   /* Determine if the current function is a leaf before running IRA
5208      since this can impact optimizations done by the prologue and
5209      epilogue thus changing register elimination offsets.
5210      Other target callbacks may use crtl->is_leaf too, including
5211      SHRINK_WRAPPING_ENABLED, so initialize as early as possible.  */
5212   crtl->is_leaf = leaf_function_p ();
5213 
5214   /* Perform target specific PIC register initialization.  */
5215   targetm.init_pic_reg ();
5216 
5217   ira_conflicts_p = optimize > 0;
5218 
5219   /* Determine the number of pseudos actually requiring coloring.  */
5220   unsigned int num_used_regs = 0;
5221   for (unsigned int i = FIRST_PSEUDO_REGISTER; i < DF_REG_SIZE (df); i++)
5222     if (DF_REG_DEF_COUNT (i) || DF_REG_USE_COUNT (i))
5223       num_used_regs++;
5224 
5225   /* If there are too many pseudos and/or basic blocks (e.g. 10K
5226      pseudos and 10K blocks or 100K pseudos and 1K blocks), we will
5227      use simplified and faster algorithms in LRA.  */
5228   lra_simple_p
5229     = ira_use_lra_p
5230       && num_used_regs >= (1U << 26) / last_basic_block_for_fn (cfun);
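
  /* A worked instance of the heuristic above (the numbers are
     illustrative): with 1000 basic blocks the threshold is
     (1U << 26) / 1000 == 67108, so a function with 70000 used pseudos
     would take the simplified path while one with 50000 would not.  */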
5231 
5232   if (lra_simple_p)
5233     {
5234       /* This allows live range splitting to be skipped in LRA.  */
5235       flag_caller_saves = false;
5236       /* There is no point in doing regional allocation when we use
5237 	the simplified LRA.  */
5238       flag_ira_region = IRA_REGION_ONE;
5239       ira_conflicts_p = false;
5240     }
5241 
5242 #ifndef IRA_NO_OBSTACK
5243   gcc_obstack_init (&ira_obstack);
5244 #endif
5245   bitmap_obstack_initialize (&ira_bitmap_obstack);
5246 
5247   /* LRA uses its own infrastructure to handle caller save registers.  */
5248   if (flag_caller_saves && !ira_use_lra_p)
5249     init_caller_save ();
5250 
5251   if (flag_ira_verbose < 10)
5252     {
5253       internal_flag_ira_verbose = flag_ira_verbose;
5254       ira_dump_file = f;
5255     }
5256   else
5257     {
5258       internal_flag_ira_verbose = flag_ira_verbose - 10;
5259       ira_dump_file = stderr;
5260     }
5261 
5262   setup_prohibited_mode_move_regs ();
5263   decrease_live_ranges_number ();
5264   df_note_add_problem ();
5265 
5266   /* DF_LIVE can't be used in the register allocator, too many other
5267      parts of the compiler depend on using the "classic" liveness
5268      interpretation of the DF_LR problem.  See PR38711.
5269      Remove the problem, so that we don't spend time updating it in
5270      any of the df_analyze() calls during IRA/LRA.  */
5271   if (optimize > 1)
5272     df_remove_problem (df_live);
5273   gcc_checking_assert (df_live == NULL);
5274 
5275   if (flag_checking)
5276     df->changeable_flags |= DF_VERIFY_SCHEDULED;
5277 
5278   df_analyze ();
5279 
5280   init_reg_equiv ();
5281   if (ira_conflicts_p)
5282     {
5283       calculate_dominance_info (CDI_DOMINATORS);
5284 
5285       if (split_live_ranges_for_shrink_wrap ())
5286 	df_analyze ();
5287 
5288       free_dominance_info (CDI_DOMINATORS);
5289     }
5290 
5291   df_clear_flags (DF_NO_INSN_RESCAN);
5292 
5293   indirect_jump_optimize ();
5294   if (delete_trivially_dead_insns (get_insns (), max_reg_num ()))
5295     df_analyze ();
5296 
5297   regstat_init_n_sets_and_refs ();
5298   regstat_compute_ri ();
5299 
5300   /* If we are not optimizing, then this is the only place before
5301      register allocation where dataflow is done.  And that is needed
5302      to generate these warnings.  */
5303   if (warn_clobbered)
5304     generate_setjmp_warnings ();
5305 
5306   if (resize_reg_info () && flag_ira_loop_pressure)
5307     ira_set_pseudo_classes (true, ira_dump_file);
5308 
5309   init_alias_analysis ();
5310   loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
5311   reg_equiv = XCNEWVEC (struct equivalence, max_reg_num ());
5312   update_equiv_regs_prescan ();
5313   update_equiv_regs ();
5314 
5315   /* Don't move insns if live range shrinkage or register
5316      pressure-sensitive scheduling were done because it will not
5317      improve allocation but likely worsen insn scheduling.  */
5318   if (optimize
5319       && !flag_live_range_shrinkage
5320       && !(flag_sched_pressure && flag_schedule_insns))
5321     combine_and_move_insns ();
5322 
5323   /* Gather additional equivalences with memory.  */
5324   if (optimize)
5325     add_store_equivs ();
5326 
5327   loop_optimizer_finalize ();
5328   free_dominance_info (CDI_DOMINATORS);
5329   end_alias_analysis ();
5330   free (reg_equiv);
5331 
5332   setup_reg_equiv ();
5333   grow_reg_equivs ();
5334   setup_reg_equiv_init ();
5335 
5336   allocated_reg_info_size = max_reg_num ();
5337 
5338   /* It is not worth doing such an improvement when we use simple
5339      allocation, either because of -O0 usage or because the function
5340      is too big.  */
5341   if (ira_conflicts_p)
5342     find_moveable_pseudos ();
5343 
5344   max_regno_before_ira = max_reg_num ();
5345   ira_setup_eliminable_regset ();
5346 
5347   ira_overall_cost = ira_reg_cost = ira_mem_cost = 0;
5348   ira_load_cost = ira_store_cost = ira_shuffle_cost = 0;
5349   ira_move_loops_num = ira_additional_jumps_num = 0;
5350 
5351   ira_assert (current_loops == NULL);
5352   if (flag_ira_region == IRA_REGION_ALL || flag_ira_region == IRA_REGION_MIXED)
5353     loop_optimizer_init (AVOID_CFG_MODIFICATIONS | LOOPS_HAVE_RECORDED_EXITS);
5354 
5355   if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
5356     fprintf (ira_dump_file, "Building IRA IR\n");
5357   loops_p = ira_build ();
5358 
5359   ira_assert (ira_conflicts_p || !loops_p);
5360 
5361   saved_flag_ira_share_spill_slots = flag_ira_share_spill_slots;
5362   if (too_high_register_pressure_p () || cfun->calls_setjmp)
5363     /* It just wastes the compiler's time to pack spilled pseudos into
5364        stack slots in this case -- prohibit it.  We also do this if
5365        there is a setjmp call, because the compiler is required to
5366        preserve the value of a variable not modified between setjmp
5367        and longjmp, and sharing slots does not guarantee that.  */
5368     flag_ira_share_spill_slots = FALSE;
5369 
5370   ira_color ();
5371 
5372   ira_max_point_before_emit = ira_max_point;
5373 
5374   ira_initiate_emit_data ();
5375 
5376   ira_emit (loops_p);
5377 
5378   max_regno = max_reg_num ();
5379   if (ira_conflicts_p)
5380     {
5381       if (! loops_p)
5382 	{
5383 	  if (! ira_use_lra_p)
5384 	    ira_initiate_assign ();
5385 	}
5386       else
5387 	{
5388 	  expand_reg_info ();
5389 
5390 	  if (ira_use_lra_p)
5391 	    {
5392 	      ira_allocno_t a;
5393 	      ira_allocno_iterator ai;
5394 
5395 	      FOR_EACH_ALLOCNO (a, ai)
5396                 {
5397                   int old_regno = ALLOCNO_REGNO (a);
5398                   int new_regno = REGNO (ALLOCNO_EMIT_DATA (a)->reg);
5399 
5400                   ALLOCNO_REGNO (a) = new_regno;
5401 
5402                   if (old_regno != new_regno)
5403                     setup_reg_classes (new_regno, reg_preferred_class (old_regno),
5404                                        reg_alternate_class (old_regno),
5405                                        reg_allocno_class (old_regno));
5406                 }
5407 	    }
5408 	  else
5409 	    {
5410 	      if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
5411 		fprintf (ira_dump_file, "Flattening IR\n");
5412 	      ira_flattening (max_regno_before_ira, ira_max_point_before_emit);
5413 	    }
5414 	  /* New insns were generated: add notes and recalculate live
5415 	     info.  */
5416 	  df_analyze ();
5417 
5418 	  /* ??? Rebuild the loop tree, but why?  Does the loop tree
5419 	     change if new insns were generated?  Can that be handled
5420 	     by updating the loop tree incrementally?  */
5421 	  loop_optimizer_finalize ();
5422 	  free_dominance_info (CDI_DOMINATORS);
5423 	  loop_optimizer_init (AVOID_CFG_MODIFICATIONS
5424 			       | LOOPS_HAVE_RECORDED_EXITS);
5425 
5426 	  if (! ira_use_lra_p)
5427 	    {
5428 	      setup_allocno_assignment_flags ();
5429 	      ira_initiate_assign ();
5430 	      ira_reassign_conflict_allocnos (max_regno);
5431 	    }
5432 	}
5433     }
5434 
5435   ira_finish_emit_data ();
5436 
5437   setup_reg_renumber ();
5438 
5439   calculate_allocation_cost ();
5440 
5441 #ifdef ENABLE_IRA_CHECKING
5442   if (ira_conflicts_p && ! ira_use_lra_p)
5443     /* Unlike the reload pass, LRA does not use any conflict info
5444        from IRA.  We don't rebuild conflict info for LRA (through an
5445        ira_flattening call) and so cannot use the check here.  We could
5446        rebuild this info for LRA in checking mode, but there is a risk
5447        that code generated with and without the check would differ
5448        slightly.  Calling ira_flattening in any mode would just waste
5449        CPU time.  So do not check the allocation for LRA.  */
5450     check_allocation ();
5451 #endif
5452 
5453   if (max_regno != max_regno_before_ira)
5454     {
5455       regstat_free_n_sets_and_refs ();
5456       regstat_free_ri ();
5457       regstat_init_n_sets_and_refs ();
5458       regstat_compute_ri ();
5459     }
5460 
5461   overall_cost_before = ira_overall_cost;
5462   if (! ira_conflicts_p)
5463     grow_reg_equivs ();
5464   else
5465     {
5466       fix_reg_equiv_init ();
5467 
5468 #ifdef ENABLE_IRA_CHECKING
5469       print_redundant_copies ();
5470 #endif
5471       if (! ira_use_lra_p)
5472 	{
5473 	  ira_spilled_reg_stack_slots_num = 0;
5474 	  ira_spilled_reg_stack_slots
5475 	    = ((class ira_spilled_reg_stack_slot *)
5476 	       ira_allocate (max_regno
5477 			     * sizeof (class ira_spilled_reg_stack_slot)));
5478 	  memset ((void *)ira_spilled_reg_stack_slots, 0,
5479 		  max_regno * sizeof (class ira_spilled_reg_stack_slot));
5480 	}
5481     }
5482   allocate_initial_values ();
5483 
5484   /* See comment for find_moveable_pseudos call.  */
5485   if (ira_conflicts_p)
5486     move_unallocated_pseudos ();
5487 
5488   /* Restore original values.  */
5489   if (lra_simple_p)
5490     {
5491       flag_caller_saves = saved_flag_caller_saves;
5492       flag_ira_region = saved_flag_ira_region;
5493     }
5494 }
5495 
5496 static void
5497 do_reload (void)
5498 {
5499   basic_block bb;
5500   bool need_dce;
5501   unsigned pic_offset_table_regno = INVALID_REGNUM;
5502 
5503   if (flag_ira_verbose < 10)
5504     ira_dump_file = dump_file;
5505 
5506   /* If pic_offset_table_rtx is a pseudo register, then keep it a
5507      pseudo after reload to avoid possible wrong uses of the hard reg
5508      assigned to it.  */
5509   if (pic_offset_table_rtx
5510       && REGNO (pic_offset_table_rtx) >= FIRST_PSEUDO_REGISTER)
5511     pic_offset_table_regno = REGNO (pic_offset_table_rtx);
5512 
5513   timevar_push (TV_RELOAD);
5514   if (ira_use_lra_p)
5515     {
5516       if (current_loops != NULL)
5517 	{
5518 	  loop_optimizer_finalize ();
5519 	  free_dominance_info (CDI_DOMINATORS);
5520 	}
5521       FOR_ALL_BB_FN (bb, cfun)
5522 	bb->loop_father = NULL;
5523       current_loops = NULL;
5524 
5525       ira_destroy ();
5526 
5527       lra (ira_dump_file);
5528       /* ???!!! Move it before lra () when we use ira_reg_equiv in
5529 	 LRA.  */
5530       vec_free (reg_equivs);
5531       reg_equivs = NULL;
5532       need_dce = false;
5533     }
5534   else
5535     {
5536       df_set_flags (DF_NO_INSN_RESCAN);
5537       build_insn_chain ();
5538 
5539       need_dce = reload (get_insns (), ira_conflicts_p);
5540     }
5541 
5542   timevar_pop (TV_RELOAD);
5543 
5544   timevar_push (TV_IRA);
5545 
5546   if (ira_conflicts_p && ! ira_use_lra_p)
5547     {
5548       ira_free (ira_spilled_reg_stack_slots);
5549       ira_finish_assign ();
5550     }
5551 
5552   if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL
5553       && overall_cost_before != ira_overall_cost)
5554     fprintf (ira_dump_file, "+++Overall after reload %" PRId64 "\n",
5555 	     ira_overall_cost);
5556 
5557   flag_ira_share_spill_slots = saved_flag_ira_share_spill_slots;
5558 
5559   if (! ira_use_lra_p)
5560     {
5561       ira_destroy ();
5562       if (current_loops != NULL)
5563 	{
5564 	  loop_optimizer_finalize ();
5565 	  free_dominance_info (CDI_DOMINATORS);
5566 	}
5567       FOR_ALL_BB_FN (bb, cfun)
5568 	bb->loop_father = NULL;
5569       current_loops = NULL;
5570 
5571       regstat_free_ri ();
5572       regstat_free_n_sets_and_refs ();
5573     }
5574 
5575   if (optimize)
5576     cleanup_cfg (CLEANUP_EXPENSIVE);
5577 
5578   finish_reg_equiv ();
5579 
5580   bitmap_obstack_release (&ira_bitmap_obstack);
5581 #ifndef IRA_NO_OBSTACK
5582   obstack_free (&ira_obstack, NULL);
5583 #endif
5584 
5585   /* The code after the reload has changed so much that at this point
5586      we might as well just rescan everything.  Note that
5587      df_rescan_all_insns is not going to help here because it does not
5588      touch the artificial uses and defs.  */
5589   df_finish_pass (true);
5590   df_scan_alloc (NULL);
5591   df_scan_blocks ();
5592 
5593   if (optimize > 1)
5594     {
5595       df_live_add_problem ();
5596       df_live_set_all_dirty ();
5597     }
5598 
5599   if (optimize)
5600     df_analyze ();
5601 
5602   if (need_dce && optimize)
5603     run_fast_dce ();
5604 
5605   /* Diagnose uses of the hard frame pointer when it is used as a global
5606      register.  Often we can get away with letting the user appropriate
5607      the frame pointer, but we should let them know when code generation
5608      makes that impossible.  */
5609   if (global_regs[HARD_FRAME_POINTER_REGNUM] && frame_pointer_needed)
5610     {
5611       tree decl = global_regs_decl[HARD_FRAME_POINTER_REGNUM];
5612       error_at (DECL_SOURCE_LOCATION (current_function_decl),
5613                 "frame pointer required, but reserved");
5614       inform (DECL_SOURCE_LOCATION (decl), "for %qD", decl);
5615     }
5616 
5617   /* If we are doing generic stack checking, give a warning if this
5618      function's frame size is larger than we expect.  */
5619   if (flag_stack_check == GENERIC_STACK_CHECK)
5620     {
5621       poly_int64 size = get_frame_size () + STACK_CHECK_FIXED_FRAME_SIZE;
5622 
5623       for (int i = 0; i < FIRST_PSEUDO_REGISTER; i++)
5624 	if (df_regs_ever_live_p (i)
5625 	    && !fixed_regs[i]
5626 	    && !crtl->abi->clobbers_full_reg_p (i))
5627 	  size += UNITS_PER_WORD;
5628 
5629       if (constant_lower_bound (size) > STACK_CHECK_MAX_FRAME_SIZE)
5630 	warning (0, "frame size too large for reliable stack checking");
5631     }
5632 
5633   if (pic_offset_table_regno != INVALID_REGNUM)
5634     pic_offset_table_rtx = gen_rtx_REG (Pmode, pic_offset_table_regno);
5635 
5636   timevar_pop (TV_IRA);
5637 }
5638 
5639 /* Run the integrated register allocator.  */
5640 
5641 namespace {
5642 
5643 const pass_data pass_data_ira =
5644 {
5645   RTL_PASS, /* type */
5646   "ira", /* name */
5647   OPTGROUP_NONE, /* optinfo_flags */
5648   TV_IRA, /* tv_id */
5649   0, /* properties_required */
5650   0, /* properties_provided */
5651   0, /* properties_destroyed */
5652   0, /* todo_flags_start */
5653   TODO_do_not_ggc_collect, /* todo_flags_finish */
5654 };
5655 
5656 class pass_ira : public rtl_opt_pass
5657 {
5658 public:
5659   pass_ira (gcc::context *ctxt)
5660     : rtl_opt_pass (pass_data_ira, ctxt)
5661   {}
5662 
5663   /* opt_pass methods: */
5664   virtual bool gate (function *)
5665     {
5666       return !targetm.no_register_allocation;
5667     }
5668   virtual unsigned int execute (function *)
5669     {
5670       ira (dump_file);
5671       return 0;
5672     }
5673 
5674 }; // class pass_ira
5675 
5676 } // anon namespace
5677 
5678 rtl_opt_pass *
5679 make_pass_ira (gcc::context *ctxt)
5680 {
5681   return new pass_ira (ctxt);
5682 }
5683 
5684 namespace {
5685 
5686 const pass_data pass_data_reload =
5687 {
5688   RTL_PASS, /* type */
5689   "reload", /* name */
5690   OPTGROUP_NONE, /* optinfo_flags */
5691   TV_RELOAD, /* tv_id */
5692   0, /* properties_required */
5693   0, /* properties_provided */
5694   0, /* properties_destroyed */
5695   0, /* todo_flags_start */
5696   0, /* todo_flags_finish */
5697 };
5698 
5699 class pass_reload : public rtl_opt_pass
5700 {
5701 public:
5702   pass_reload (gcc::context *ctxt)
5703     : rtl_opt_pass (pass_data_reload, ctxt)
5704   {}
5705 
5706   /* opt_pass methods: */
5707   virtual bool gate (function *)
5708     {
5709       return !targetm.no_register_allocation;
5710     }
5711   virtual unsigned int execute (function *)
5712     {
5713       do_reload ();
5714       return 0;
5715     }
5716 
5717 }; // class pass_reload
5718 
5719 } // anon namespace
5720 
5721 rtl_opt_pass *
5722 make_pass_reload (gcc::context *ctxt)
5723 {
5724   return new pass_reload (ctxt);
5725 }
5726