1 /* Integrated Register Allocator (IRA) entry point.
2    Copyright (C) 2006-2021 Free Software Foundation, Inc.
3    Contributed by Vladimir Makarov <vmakarov@redhat.com>.
4 
5 This file is part of GCC.
6 
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11 
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
15 for more details.
16 
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3.  If not see
19 <http://www.gnu.org/licenses/>.  */
20 
21 /* The integrated register allocator (IRA) is a
22    regional register allocator performing graph coloring on a top-down
23    traversal of nested regions.  Graph coloring in a region is based
24    on the Chaitin-Briggs algorithm.  It is called integrated because
25    register coalescing, register live range splitting, and choosing a
26    better hard register are done on-the-fly during coloring.  Register
27    coalescing and choosing a cheaper hard register are done by hard
28    register preferencing during hard register assignment.  Live
29    range splitting is a byproduct of the regional register allocation.
30 
31    Major IRA notions are:
32 
33      o *Region* is a part of the CFG where graph coloring based on the
34        Chaitin-Briggs algorithm is done.  IRA can work on any set of
35        nested CFG regions forming a tree.  Currently the regions are
36        the entire function for the root region and natural loops for
37        the other regions.  Therefore the data structure representing a
38        region is called loop_tree_node.
39 
40      o *Allocno class* is a register class used for allocation of a
41        given allocno.  It means that only a hard register of the given
42        register class can be assigned to the given allocno.  In reality,
43        an even smaller subset of (*profitable*) hard registers can be
44        assigned.  In rare cases, the subset can be even smaller
45        because our modification of the Chaitin-Briggs algorithm requires
46        that the sets of hard registers that can be assigned to allocnos
47        form a forest, i.e. the sets can be ordered in a way where any
48        previous set either does not intersect the given set or is a
49        superset of the given set.
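
       As an illustration (a made-up example, not a description of any
       particular target), profitable sets such as

         {r0..r7}   {r0..r3}   {r0}   {r8..r15}

       satisfy the forest property: any two of them are either disjoint
       or one contains the other.  Adding a set like {r2..r5}, which
       partially overlaps {r0..r3}, would violate it.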
50 
51      o *Pressure class* is a register class belonging to a set of
52        register classes containing all of the hard-registers available
53        for register allocation.  The set of all pressure classes for a
54        target is defined in the corresponding machine-description file
55        according to some criteria.  Register pressure is calculated only
56        for pressure classes and it affects some IRA decisions, such as
57        forming allocation regions.
58 
59      o *Allocno* represents the live range of a pseudo-register in a
60        region.  Besides the obvious attributes like the corresponding
61        pseudo-register number, allocno class, conflicting allocnos and
62        conflicting hard-registers, there are a few allocno attributes
63        which are important for understanding the allocation algorithm:
64 
65        - *Live ranges*.  This is a list of ranges of *program points*
66          where the allocno lives.  Program points represent places
67          where a pseudo can be born or become dead (there are
68          approximately twice as many program points as insns)
69          and they are represented by integers starting with 0.  The
70          live ranges are used to find conflicts between allocnos.
71          They also play a very important role for the transformation of
72          the IRA internal representation of several regions into a
73          one-region representation.  The latter is used during the reload
74          pass because each allocno represents all of the
75          corresponding pseudo-registers.
76 
77        - *Hard-register costs*.  This is a vector of size equal to the
78          number of available hard-registers of the allocno class.  The
79          cost of a callee-clobbered hard-register for an allocno is
80          increased by the cost of save/restore code around the calls
81          through the given allocno's life.  If the allocno is a move
82          instruction operand and another operand is a hard-register of
83          the allocno class, the cost of the hard-register is decreased
84          by the move cost.
85 
86          When an allocno is assigned, the hard-register with minimal
87          full cost is used.  Initially, a hard-register's full cost is
88          the corresponding value from the hard-register's cost vector.
89          If the allocno is connected by a *copy* (see below) to
90          another allocno which has just received a hard-register, the
91          cost of the hard-register is decreased.  Before choosing a
92          hard-register for an allocno, the allocno's current costs of
93          the hard-registers are modified by the conflict hard-register
94          costs of all of the conflicting allocnos which are not
95          assigned yet.
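
         As a simplified, purely illustrative example (the numbers are
         invented), suppose the cost vector entry of a hard register
         for an allocno is 2000 after the save/restore and move
         adjustments described above.  If a copy connects the allocno
         to an already colored allocno that received this register,
         the full cost might be decreased by, say, 500, giving
         2000 - 500 = 1500, and the hard register with the smallest
         resulting full cost is the one chosen.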
96 
97        - *Conflict hard-register costs*.  This is a vector of the same
98          size as the hard-register costs vector.  To permit an
99          unassigned allocno to get a better hard-register, IRA uses
100          this vector to calculate the final full cost of the
101          available hard-registers.  Conflict hard-register costs of an
102          unassigned allocno are also changed with a change of the
103          hard-register cost of the allocno when a copy involving the
104          allocno is processed as described above.  This is done to
105          show other unassigned allocnos that a given allocno prefers
106          some hard-registers in order to remove the move instruction
107          corresponding to the copy.
108 
109      o *Cap*.  If a pseudo-register does not live in a region but
110        lives in a nested region, IRA creates a special allocno called
111        a cap in the outer region.  A region cap is also created for a
112        subregion cap.
113 
114      o *Copy*.  Allocnos can be connected by copies.  Copies are used
115        to modify hard-register costs for allocnos during coloring.
116        Such modifications reflect a preference to use the same
117        hard-register for the allocnos connected by copies.  Usually
118        copies are created for move insns (in this case it results in
119        register coalescing).  But IRA also creates copies for operands
120        of an insn which should be assigned to the same hard-register
121        due to constraints in the machine description (it usually
122        results in removing a move generated in reload to satisfy
123        the constraints) and copies between the allocno which is
124        the output operand of an instruction and the allocno which is
125        an input operand dying in the instruction (creation of such
126        copies results in less register shuffling).  IRA *does not*
127        create copies between allocnos of the same pseudo-register from
128        different regions because we use another technique for propagating
129        hard-register preferences on the borders of regions.
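
       For example (a schematic sketch, not literal IRA output), for a
       move insn like "(set (reg 130) (reg 131))" IRA creates a copy
       connecting the allocnos of pseudos 130 and 131; for an insn
       whose constraints require the output operand to reuse an input
       operand (a "0" matching constraint), a copy between the
       corresponding output and input allocnos serves the same purpose.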
130 
131    Allocnos (including caps) for the upper region in the region tree
132    *accumulate* information important for coloring from allocnos with
133    the same pseudo-register from nested regions.  This includes
134    hard-register and memory costs, conflicts with hard-registers,
135    allocno conflicts, allocno copies and more.  *Thus, attributes for
136    allocnos in a region have the same values as if the region had no
137    subregions*.  It means that attributes for allocnos in the
138    outermost region corresponding to the function have the same values
139    as though the allocation used only one region which is the entire
140    function.  It also means that we can look at IRA's work as if it
141    first did allocation for the whole function, then improved the
142    allocation for the loops, then for their subloops, and so on.
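
   For example (with hypothetical numbering), if pseudo 200 has an
   allocno a1 in a loop region and an allocno a0 in the root region,
   then a0's costs, conflicts and copies include everything accumulated
   from a1, so coloring the root region sees pseudo 200 exactly as a
   single-region allocation would.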
143 
144    IRA major passes are:
145 
146      o Building IRA internal representation which consists of the
147        following subpasses:
148 
149        * First, IRA builds regions and creates allocnos (file
150          ira-build.c) and initializes most of their attributes.
151 
152        * Then IRA finds an allocno class for each allocno and
153          calculates its initial (non-accumulated) cost of memory and of
154          each hard-register of its allocno class (file ira-costs.c).
155 
156        * IRA creates live ranges of each allocno, calculates register
157          pressure for each pressure class in each region, sets up
158          conflict hard registers for each allocno and info about calls
159          the allocno lives through (file ira-lives.c).
160 
161        * IRA removes low register pressure loops from the regions
162          mostly to speed IRA up (file ira-build.c).
163 
164        * IRA propagates accumulated allocno info from lower region
165          allocnos to corresponding upper region allocnos (file
166          ira-build.c).
167 
168        * IRA creates all caps (file ira-build.c).
169 
170        * Having live-ranges of allocnos and their classes, IRA creates
171          conflicting allocnos for each allocno.  Conflicting allocnos
172          are stored as a bit vector or an array of pointers to the
173          conflicting allocnos, whichever is more profitable (file
174          ira-conflicts.c).  At this point IRA creates allocno copies.
175 
176      o Coloring.  Now IRA has all the necessary info to start the graph
177        coloring process.  It is done in each region on a top-down traversal
178        of the region tree (file ira-color.c).  There are the following subpasses:
179 
180        * Finding profitable hard registers of the corresponding allocno
181          class for each allocno.  For example, only callee-saved hard
182          registers are frequently profitable for allocnos living
183          through calls.  If the profitable hard register set of an
184          allocno does not form a tree based on the subset relation, we use
185          some approximation to form the tree.  This approximation is
186          used to figure out trivial colorability of allocnos.  The
187          approximation is needed only in pretty rare cases.
188 
189        * Putting allocnos onto the coloring stack.  IRA uses Briggs
190          optimistic coloring which is a major improvement over
191          Chaitin's coloring.  Therefore IRA does not spill allocnos at
192          this point.  There is some freedom in the order of putting
193          allocnos on the stack which can affect the final result of
194          the allocation.  IRA uses some heuristics to improve the
195          order.  The major one is to form *threads* from colorable
196          allocnos and push them onto the stack by threads.  A thread is a
197          set of non-conflicting colorable allocnos connected by
198          copies.  The thread contains allocnos from the colorable
199          bucket or colorable allocnos already pushed onto the coloring
200          stack.  Pushing thread allocnos one after another onto the
201          stack increases the chances of removing copies when the allocnos
202          get the same hard reg.
203 
204          We also use a modification of the Chaitin-Briggs algorithm which
205          works for intersected register classes of allocnos.  To
206          figure out trivial colorability of allocnos, the above-mentioned
207          tree of hard register sets is used.  To get an idea of how
208          the algorithm works, consider an i386 example: take an
209          allocno to which any general hard register can be assigned.
210          If the allocno conflicts with eight allocnos to which only
211          the EAX register can be assigned, the given allocno is still
212          trivially colorable because all the conflicting allocnos can
213          be assigned only to EAX and all other general hard registers
214          remain free.
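
         In terms of the criterion: the eight conflicting allocnos can
         together occupy at most one general register (EAX), so with
         roughly eight general registers at least seven remain
         available, which is enough for the given allocno.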
215 
216          To get an idea of the used trivial colorability criterion, it
217          is also useful to read the article "Graph-Coloring Register
218          Allocation for Irregular Architectures" by Michael D. Smith
219          and Glenn Holloway.  The major difference between the article's
220          approach and the approach used in IRA is that Smith's approach
221          takes register classes only from the machine description while IRA
222          calculates register classes from the intermediate code too
223          (e.g. an explicit usage of hard registers in RTL code for
224          parameter passing can result in the creation of additional
225          register classes which contain or exclude the hard
226          registers).  That makes the IRA approach useful for improving
227          coloring even for architectures with regular register files,
228          and in fact some benchmarking shows the improvement for
229          regular class architectures is even bigger than for irregular
230          ones.  Another difference is that Smith's approach chooses the
231          intersection of the classes of all insn operands in which a given
232          pseudo occurs.  IRA can use bigger classes if doing so is still
233          more profitable than using memory.
234 
235        * Popping the allocnos from the stack and assigning them hard
236          registers.  If IRA cannot assign a hard register to an
237          allocno and the allocno is coalesced, IRA undoes the
238          coalescing and puts the uncoalesced allocnos onto the stack in
239          the hope that some such allocnos will get a hard register
240          separately.  If IRA fails to assign a hard register, or memory
241          is more profitable for it, IRA spills the allocno.  IRA
242          assigns the allocno the hard-register with minimal full
243          allocation cost, which reflects the cost of using the
244          hard-register for the allocno itself and the cost of using the
245          hard-register for allocnos conflicting with the given allocno.
246 
247        * Chaitin-Briggs coloring assigns as many pseudos as possible
248          to hard registers.  After coloring, we try to improve the
249          allocation from a cost point of view.  We improve the
250          allocation by spilling some allocnos and assigning the freed
251          hard registers to other allocnos if it decreases the overall
252          allocation cost.
253 
254        * After allocno assignment in a region, IRA modifies the hard
255          register and memory costs for the corresponding allocnos in
256          the subregions to reflect the cost of possible loads, stores,
257          or moves on the border of the region and its subregions.
258          When the default regional allocation algorithm is used
259          (-fira-algorithm=mixed), IRA just propagates the assignment
260          for allocnos if the register pressure in the region for the
261          corresponding pressure class is less than the number of available
262          hard registers for the given pressure class.
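
         For instance (an invented illustration), if a pressure class
         has eight allocatable hard registers and the register pressure
         for that class inside the region never exceeds five, the
         assignments made in the enclosing region are simply reused in
         the region.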
263 
264      o Spill/restore code moving.  When IRA performs an allocation
265        by traversing regions in top-down order, it does not know what
266        happens below in the region tree.  Therefore, sometimes IRA
267        misses opportunities to perform a better allocation.  A simple
268        optimization tries to improve allocation in a region having
269        subregions and contained in another region.  If the
270        corresponding allocnos in the subregion are spilled, it spills
271        the region allocno if it is profitable.  The optimization
272        implements a simple iterative algorithm performing profitable
273        transformations while they are still possible.  It is fast in
274        practice, so there is no real need for a better time complexity
275        algorithm.
276 
277      o Code change.  After coloring, two allocnos representing the
278        same pseudo-register outside and inside a region respectively
279        may be assigned to different locations (hard-registers or
280        memory).  In this case IRA creates and uses a new
281        pseudo-register inside the region and adds code to move allocno
282        values on the region's borders.  This is done during top-down
283        traversal of the regions (file ira-emit.c).  In some
284        complicated cases IRA can create a new allocno to move allocno
285        values (e.g. when a swap of values stored in two hard-registers
286        is needed).  At this stage, the new allocno is marked as
287        spilled.  IRA still creates the pseudo-register and the moves
288        on the region borders even when both allocnos were assigned to
289        the same hard-register.  If the reload pass spills a
290        pseudo-register for some reason, the effect will be smaller
291        because another allocno will still be in the hard-register.  In
292        most cases, this is better than spilling both allocnos.  If
293        reload does not change the allocation for the two
294        pseudo-registers, the trivial move will be removed by
295        post-reload optimizations.  IRA does not generate moves for
296        allocnos assigned to the same hard register when the default
297        regional allocation algorithm is used and the register pressure
298        in the region for the corresponding pressure class is less than
299        the number of available hard registers for the given pressure class.
300        IRA also does some optimizations to remove redundant stores and
301        to reduce code duplication on the region borders.
302 
303      o Flattening internal representation.  After changing code, IRA
304        transforms its internal representation for several regions into
305        a one-region representation (file ira-build.c).  This process is
306        called IR flattening.  Such a process is more complicated than IR
307        rebuilding would be, but it is much faster.
308 
309      o After IR flattening, IRA tries to assign hard registers to all
310        spilled allocnos.  This is implemented by a simple and fast
311        priority coloring algorithm (see function
312        ira_reassign_conflict_allocnos in file ira-color.c).  Here new
313        allocnos created during the code change pass can be assigned
314        to hard registers.
315 
316      o At the end IRA calls the reload pass.  The reload pass
317        communicates with IRA through several functions in file
318        ira-color.c to improve its decisions in
319 
320        * sharing stack slots for the spilled pseudos based on IRA info
321          about pseudo-register conflicts.
322 
323        * reassigning hard-registers to all spilled pseudos at the end
324          of each reload iteration.
325 
326        * choosing a better hard-register to spill based on IRA info
327          about pseudo-register live ranges and the register pressure
328          in places where the pseudo-register lives.
329 
330    IRA uses a lot of data representing the target processors.  These
331    data are initialized in file ira.c.
332 
333    If the function has no loops (or the loops are ignored when
334    -fira-algorithm=CB is used), we have classic Chaitin-Briggs
335    coloring (only instead of a separate coalescing pass, we use hard
336    register preferencing).  In such a case, IRA works much faster
337    because many things are not done (like IR flattening, the
338    spill/restore optimization, and the code change).
339 
340    The following literature is worth reading to better understand the code:
341 
342    o Preston Briggs, Keith D. Cooper, Linda Torczon.  Improvements to
343      Graph Coloring Register Allocation.
344 
345    o David Callahan, Brian Koblenz.  Register allocation via
346      hierarchical graph coloring.
347 
348    o Keith Cooper, Anshuman Dasgupta, Jason Eckhardt. Revisiting Graph
349      Coloring Register Allocation: A Study of the Chaitin-Briggs and
350      Callahan-Koblenz Algorithms.
351 
352    o Guei-Yuan Lueh, Thomas Gross, and Ali-Reza Adl-Tabatabai. Global
353      Register Allocation Based on Graph Fusion.
354 
355    o Michael D. Smith and Glenn Holloway.  Graph-Coloring Register
356      Allocation for Irregular Architectures.
357 
358    o Vladimir Makarov. The Integrated Register Allocator for GCC.
359 
360    o Vladimir Makarov.  The top-down register allocator for irregular
361      register file architectures.
362 
363 */
364 
365 
366 #include "config.h"
367 #include "system.h"
368 #include "coretypes.h"
369 #include "backend.h"
370 #include "target.h"
371 #include "rtl.h"
372 #include "tree.h"
373 #include "df.h"
374 #include "memmodel.h"
375 #include "tm_p.h"
376 #include "insn-config.h"
377 #include "regs.h"
378 #include "ira.h"
379 #include "ira-int.h"
380 #include "diagnostic-core.h"
381 #include "cfgrtl.h"
382 #include "cfgbuild.h"
383 #include "cfgcleanup.h"
384 #include "expr.h"
385 #include "tree-pass.h"
386 #include "output.h"
387 #include "reload.h"
388 #include "cfgloop.h"
389 #include "lra.h"
390 #include "dce.h"
391 #include "dbgcnt.h"
392 #include "rtl-iter.h"
393 #include "shrink-wrap.h"
394 #include "print-rtl.h"
395 
396 struct target_ira default_target_ira;
397 class target_ira_int default_target_ira_int;
398 #if SWITCHABLE_TARGET
399 struct target_ira *this_target_ira = &default_target_ira;
400 class target_ira_int *this_target_ira_int = &default_target_ira_int;
401 #endif
402 
403 /* A modified value of flag `-fira-verbose' used internally.  */
404 int internal_flag_ira_verbose;
405 
406 /* Dump file of the allocator if it is not NULL.  */
407 FILE *ira_dump_file;
408 
409 /* The number of elements in the following array.  */
410 int ira_spilled_reg_stack_slots_num;
411 
412 /* The following array contains info about spilled pseudo-registers
413    stack slots used in current function so far.  */
414 class ira_spilled_reg_stack_slot *ira_spilled_reg_stack_slots;
415 
416 /* Correspondingly overall cost of the allocation, overall cost before
417    reload, cost of the allocnos assigned to hard-registers, cost of
418    the allocnos assigned to memory, cost of loads, stores and register
419    move insns generated for pseudo-register live range splitting (see
420    ira-emit.c).  */
421 int64_t ira_overall_cost, overall_cost_before;
422 int64_t ira_reg_cost, ira_mem_cost;
423 int64_t ira_load_cost, ira_store_cost, ira_shuffle_cost;
424 int ira_move_loops_num, ira_additional_jumps_num;
425 
426 /* All registers that can be eliminated.  */
427 
428 HARD_REG_SET eliminable_regset;
429 
430 /* Value of max_reg_num () before IRA work starts.  This value helps
431    us to recognize a situation when new pseudos were created during
432    IRA work.  */
433 static int max_regno_before_ira;
434 
435 /* Temporary hard reg set used for various calculations.  */
436 static HARD_REG_SET temp_hard_regset;
437 
438 #define last_mode_for_init_move_cost \
439   (this_target_ira_int->x_last_mode_for_init_move_cost)
440 
441 
442 /* The function sets up the map IRA_REG_MODE_HARD_REGSET.  */
443 static void
444 setup_reg_mode_hard_regset (void)
445 {
446   int i, m, hard_regno;
447 
448   for (m = 0; m < NUM_MACHINE_MODES; m++)
449     for (hard_regno = 0; hard_regno < FIRST_PSEUDO_REGISTER; hard_regno++)
450       {
451 	CLEAR_HARD_REG_SET (ira_reg_mode_hard_regset[hard_regno][m]);
452 	for (i = hard_regno_nregs (hard_regno, (machine_mode) m) - 1;
453 	     i >= 0; i--)
454 	  if (hard_regno + i < FIRST_PSEUDO_REGISTER)
455 	    SET_HARD_REG_BIT (ira_reg_mode_hard_regset[hard_regno][m],
456 			      hard_regno + i);
457       }
458 }
459 
460 
461 #define no_unit_alloc_regs \
462   (this_target_ira_int->x_no_unit_alloc_regs)
463 
464 /* The function sets up the arrays ira_class_hard_regs,
    ira_class_hard_reg_index, ira_class_hard_regs_num and
    ira_non_ordered_class_hard_regs.  */
465 static void
466 setup_class_hard_regs (void)
467 {
468   int cl, i, hard_regno, n;
469   HARD_REG_SET processed_hard_reg_set;
470 
471   ira_assert (SHRT_MAX >= FIRST_PSEUDO_REGISTER);
472   for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
473     {
474       temp_hard_regset = reg_class_contents[cl] & ~no_unit_alloc_regs;
475       CLEAR_HARD_REG_SET (processed_hard_reg_set);
476       for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
477 	{
478 	  ira_non_ordered_class_hard_regs[cl][i] = -1;
479 	  ira_class_hard_reg_index[cl][i] = -1;
480 	}
481       for (n = 0, i = 0; i < FIRST_PSEUDO_REGISTER; i++)
482 	{
483 #ifdef REG_ALLOC_ORDER
484 	  hard_regno = reg_alloc_order[i];
485 #else
486 	  hard_regno = i;
487 #endif
488 	  if (TEST_HARD_REG_BIT (processed_hard_reg_set, hard_regno))
489 	    continue;
490 	  SET_HARD_REG_BIT (processed_hard_reg_set, hard_regno);
491       	  if (! TEST_HARD_REG_BIT (temp_hard_regset, hard_regno))
492 	    ira_class_hard_reg_index[cl][hard_regno] = -1;
493 	  else
494 	    {
495 	      ira_class_hard_reg_index[cl][hard_regno] = n;
496 	      ira_class_hard_regs[cl][n++] = hard_regno;
497 	    }
498 	}
499       ira_class_hard_regs_num[cl] = n;
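      /* Collect the allocatable hard registers of the class once more,
         this time in register number order rather than allocation
         order, for ira_non_ordered_class_hard_regs.  */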
500       for (n = 0, i = 0; i < FIRST_PSEUDO_REGISTER; i++)
501 	if (TEST_HARD_REG_BIT (temp_hard_regset, i))
502 	  ira_non_ordered_class_hard_regs[cl][n++] = i;
503       ira_assert (ira_class_hard_regs_num[cl] == n);
504     }
505 }
506 
507 /* Set up global variables defining info about hard registers for the
508    allocation.  These depend on USE_HARD_FRAME_P whose TRUE value means
509    that we can use the hard frame pointer for the allocation.  */
510 static void
511 setup_alloc_regs (bool use_hard_frame_p)
512 {
513 #ifdef ADJUST_REG_ALLOC_ORDER
514   ADJUST_REG_ALLOC_ORDER;
515 #endif
516   no_unit_alloc_regs = fixed_nonglobal_reg_set;
517   if (! use_hard_frame_p)
518     add_to_hard_reg_set (&no_unit_alloc_regs, Pmode,
519 			 HARD_FRAME_POINTER_REGNUM);
520   setup_class_hard_regs ();
521 }
522 
523 
524 
525 #define alloc_reg_class_subclasses \
526   (this_target_ira_int->x_alloc_reg_class_subclasses)
527 
528 /* Initialize the table of subclasses of each reg class.  */
529 static void
530 setup_reg_subclasses (void)
531 {
532   int i, j;
533   HARD_REG_SET temp_hard_regset2;
534 
535   for (i = 0; i < N_REG_CLASSES; i++)
536     for (j = 0; j < N_REG_CLASSES; j++)
537       alloc_reg_class_subclasses[i][j] = LIM_REG_CLASSES;
538 
539   for (i = 0; i < N_REG_CLASSES; i++)
540     {
541       if (i == (int) NO_REGS)
542 	continue;
543 
544       temp_hard_regset = reg_class_contents[i] & ~no_unit_alloc_regs;
545       if (hard_reg_set_empty_p (temp_hard_regset))
546 	continue;
547       for (j = 0; j < N_REG_CLASSES; j++)
548 	if (i != j)
549 	  {
550 	    enum reg_class *p;
551 
552 	    temp_hard_regset2 = reg_class_contents[j] & ~no_unit_alloc_regs;
553 	    if (! hard_reg_set_subset_p (temp_hard_regset,
554 					 temp_hard_regset2))
555 	      continue;
556 	    p = &alloc_reg_class_subclasses[j][0];
557 	    while (*p != LIM_REG_CLASSES) p++;
558 	    *p = (enum reg_class) i;
559 	  }
560     }
561 }
562 
563 
564 
565 /* Set up IRA_MEMORY_MOVE_COST, IRA_MAX_MEMORY_MOVE_COST,
    IRA_CLASS_SUBSET_P and the table of reg class subclasses.  */
566 static void
567 setup_class_subset_and_memory_move_costs (void)
568 {
569   int cl, cl2, mode, cost;
570   HARD_REG_SET temp_hard_regset2;
571 
572   for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
573     ira_memory_move_cost[mode][NO_REGS][0]
574       = ira_memory_move_cost[mode][NO_REGS][1] = SHRT_MAX;
575   for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
576     {
577       if (cl != (int) NO_REGS)
578 	for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
579 	  {
580 	    ira_max_memory_move_cost[mode][cl][0]
581 	      = ira_memory_move_cost[mode][cl][0]
582 	      = memory_move_cost ((machine_mode) mode,
583 				  (reg_class_t) cl, false);
584 	    ira_max_memory_move_cost[mode][cl][1]
585 	      = ira_memory_move_cost[mode][cl][1]
586 	      = memory_move_cost ((machine_mode) mode,
587 				  (reg_class_t) cl, true);
588 	    /* Costs for NO_REGS are used in cost calculation on the
589 	       1st pass when the preferred register classes are not
590 	       known yet.  In this case we take the best scenario.  */
591 	    if (ira_memory_move_cost[mode][NO_REGS][0]
592 		> ira_memory_move_cost[mode][cl][0])
593 	      ira_max_memory_move_cost[mode][NO_REGS][0]
594 		= ira_memory_move_cost[mode][NO_REGS][0]
595 		= ira_memory_move_cost[mode][cl][0];
596 	    if (ira_memory_move_cost[mode][NO_REGS][1]
597 		> ira_memory_move_cost[mode][cl][1])
598 	      ira_max_memory_move_cost[mode][NO_REGS][1]
599 		= ira_memory_move_cost[mode][NO_REGS][1]
600 		= ira_memory_move_cost[mode][cl][1];
601 	  }
602     }
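  /* Compute the subset relation on the allocatable registers of each
     pair of classes, and make a class's maximum memory move cost at
     least as large as that of every class whose registers it
     contains.  */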
603   for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
604     for (cl2 = (int) N_REG_CLASSES - 1; cl2 >= 0; cl2--)
605       {
606 	temp_hard_regset = reg_class_contents[cl] & ~no_unit_alloc_regs;
607 	temp_hard_regset2 = reg_class_contents[cl2] & ~no_unit_alloc_regs;
608 	ira_class_subset_p[cl][cl2]
609 	  = hard_reg_set_subset_p (temp_hard_regset, temp_hard_regset2);
610 	if (! hard_reg_set_empty_p (temp_hard_regset2)
611 	    && hard_reg_set_subset_p (reg_class_contents[cl2],
612 				      reg_class_contents[cl]))
613 	  for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
614 	    {
615 	      cost = ira_memory_move_cost[mode][cl2][0];
616 	      if (cost > ira_max_memory_move_cost[mode][cl][0])
617 		ira_max_memory_move_cost[mode][cl][0] = cost;
618 	      cost = ira_memory_move_cost[mode][cl2][1];
619 	      if (cost > ira_max_memory_move_cost[mode][cl][1])
620 		ira_max_memory_move_cost[mode][cl][1] = cost;
621 	    }
622       }
623   for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
624     for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
625       {
626 	ira_memory_move_cost[mode][cl][0]
627 	  = ira_max_memory_move_cost[mode][cl][0];
628 	ira_memory_move_cost[mode][cl][1]
629 	  = ira_max_memory_move_cost[mode][cl][1];
630       }
631   setup_reg_subclasses ();
632 }
633 
634 
635 
636 /* Define the following macro if allocation through malloc is
637    preferable.  */
638 #define IRA_NO_OBSTACK
639 
640 #ifndef IRA_NO_OBSTACK
641 /* Obstack used for storing all dynamic data (except bitmaps) of the
642    IRA.  */
643 static struct obstack ira_obstack;
644 #endif
645 
646 /* Obstack used for storing all bitmaps of the IRA.  */
647 static struct bitmap_obstack ira_bitmap_obstack;
648 
649 /* Allocate memory of size LEN for IRA data.  */
650 void *
651 ira_allocate (size_t len)
652 {
653   void *res;
654 
655 #ifndef IRA_NO_OBSTACK
656   res = obstack_alloc (&ira_obstack, len);
657 #else
658   res = xmalloc (len);
659 #endif
660   return res;
661 }
662 
663 /* Free memory ADDR allocated for IRA data.  */
664 void
665 ira_free (void *addr ATTRIBUTE_UNUSED)
666 {
667 #ifndef IRA_NO_OBSTACK
668   /* do nothing */
669 #else
670   free (addr);
671 #endif
672 }
673 
674 
675 /* Allocate and return a bitmap for IRA.  */
676 bitmap
677 ira_allocate_bitmap (void)
678 {
679   return BITMAP_ALLOC (&ira_bitmap_obstack);
680 }
681 
682 /* Free bitmap B allocated for IRA.  */
683 void
684 ira_free_bitmap (bitmap b ATTRIBUTE_UNUSED)
685 {
686   /* do nothing */
687 }
688 
689 
690 
691 /* Output information about allocation of all allocnos (except for
692    caps) into file F.  */
693 void
694 ira_print_disposition (FILE *f)
695 {
696   int i, n, max_regno;
697   ira_allocno_t a;
698   basic_block bb;
699 
700   fprintf (f, "Disposition:");
701   max_regno = max_reg_num ();
702   for (n = 0, i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
703     for (a = ira_regno_allocno_map[i];
704 	 a != NULL;
705 	 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
706       {
707 	if (n % 4 == 0)
708 	  fprintf (f, "\n");
709 	n++;
710 	fprintf (f, " %4d:r%-4d", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
711 	if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
712 	  fprintf (f, "b%-3d", bb->index);
713 	else
714 	  fprintf (f, "l%-3d", ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
715 	if (ALLOCNO_HARD_REGNO (a) >= 0)
716 	  fprintf (f, " %3d", ALLOCNO_HARD_REGNO (a));
717 	else
718 	  fprintf (f, " mem");
719       }
720   fprintf (f, "\n");
721 }
722 
723 /* Output information about the allocation of all allocnos into
724    stderr.  */
725 void
726 ira_debug_disposition (void)
727 {
728   ira_print_disposition (stderr);
729 }
730 
731 
732 
733 /* Set up ira_stack_reg_pressure_class which is the biggest pressure
734    register class containing stack registers or NO_REGS if there are
735    no stack registers.  To find this class, we iterate through all
736    register pressure classes and choose the first register pressure
737    class containing all the stack registers and having the biggest
738    size.  */
739 static void
740 setup_stack_reg_pressure_class (void)
741 {
742   ira_stack_reg_pressure_class = NO_REGS;
743 #ifdef STACK_REGS
744   {
745     int i, best, size;
746     enum reg_class cl;
747     HARD_REG_SET temp_hard_regset2;
748 
749     CLEAR_HARD_REG_SET (temp_hard_regset);
750     for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++)
751       SET_HARD_REG_BIT (temp_hard_regset, i);
752     best = 0;
753     for (i = 0; i < ira_pressure_classes_num; i++)
754       {
755 	cl = ira_pressure_classes[i];
756 	temp_hard_regset2 = temp_hard_regset & reg_class_contents[cl];
757 	size = hard_reg_set_size (temp_hard_regset2);
758 	if (best < size)
759 	  {
760 	    best = size;
761 	    ira_stack_reg_pressure_class = cl;
762 	  }
763       }
764   }
765 #endif
766 }
767 
768 /* Find pressure classes which are register classes for which we
769    calculate register pressure in IRA, register pressure sensitive
770    insn scheduling, and register pressure sensitive loop invariant
771    motion.
772 
773    To make register pressure calculation easy, we always use
774    non-intersected register pressure classes.  A move between hard
775    registers of one register pressure class should not be more
776    expensive than a load and store of the hard registers.  Most likely
777    an allocno class will be a subset of a register pressure class and
778    in many cases exactly a register pressure class.  That makes usage
779    of register pressure classes a good approximation for finding high
780    register pressure.  */
781 static void
782 setup_pressure_classes (void)
783 {
784   int cost, i, n, curr;
785   int cl, cl2;
786   enum reg_class pressure_classes[N_REG_CLASSES];
787   int m;
788   HARD_REG_SET temp_hard_regset2;
789   bool insert_p;
790 
791   if (targetm.compute_pressure_classes)
792     n = targetm.compute_pressure_classes (pressure_classes);
793   else
794     {
795       n = 0;
796       for (cl = 0; cl < N_REG_CLASSES; cl++)
797 	{
798 	  if (ira_class_hard_regs_num[cl] == 0)
799 	    continue;
800 	  if (ira_class_hard_regs_num[cl] != 1
801 	      /* A register class without subclasses may contain a few
802 		 hard registers and movement between them is costly
803 		 (e.g. SPARC FPCC registers).  We still should consider it
804 		 as a candidate for a pressure class.  */
805 	      && alloc_reg_class_subclasses[cl][0] < cl)
806 	    {
807 	      /* Check that the moves between any hard registers of the
808 		 current class are not more expensive for a legal mode
809 		 than load/store of the hard registers of the current
810 		 class.  Such class is a potential candidate to be a
811 		 register pressure class.  */
812 	      for (m = 0; m < NUM_MACHINE_MODES; m++)
813 		{
814 		  temp_hard_regset
815 		    = (reg_class_contents[cl]
816 		       & ~(no_unit_alloc_regs
817 			   | ira_prohibited_class_mode_regs[cl][m]));
818 		  if (hard_reg_set_empty_p (temp_hard_regset))
819 		    continue;
820 		  ira_init_register_move_cost_if_necessary ((machine_mode) m);
821 		  cost = ira_register_move_cost[m][cl][cl];
822 		  if (cost <= ira_max_memory_move_cost[m][cl][1]
823 		      || cost <= ira_max_memory_move_cost[m][cl][0])
824 		    break;
825 		}
826 	      if (m >= NUM_MACHINE_MODES)
827 		continue;
828 	    }
829 	  curr = 0;
830 	  insert_p = true;
831 	  temp_hard_regset = reg_class_contents[cl] & ~no_unit_alloc_regs;
832 	  /* Remove so far added pressure classes which are subset of the
833 	     current candidate class.  Prefer GENERAL_REGS as a pressure
834 	     register class to another class containing the same
835 	     allocatable hard registers.  We do this because machine
836 	     dependent cost hooks might give wrong costs for the latter
837 	     class but always give the right cost for the former class
838 	     (GENERAL_REGS).  */
839 	  for (i = 0; i < n; i++)
840 	    {
841 	      cl2 = pressure_classes[i];
842 	      temp_hard_regset2 = (reg_class_contents[cl2]
843 				   & ~no_unit_alloc_regs);
844 	      if (hard_reg_set_subset_p (temp_hard_regset, temp_hard_regset2)
845 		  && (temp_hard_regset != temp_hard_regset2
846 		      || cl2 == (int) GENERAL_REGS))
847 		{
848 		  pressure_classes[curr++] = (enum reg_class) cl2;
849 		  insert_p = false;
850 		  continue;
851 		}
852 	      if (hard_reg_set_subset_p (temp_hard_regset2, temp_hard_regset)
853 		  && (temp_hard_regset2 != temp_hard_regset
854 		      || cl == (int) GENERAL_REGS))
855 		continue;
856 	      if (temp_hard_regset2 == temp_hard_regset)
857 		insert_p = false;
858 	      pressure_classes[curr++] = (enum reg_class) cl2;
859 	    }
860 	  /* If the current candidate is a subset of a so far added
861 	     pressure class, don't add it to the list of the pressure
862 	     classes.  */
863 	  if (insert_p)
864 	    pressure_classes[curr++] = (enum reg_class) cl;
865 	  n = curr;
866 	}
867     }
868 #ifdef ENABLE_IRA_CHECKING
869   {
870     HARD_REG_SET ignore_hard_regs;
871 
872     /* Check pressure classes correctness: here we check that hard
873        registers from all register pressure classes contain all hard
874        registers available for the allocation.  */
875     CLEAR_HARD_REG_SET (temp_hard_regset);
876     CLEAR_HARD_REG_SET (temp_hard_regset2);
877     ignore_hard_regs = no_unit_alloc_regs;
878     for (cl = 0; cl < LIM_REG_CLASSES; cl++)
879       {
880 	/* For some targets (like MIPS with MD_REGS), there are some
881 	   classes with hard registers available for allocation but
882 	   not able to hold value of any mode.  */
883 	for (m = 0; m < NUM_MACHINE_MODES; m++)
884 	  if (contains_reg_of_mode[cl][m])
885 	    break;
886 	if (m >= NUM_MACHINE_MODES)
887 	  {
888 	    ignore_hard_regs |= reg_class_contents[cl];
889 	    continue;
890 	  }
891 	for (i = 0; i < n; i++)
892 	  if ((int) pressure_classes[i] == cl)
893 	    break;
894 	temp_hard_regset2 |= reg_class_contents[cl];
895 	if (i < n)
896 	  temp_hard_regset |= reg_class_contents[cl];
897       }
898     for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
899       /* Some targets (like SPARC with ICC reg) have allocatable regs
900 	 for which no reg class is defined.  */
901       if (REGNO_REG_CLASS (i) == NO_REGS)
902 	SET_HARD_REG_BIT (ignore_hard_regs, i);
903     temp_hard_regset &= ~ignore_hard_regs;
904     temp_hard_regset2 &= ~ignore_hard_regs;
905     ira_assert (hard_reg_set_subset_p (temp_hard_regset2, temp_hard_regset));
906   }
907 #endif
908   ira_pressure_classes_num = 0;
909   for (i = 0; i < n; i++)
910     {
911       cl = (int) pressure_classes[i];
912       ira_reg_pressure_class_p[cl] = true;
913       ira_pressure_classes[ira_pressure_classes_num++] = (enum reg_class) cl;
914     }
915   setup_stack_reg_pressure_class ();
916 }
917 
918 /* Set up IRA_UNIFORM_CLASS_P.  Uniform class is a register class
919    whose register move cost between any registers of the class is the
920    same as for all its subclasses.  We use the data to speed up the
921    2nd pass of calculations of allocno costs.  */
922 static void
923 setup_uniform_class_p (void)
924 {
925   int i, cl, cl2, m;
926 
927   for (cl = 0; cl < N_REG_CLASSES; cl++)
928     {
929       ira_uniform_class_p[cl] = false;
930       if (ira_class_hard_regs_num[cl] == 0)
931 	continue;
932       /* We cannot use alloc_reg_class_subclasses here because move
933          cost hooks do not take into account that some registers are
934          unavailable for the subtarget.  E.g. for i686, INT_SSE_REGS
935          is an element of alloc_reg_class_subclasses for GENERAL_REGS
936          because SSE regs are unavailable.  */
937       for (i = 0; (cl2 = reg_class_subclasses[cl][i]) != LIM_REG_CLASSES; i++)
938 	{
939 	  if (ira_class_hard_regs_num[cl2] == 0)
940 	    continue;
941       	  for (m = 0; m < NUM_MACHINE_MODES; m++)
942 	    if (contains_reg_of_mode[cl][m] && contains_reg_of_mode[cl2][m])
943 	      {
944 		ira_init_register_move_cost_if_necessary ((machine_mode) m);
945 		if (ira_register_move_cost[m][cl][cl]
946 		    != ira_register_move_cost[m][cl2][cl2])
947 		  break;
948 	      }
949 	  if (m < NUM_MACHINE_MODES)
950 	    break;
951 	}
952       if (cl2 == LIM_REG_CLASSES)
953 	ira_uniform_class_p[cl] = true;
954     }
955 }
956 
957 /* Set up IRA_ALLOCNO_CLASSES, IRA_ALLOCNO_CLASSES_NUM,
958    IRA_IMPORTANT_CLASSES, and IRA_IMPORTANT_CLASSES_NUM.
959 
960    A target may have many subtargets and not all target hard registers can
961    be used for allocation, e.g. the x86 port in 32-bit mode cannot use
962    hard registers introduced in x86-64 like r8-r15.  Some classes
963    might have the same allocatable hard registers, e.g.  INDEX_REGS
964    and GENERAL_REGS in the x86 port in 32-bit mode.  To decrease the
965    effort of various calculations, we introduce allocno classes which
966    contain unique non-empty sets of allocatable hard-registers.
967 
968    Pseudo class cost calculation in ira-costs.c is very expensive.
969    Therefore we are trying to decrease the number of classes involved in
970    such calculation.  Register classes used in the cost calculation
971    are called important classes.  They are allocno classes and other
972    non-empty classes whose allocatable hard register sets are inside
973    of an allocno class hard register set.  At first sight, it
974    looks like they are just the allocno classes.  That is not true.  In
975    the example of the x86 port in 32-bit mode, allocno classes will contain
976    GENERAL_REGS but not LEGACY_REGS (because the allocatable hard
977    registers are the same for both classes).  The important
978    classes will contain GENERAL_REGS and LEGACY_REGS.  It is done
979    because a machine description insn constraint may refer to
980    LEGACY_REGS and the code in ira-costs.c is mostly based on investigation
981    of the insn constraints.  */
982 static void
983 setup_allocno_and_important_classes (void)
984 {
985   int i, j, n, cl;
986   bool set_p;
987   HARD_REG_SET temp_hard_regset2;
988   static enum reg_class classes[LIM_REG_CLASSES + 1];
989 
990   n = 0;
991   /* Collect classes which contain unique sets of allocatable hard
992      registers.  Prefer GENERAL_REGS to other classes containing the
993      same set of hard registers.  */
994   for (i = 0; i < LIM_REG_CLASSES; i++)
995     {
996       temp_hard_regset = reg_class_contents[i] & ~no_unit_alloc_regs;
997       for (j = 0; j < n; j++)
998 	{
999 	  cl = classes[j];
1000 	  temp_hard_regset2 = reg_class_contents[cl] & ~no_unit_alloc_regs;
1001 	  if (temp_hard_regset == temp_hard_regset2)
1002 	    break;
1003 	}
1004       if (j >= n || targetm.additional_allocno_class_p (i))
1005 	classes[n++] = (enum reg_class) i;
1006       else if (i == GENERAL_REGS)
1007 	/* Prefer general regs.  For the i386 example, it means that
1008 	   we prefer GENERAL_REGS over INDEX_REGS or LEGACY_REGS
1009 	   (all of them consist of the same available hard
1010 	   registers).  */
1011 	classes[j] = (enum reg_class) i;
1012     }
1013   classes[n] = LIM_REG_CLASSES;
1014 
1015   /* Set up classes which can be used for allocnos as classes
1016      containing non-empty unique sets of allocatable hard
1017      registers.  */
1018   ira_allocno_classes_num = 0;
1019   for (i = 0; (cl = classes[i]) != LIM_REG_CLASSES; i++)
1020     if (ira_class_hard_regs_num[cl] > 0)
1021       ira_allocno_classes[ira_allocno_classes_num++] = (enum reg_class) cl;
1022   ira_important_classes_num = 0;
1023   /* Add non-allocno classes containing a non-empty set of
1024      allocatable hard regs.  */
1025   for (cl = 0; cl < N_REG_CLASSES; cl++)
1026     if (ira_class_hard_regs_num[cl] > 0)
1027       {
1028 	temp_hard_regset = reg_class_contents[cl] & ~no_unit_alloc_regs;
1029 	set_p = false;
1030 	for (j = 0; j < ira_allocno_classes_num; j++)
1031 	  {
1032 	    temp_hard_regset2 = (reg_class_contents[ira_allocno_classes[j]]
1033 				 & ~no_unit_alloc_regs);
1034 	    if ((enum reg_class) cl == ira_allocno_classes[j])
1035 	      break;
1036 	    else if (hard_reg_set_subset_p (temp_hard_regset,
1037 					    temp_hard_regset2))
1038 	      set_p = true;
1039 	  }
1040 	if (set_p && j >= ira_allocno_classes_num)
1041 	  ira_important_classes[ira_important_classes_num++]
1042 	    = (enum reg_class) cl;
1043       }
1044   /* Now add allocno classes to the important classes.  */
1045   for (j = 0; j < ira_allocno_classes_num; j++)
1046     ira_important_classes[ira_important_classes_num++]
1047       = ira_allocno_classes[j];
1048   for (cl = 0; cl < N_REG_CLASSES; cl++)
1049     {
1050       ira_reg_allocno_class_p[cl] = false;
1051       ira_reg_pressure_class_p[cl] = false;
1052     }
1053   for (j = 0; j < ira_allocno_classes_num; j++)
1054     ira_reg_allocno_class_p[ira_allocno_classes[j]] = true;
1055   setup_pressure_classes ();
1056   setup_uniform_class_p ();
1057 }
1058 
1059 /* Setup translation in CLASS_TRANSLATE of all classes into a class
1060    given by array CLASSES of length CLASSES_NUM.  The function is used
1061    to translate any reg class to an allocno class or to a
1062    pressure class.  This translation is necessary for some
1063    calculations when we can use only allocno or pressure classes and
1064    such translation represents an approximate representation of all
1065    classes.
1066 
1067    The translation in the case when the allocatable hard register set of a
1068    given class is a subset of the allocatable hard register set of a class
1069    in CLASSES is pretty simple.  We use the smallest class from CLASSES
1070    containing a given class.  If the allocatable hard register set of a
1071    given class is not a subset of any corresponding set of a class
1072    from CLASSES, we use the cheapest (from the load/store point of view)
1073    class from CLASSES whose set intersects with the given class set.  */
1074 static void
1075 setup_class_translate_array (enum reg_class *class_translate,
1076 			     int classes_num, enum reg_class *classes)
1077 {
1078   int cl, mode;
1079   enum reg_class aclass, best_class, *cl_ptr;
1080   int i, cost, min_cost, best_cost;
1081 
1082   for (cl = 0; cl < N_REG_CLASSES; cl++)
1083     class_translate[cl] = NO_REGS;
1084 
1085   for (i = 0; i < classes_num; i++)
1086     {
1087       aclass = classes[i];
1088       for (cl_ptr = &alloc_reg_class_subclasses[aclass][0];
1089 	   (cl = *cl_ptr) != LIM_REG_CLASSES;
1090 	   cl_ptr++)
1091 	if (class_translate[cl] == NO_REGS)
1092 	  class_translate[cl] = aclass;
1093       class_translate[aclass] = aclass;
1094     }
1095   /* For classes which are not fully covered by one of the given classes
1096      (in other words, covered by more than one given class), use the
1097      cheapest class.  */
1098   for (cl = 0; cl < N_REG_CLASSES; cl++)
1099     {
1100       if (cl == NO_REGS || class_translate[cl] != NO_REGS)
1101 	continue;
1102       best_class = NO_REGS;
1103       best_cost = INT_MAX;
1104       for (i = 0; i < classes_num; i++)
1105 	{
1106 	  aclass = classes[i];
1107 	  temp_hard_regset = (reg_class_contents[aclass]
1108 			      & reg_class_contents[cl]
1109 			      & ~no_unit_alloc_regs);
1110 	  if (! hard_reg_set_empty_p (temp_hard_regset))
1111 	    {
1112 	      min_cost = INT_MAX;
1113 	      for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
1114 		{
1115 		  cost = (ira_memory_move_cost[mode][aclass][0]
1116 			  + ira_memory_move_cost[mode][aclass][1]);
1117 		  if (min_cost > cost)
1118 		    min_cost = cost;
1119 		}
1120 	      if (best_class == NO_REGS || best_cost > min_cost)
1121 		{
1122 		  best_class = aclass;
1123 		  best_cost = min_cost;
1124 		}
1125 	    }
1126 	}
1127       class_translate[cl] = best_class;
1128     }
1129 }
1130 
1131 /* Set up array IRA_ALLOCNO_CLASS_TRANSLATE and
1132    IRA_PRESSURE_CLASS_TRANSLATE.  */
1133 static void
1134 setup_class_translate (void)
1135 {
1136   setup_class_translate_array (ira_allocno_class_translate,
1137 			       ira_allocno_classes_num, ira_allocno_classes);
1138   setup_class_translate_array (ira_pressure_class_translate,
1139 			       ira_pressure_classes_num, ira_pressure_classes);
1140 }
1141 
1142 /* Order numbers of allocno classes in original target allocno class
1143    array, -1 for non-allocno classes.  */
1144 static int allocno_class_order[N_REG_CLASSES];
1145 
1146 /* The function used to sort the important classes.  */
1147 static int
1148 comp_reg_classes_func (const void *v1p, const void *v2p)
1149 {
1150   enum reg_class cl1 = *(const enum reg_class *) v1p;
1151   enum reg_class cl2 = *(const enum reg_class *) v2p;
1152   enum reg_class tcl1, tcl2;
1153   int diff;
1154 
1155   tcl1 = ira_allocno_class_translate[cl1];
1156   tcl2 = ira_allocno_class_translate[cl2];
1157   if (tcl1 != NO_REGS && tcl2 != NO_REGS
1158       && (diff = allocno_class_order[tcl1] - allocno_class_order[tcl2]) != 0)
1159     return diff;
1160   return (int) cl1 - (int) cl2;
1161 }
1162 
1163 /* For correct work of function setup_reg_class_relations we need to
1164    reorder important classes according to the order of their allocno
1165    classes.  It places important classes containing the same
1166    allocatable hard register set adjacent to each other and the allocno
1167    class with that allocatable hard register set right after the other
1168    important classes with the same set.
1169 
1170    In the example from the comments of function
1171    setup_allocno_and_important_classes, it places LEGACY_REGS and
1172    GENERAL_REGS close to each other and GENERAL_REGS is after
1173    LEGACY_REGS.  */
1174 static void
1175 reorder_important_classes (void)
1176 {
1177   int i;
1178 
1179   for (i = 0; i < N_REG_CLASSES; i++)
1180     allocno_class_order[i] = -1;
1181   for (i = 0; i < ira_allocno_classes_num; i++)
1182     allocno_class_order[ira_allocno_classes[i]] = i;
1183   qsort (ira_important_classes, ira_important_classes_num,
1184 	 sizeof (enum reg_class), comp_reg_classes_func);
1185   for (i = 0; i < ira_important_classes_num; i++)
1186     ira_important_class_nums[ira_important_classes[i]] = i;
1187 }
1188 
1189 /* Set up IRA_REG_CLASS_SUBUNION, IRA_REG_CLASS_SUPERUNION,
1190    IRA_REG_CLASS_SUPER_CLASSES, IRA_REG_CLASSES_INTERSECT, and
1191    IRA_REG_CLASSES_INTERSECT_P.  For the meaning of the relations,
1192    please see corresponding comments in ira-int.h.  */
1193 static void
1194 setup_reg_class_relations (void)
1195 {
1196   int i, cl1, cl2, cl3;
1197   HARD_REG_SET intersection_set, union_set, temp_set2;
1198   bool important_class_p[N_REG_CLASSES];
1199 
1200   memset (important_class_p, 0, sizeof (important_class_p));
1201   for (i = 0; i < ira_important_classes_num; i++)
1202     important_class_p[ira_important_classes[i]] = true;
1203   for (cl1 = 0; cl1 < N_REG_CLASSES; cl1++)
1204     {
1205       ira_reg_class_super_classes[cl1][0] = LIM_REG_CLASSES;
1206       for (cl2 = 0; cl2 < N_REG_CLASSES; cl2++)
1207 	{
1208 	  ira_reg_classes_intersect_p[cl1][cl2] = false;
1209 	  ira_reg_class_intersect[cl1][cl2] = NO_REGS;
1210 	  ira_reg_class_subset[cl1][cl2] = NO_REGS;
1211 	  temp_hard_regset = reg_class_contents[cl1] & ~no_unit_alloc_regs;
1212 	  temp_set2 = reg_class_contents[cl2] & ~no_unit_alloc_regs;
1213 	  if (hard_reg_set_empty_p (temp_hard_regset)
1214 	      && hard_reg_set_empty_p (temp_set2))
1215 	    {
1216 	      /* Both classes have no allocatable hard registers
1217 		 -- take all class hard registers into account and use
1218 		 reg_class_subunion and reg_class_superunion.  */
1219 	      for (i = 0;; i++)
1220 		{
1221 		  cl3 = reg_class_subclasses[cl1][i];
1222 		  if (cl3 == LIM_REG_CLASSES)
1223 		    break;
1224 		  if (reg_class_subset_p (ira_reg_class_intersect[cl1][cl2],
1225 					  (enum reg_class) cl3))
1226 		    ira_reg_class_intersect[cl1][cl2] = (enum reg_class) cl3;
1227 		}
1228 	      ira_reg_class_subunion[cl1][cl2] = reg_class_subunion[cl1][cl2];
1229 	      ira_reg_class_superunion[cl1][cl2] = reg_class_superunion[cl1][cl2];
1230 	      continue;
1231 	    }
1232 	  ira_reg_classes_intersect_p[cl1][cl2]
1233 	    = hard_reg_set_intersect_p (temp_hard_regset, temp_set2);
1234 	  if (important_class_p[cl1] && important_class_p[cl2]
1235 	      && hard_reg_set_subset_p (temp_hard_regset, temp_set2))
1236 	    {
1237 	      /* CL1 and CL2 are important classes and CL1's allocatable
1238 		 hard register set is inside of CL2's allocatable hard
1239 		 registers -- record CL2 as a super class of CL1.  */
1240 	      enum reg_class *p;
1241 
1242 	      p = &ira_reg_class_super_classes[cl1][0];
1243 	      while (*p != LIM_REG_CLASSES)
1244 		p++;
1245 	      *p++ = (enum reg_class) cl2;
1246 	      *p = LIM_REG_CLASSES;
1247 	    }
1248 	  ira_reg_class_subunion[cl1][cl2] = NO_REGS;
1249 	  ira_reg_class_superunion[cl1][cl2] = NO_REGS;
1250 	  intersection_set = (reg_class_contents[cl1]
1251 			      & reg_class_contents[cl2]
1252 			      & ~no_unit_alloc_regs);
1253 	  union_set = ((reg_class_contents[cl1] | reg_class_contents[cl2])
1254 		       & ~no_unit_alloc_regs);
1255 	  for (cl3 = 0; cl3 < N_REG_CLASSES; cl3++)
1256 	    {
1257 	      temp_hard_regset = reg_class_contents[cl3] & ~no_unit_alloc_regs;
1258 	      if (hard_reg_set_subset_p (temp_hard_regset, intersection_set))
1259 		{
1260 		  /* CL3 allocatable hard register set is inside of
1261 		     intersection of allocatable hard register sets
1262 		     of CL1 and CL2.  */
1263 		  if (important_class_p[cl3])
1264 		    {
1265 		      temp_set2
1266 			= (reg_class_contents
1267 			   [ira_reg_class_intersect[cl1][cl2]]);
1268 		      temp_set2 &= ~no_unit_alloc_regs;
1269 		      if (! hard_reg_set_subset_p (temp_hard_regset, temp_set2)
1270 			  /* If the allocatable hard register sets are
1271 			     the same, prefer GENERAL_REGS or the
1272 			     smallest class for debugging
1273 			     purposes.  */
1274 			  || (temp_hard_regset == temp_set2
1275 			      && (cl3 == GENERAL_REGS
1276 				  || ((ira_reg_class_intersect[cl1][cl2]
1277 				       != GENERAL_REGS)
1278 				      && hard_reg_set_subset_p
1279 				         (reg_class_contents[cl3],
1280 					  reg_class_contents
1281 					  [(int)
1282 					   ira_reg_class_intersect[cl1][cl2]])))))
1283 			ira_reg_class_intersect[cl1][cl2] = (enum reg_class) cl3;
1284 		    }
1285 		  temp_set2
1286 		    = (reg_class_contents[ira_reg_class_subset[cl1][cl2]]
1287 		       & ~no_unit_alloc_regs);
1288 		  if (! hard_reg_set_subset_p (temp_hard_regset, temp_set2)
1289 		      /* Ignore unavailable hard registers and prefer
1290 			 smallest class for debugging purposes.  */
1291 		      || (temp_hard_regset == temp_set2
1292 			  && hard_reg_set_subset_p
1293 			     (reg_class_contents[cl3],
1294 			      reg_class_contents
1295 			      [(int) ira_reg_class_subset[cl1][cl2]])))
1296 		    ira_reg_class_subset[cl1][cl2] = (enum reg_class) cl3;
1297 		}
1298 	      if (important_class_p[cl3]
1299 		  && hard_reg_set_subset_p (temp_hard_regset, union_set))
1300 		{
1301 		  /* CL3 allocatable hard register set is inside of
1302 		     union of allocatable hard register sets of CL1
1303 		     and CL2.  */
1304 		  temp_set2
1305 		    = (reg_class_contents[ira_reg_class_subunion[cl1][cl2]]
1306 		       & ~no_unit_alloc_regs);
1307 	 	  if (ira_reg_class_subunion[cl1][cl2] == NO_REGS
1308 		      || (hard_reg_set_subset_p (temp_set2, temp_hard_regset)
1309 
1310 			  && (temp_set2 != temp_hard_regset
1311 			      || cl3 == GENERAL_REGS
1312 			      /* If the allocatable hard register sets are the
1313 				 same, prefer GENERAL_REGS or the smallest
1314 				 class for debugging purposes.  */
1315 			      || (ira_reg_class_subunion[cl1][cl2] != GENERAL_REGS
1316 				  && hard_reg_set_subset_p
1317 				     (reg_class_contents[cl3],
1318 				      reg_class_contents
1319 				      [(int) ira_reg_class_subunion[cl1][cl2]])))))
1320 		    ira_reg_class_subunion[cl1][cl2] = (enum reg_class) cl3;
1321 		}
1322 	      if (hard_reg_set_subset_p (union_set, temp_hard_regset))
1323 		{
1324 		  /* CL3 allocatable hard register set contains union
1325 		     of allocatable hard register sets of CL1 and
1326 		     CL2.  */
1327 		  temp_set2
1328 		    = (reg_class_contents[ira_reg_class_superunion[cl1][cl2]]
1329 		       & ~no_unit_alloc_regs);
1330 	 	  if (ira_reg_class_superunion[cl1][cl2] == NO_REGS
1331 		      || (hard_reg_set_subset_p (temp_hard_regset, temp_set2)
1332 
1333 			  && (temp_set2 != temp_hard_regset
1334 			      || cl3 == GENERAL_REGS
1335 			      /* If the allocatable hard register sets are the
1336 				 same, prefer GENERAL_REGS or the smallest
1337 				 class for debugging purposes.  */
1338 			      || (ira_reg_class_superunion[cl1][cl2] != GENERAL_REGS
1339 				  && hard_reg_set_subset_p
1340 				     (reg_class_contents[cl3],
1341 				      reg_class_contents
1342 				      [(int) ira_reg_class_superunion[cl1][cl2]])))))
1343 		    ira_reg_class_superunion[cl1][cl2] = (enum reg_class) cl3;
1344 		}
1345 	    }
1346 	}
1347     }
1348 }
1349 
1350 /* Output all uniform and important classes into file F.  */
1351 static void
1352 print_uniform_and_important_classes (FILE *f)
1353 {
1354   int i, cl;
1355 
1356   fprintf (f, "Uniform classes:\n");
1357   for (cl = 0; cl < N_REG_CLASSES; cl++)
1358     if (ira_uniform_class_p[cl])
1359       fprintf (f, " %s", reg_class_names[cl]);
1360   fprintf (f, "\nImportant classes:\n");
1361   for (i = 0; i < ira_important_classes_num; i++)
1362     fprintf (f, " %s", reg_class_names[ira_important_classes[i]]);
1363   fprintf (f, "\n");
1364 }
1365 
1366 /* Output all possible allocno or pressure classes and their
1367    translation map into file F.  */
1368 static void
1369 print_translated_classes (FILE *f, bool pressure_p)
1370 {
1371   int classes_num = (pressure_p
1372 		     ? ira_pressure_classes_num : ira_allocno_classes_num);
1373   enum reg_class *classes = (pressure_p
1374 			     ? ira_pressure_classes : ira_allocno_classes);
1375   enum reg_class *class_translate = (pressure_p
1376 				     ? ira_pressure_class_translate
1377 				     : ira_allocno_class_translate);
1378   int i;
1379 
1380   fprintf (f, "%s classes:\n", pressure_p ? "Pressure" : "Allocno");
1381   for (i = 0; i < classes_num; i++)
1382     fprintf (f, " %s", reg_class_names[classes[i]]);
1383   fprintf (f, "\nClass translation:\n");
1384   for (i = 0; i < N_REG_CLASSES; i++)
1385     fprintf (f, " %s -> %s\n", reg_class_names[i],
1386 	     reg_class_names[class_translate[i]]);
1387 }
1388 
1389 /* Output all possible allocno and pressure classes and their
1390    translation maps into stderr.  */
1391 void
1392 ira_debug_allocno_classes (void)
1393 {
1394   print_uniform_and_important_classes (stderr);
1395   print_translated_classes (stderr, false);
1396   print_translated_classes (stderr, true);
1397 }
1398 
1399 /* Set up different arrays concerning class subsets, allocno and
1400    important classes.  */
1401 static void
1402 find_reg_classes (void)
1403 {
1404   setup_allocno_and_important_classes ();
1405   setup_class_translate ();
1406   reorder_important_classes ();
1407   setup_reg_class_relations ();
1408 }
1409 
1410 
1411 
1412 /* Set up the ira_hard_regno_allocno_class array defined above.  */
1413 static void
1414 setup_hard_regno_aclass (void)
1415 {
1416   int i;
1417 
1418   for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1419     {
1420 #if 1
1421       ira_hard_regno_allocno_class[i]
1422 	= (TEST_HARD_REG_BIT (no_unit_alloc_regs, i)
1423 	   ? NO_REGS
1424 	   : ira_allocno_class_translate[REGNO_REG_CLASS (i)]);
1425 #else
1426       int j;
1427       enum reg_class cl;
1428       ira_hard_regno_allocno_class[i] = NO_REGS;
1429       for (j = 0; j < ira_allocno_classes_num; j++)
1430  	{
1431 	  cl = ira_allocno_classes[j];
1432  	  if (ira_class_hard_reg_index[cl][i] >= 0)
1433  	    {
1434 	      ira_hard_regno_allocno_class[i] = cl;
1435  	      break;
1436  	    }
1437  	}
1438 #endif
1439     }
1440 }
1441 
1442 
1443 
1444 /* Form IRA_REG_CLASS_MAX_NREGS and IRA_REG_CLASS_MIN_NREGS maps.  */
1445 static void
1446 setup_reg_class_nregs (void)
1447 {
1448   int i, cl, cl2, m;
1449 
1450   for (m = 0; m < MAX_MACHINE_MODE; m++)
1451     {
1452       for (cl = 0; cl < N_REG_CLASSES; cl++)
1453 	ira_reg_class_max_nregs[cl][m]
1454 	  = ira_reg_class_min_nregs[cl][m]
1455 	  = targetm.class_max_nregs ((reg_class_t) cl, (machine_mode) m);
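      /* targetm.class_max_nregs only gives an upper bound; the minimum is
	 refined below by taking the smallest value over the allocatable
	 subclasses of each class.  */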
1456       for (cl = 0; cl < N_REG_CLASSES; cl++)
1457 	for (i = 0;
1458 	     (cl2 = alloc_reg_class_subclasses[cl][i]) != LIM_REG_CLASSES;
1459 	     i++)
1460 	  if (ira_reg_class_min_nregs[cl2][m]
1461 	      < ira_reg_class_min_nregs[cl][m])
1462 	    ira_reg_class_min_nregs[cl][m] = ira_reg_class_min_nregs[cl2][m];
1463     }
1464 }
1465 
1466 
1467 
1468 /* Set up IRA_PROHIBITED_CLASS_MODE_REGS and IRA_CLASS_SINGLETON.
1469    This function is called once IRA_CLASS_HARD_REGS has been initialized.  */
1470 static void
1471 setup_prohibited_class_mode_regs (void)
1472 {
1473   int j, k, hard_regno, cl, last_hard_regno, count;
1474 
1475   for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
1476     {
1477       temp_hard_regset = reg_class_contents[cl] & ~no_unit_alloc_regs;
1478       for (j = 0; j < NUM_MACHINE_MODES; j++)
1479 	{
1480 	  count = 0;
1481 	  last_hard_regno = -1;
1482 	  CLEAR_HARD_REG_SET (ira_prohibited_class_mode_regs[cl][j]);
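	  /* Mark the allocatable registers of CL that cannot hold mode J as
	     prohibited, and count (roughly) those that can, so that a class
	     with exactly one usable register is recorded as a singleton
	     below.  */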
1483 	  for (k = ira_class_hard_regs_num[cl] - 1; k >= 0; k--)
1484 	    {
1485 	      hard_regno = ira_class_hard_regs[cl][k];
1486 	      if (!targetm.hard_regno_mode_ok (hard_regno, (machine_mode) j))
1487 		SET_HARD_REG_BIT (ira_prohibited_class_mode_regs[cl][j],
1488 				  hard_regno);
1489 	      else if (in_hard_reg_set_p (temp_hard_regset,
1490 					  (machine_mode) j, hard_regno))
1491 		{
1492 		  last_hard_regno = hard_regno;
1493 		  count++;
1494 		}
1495 	    }
1496 	  ira_class_singleton[cl][j] = (count == 1 ? last_hard_regno : -1);
1497 	}
1498     }
1499 }
1500 
1501 /* Clarify IRA_PROHIBITED_CLASS_MODE_REGS by excluding hard registers
1502    spanning from one register pressure class to another one.  It is
1503    called after defining the pressure classes.  */
1504 static void
1505 clarify_prohibited_class_mode_regs (void)
1506 {
1507   int j, k, hard_regno, cl, pclass, nregs;
1508 
1509   for (cl = (int) N_REG_CLASSES - 1; cl >= 0; cl--)
1510     for (j = 0; j < NUM_MACHINE_MODES; j++)
1511       {
1512 	CLEAR_HARD_REG_SET (ira_useful_class_mode_regs[cl][j]);
1513 	for (k = ira_class_hard_regs_num[cl] - 1; k >= 0; k--)
1514 	  {
1515 	    hard_regno = ira_class_hard_regs[cl][k];
1516 	    if (TEST_HARD_REG_BIT (ira_prohibited_class_mode_regs[cl][j], hard_regno))
1517 	      continue;
1518 	    nregs = hard_regno_nregs (hard_regno, (machine_mode) j);
1519 	    if (hard_regno + nregs > FIRST_PSEUDO_REGISTER)
1520 	      {
1521 		SET_HARD_REG_BIT (ira_prohibited_class_mode_regs[cl][j],
1522 				  hard_regno);
1523 		 continue;
1524 	      }
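	    /* A value occupying several hard registers must not straddle
	       two different pressure classes, otherwise register pressure
	       would be mis-accounted; prohibit such starting registers.  */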
1525 	    pclass = ira_pressure_class_translate[REGNO_REG_CLASS (hard_regno)];
1526 	    for (nregs--; nregs >= 0; nregs--)
1527 	      if (((enum reg_class) pclass
1528 		   != ira_pressure_class_translate[REGNO_REG_CLASS
1529 						   (hard_regno + nregs)]))
1530 		{
1531 		  SET_HARD_REG_BIT (ira_prohibited_class_mode_regs[cl][j],
1532 				    hard_regno);
1533 		  break;
1534 		}
1535 	    if (!TEST_HARD_REG_BIT (ira_prohibited_class_mode_regs[cl][j],
1536 				    hard_regno))
1537 	      add_to_hard_reg_set (&ira_useful_class_mode_regs[cl][j],
1538 				   (machine_mode) j, hard_regno);
1539 	  }
1540       }
1541 }
1542 
1543 /* Allocate and initialize IRA_REGISTER_MOVE_COST, IRA_MAY_MOVE_IN_COST
1544    and IRA_MAY_MOVE_OUT_COST for MODE.  */
1545 void
1546 ira_init_register_move_cost (machine_mode mode)
1547 {
1548   static unsigned short last_move_cost[N_REG_CLASSES][N_REG_CLASSES];
1549   bool all_match = true;
1550   unsigned int i, cl1, cl2;
1551   HARD_REG_SET ok_regs;
1552 
1553   ira_assert (ira_register_move_cost[mode] == NULL
1554 	      && ira_may_move_in_cost[mode] == NULL
1555 	      && ira_may_move_out_cost[mode] == NULL);
1556   CLEAR_HARD_REG_SET (ok_regs);
1557   for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1558     if (targetm.hard_regno_mode_ok (i, mode))
1559       SET_HARD_REG_BIT (ok_regs, i);
1560 
1561   /* Note that we might be asked about the move costs of modes that
1562      cannot be stored in any hard register, for example if an inline
1563      asm tries to create a register operand with an impossible mode.
1564      We therefore can't assert have_regs_of_mode[mode] here.  */
1565   for (cl1 = 0; cl1 < N_REG_CLASSES; cl1++)
1566     for (cl2 = 0; cl2 < N_REG_CLASSES; cl2++)
1567       {
1568 	int cost;
1569 	if (!hard_reg_set_intersect_p (ok_regs, reg_class_contents[cl1])
1570 	    || !hard_reg_set_intersect_p (ok_regs, reg_class_contents[cl2]))
1571 	  {
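	    /* No register of CL1 or CL2 can hold MODE, so no direct move is
	       possible.  If one of the classes is too small for MODE, use
	       65535 as an "impossible" cost; otherwise approximate the move
	       with a trip through memory.  */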
1572 	    if ((ira_reg_class_max_nregs[cl1][mode]
1573 		 > ira_class_hard_regs_num[cl1])
1574 		|| (ira_reg_class_max_nregs[cl2][mode]
1575 		    > ira_class_hard_regs_num[cl2]))
1576 	      cost = 65535;
1577 	    else
1578 	      cost = (ira_memory_move_cost[mode][cl1][0]
1579 		      + ira_memory_move_cost[mode][cl2][1]) * 2;
1580 	  }
1581 	else
1582 	  {
1583 	    cost = register_move_cost (mode, (enum reg_class) cl1,
1584 				       (enum reg_class) cl2);
1585 	    ira_assert (cost < 65535);
1586 	  }
1587 	all_match &= (last_move_cost[cl1][cl2] == cost);
1588 	last_move_cost[cl1][cl2] = cost;
1589       }
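  /* If the raw move costs for MODE are identical to those of the mode
     initialized most recently, share that mode's tables instead of
     allocating new ones.  */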
1590   if (all_match && last_mode_for_init_move_cost != -1)
1591     {
1592       ira_register_move_cost[mode]
1593 	= ira_register_move_cost[last_mode_for_init_move_cost];
1594       ira_may_move_in_cost[mode]
1595 	= ira_may_move_in_cost[last_mode_for_init_move_cost];
1596       ira_may_move_out_cost[mode]
1597 	= ira_may_move_out_cost[last_mode_for_init_move_cost];
1598       return;
1599     }
1600   last_mode_for_init_move_cost = mode;
1601   ira_register_move_cost[mode] = XNEWVEC (move_table, N_REG_CLASSES);
1602   ira_may_move_in_cost[mode] = XNEWVEC (move_table, N_REG_CLASSES);
1603   ira_may_move_out_cost[mode] = XNEWVEC (move_table, N_REG_CLASSES);
1604   for (cl1 = 0; cl1 < N_REG_CLASSES; cl1++)
1605     for (cl2 = 0; cl2 < N_REG_CLASSES; cl2++)
1606       {
1607 	int cost;
1608 	enum reg_class *p1, *p2;
1609 
1610 	if (last_move_cost[cl1][cl2] == 65535)
1611 	  {
1612 	    ira_register_move_cost[mode][cl1][cl2] = 65535;
1613 	    ira_may_move_in_cost[mode][cl1][cl2] = 65535;
1614 	    ira_may_move_out_cost[mode][cl1][cl2] = 65535;
1615 	  }
1616 	else
1617 	  {
1618 	    cost = last_move_cost[cl1][cl2];
1619 
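	    /* Raise the cost so it is at least the cost for every
	       allocatable subclass that can hold MODE, since a register of
	       CL1 or CL2 may actually belong to any such subclass.  */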
1620 	    for (p2 = &reg_class_subclasses[cl2][0];
1621 		 *p2 != LIM_REG_CLASSES; p2++)
1622 	      if (ira_class_hard_regs_num[*p2] > 0
1623 		  && (ira_reg_class_max_nregs[*p2][mode]
1624 		      <= ira_class_hard_regs_num[*p2]))
1625 		cost = MAX (cost, ira_register_move_cost[mode][cl1][*p2]);
1626 
1627 	    for (p1 = &reg_class_subclasses[cl1][0];
1628 		 *p1 != LIM_REG_CLASSES; p1++)
1629 	      if (ira_class_hard_regs_num[*p1] > 0
1630 		  && (ira_reg_class_max_nregs[*p1][mode]
1631 		      <= ira_class_hard_regs_num[*p1]))
1632 		cost = MAX (cost, ira_register_move_cost[mode][*p1][cl2]);
1633 
1634 	    ira_assert (cost <= 65535);
1635 	    ira_register_move_cost[mode][cl1][cl2] = cost;
1636 
1637 	    if (ira_class_subset_p[cl1][cl2])
1638 	      ira_may_move_in_cost[mode][cl1][cl2] = 0;
1639 	    else
1640 	      ira_may_move_in_cost[mode][cl1][cl2] = cost;
1641 
1642 	    if (ira_class_subset_p[cl2][cl1])
1643 	      ira_may_move_out_cost[mode][cl1][cl2] = 0;
1644 	    else
1645 	      ira_may_move_out_cost[mode][cl1][cl2] = cost;
1646 	  }
1647       }
1648 }
1649 
1650 
1651 
1652 /* This is called once per compiler run.  It sets up
1653    different arrays whose values don't depend on the function
1654    being compiled.  */
1655 void
ira_init_once(void)1656 ira_init_once (void)
1657 {
1658   ira_init_costs_once ();
1659   lra_init_once ();
1660 
1661   ira_use_lra_p = targetm.lra_p ();
1662 }
1663 
1664 /* Free ira_register_move_cost, ira_may_move_in_cost and
1665    ira_may_move_out_cost for each mode.  */
1666 void
1667 target_ira_int::free_register_move_costs (void)
1668 {
1669   int mode, i;
1670 
1671   /* Reset move_cost and friends, making sure we only free shared
1672      table entries once.  */
1673   for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
1674     if (x_ira_register_move_cost[mode])
1675       {
1676 	for (i = 0;
1677 	     i < mode && (x_ira_register_move_cost[i]
1678 			  != x_ira_register_move_cost[mode]);
1679 	     i++)
1680 	  ;
1681 	if (i == mode)
1682 	  {
1683 	    free (x_ira_register_move_cost[mode]);
1684 	    free (x_ira_may_move_in_cost[mode]);
1685 	    free (x_ira_may_move_out_cost[mode]);
1686 	  }
1687       }
1688   memset (x_ira_register_move_cost, 0, sizeof x_ira_register_move_cost);
1689   memset (x_ira_may_move_in_cost, 0, sizeof x_ira_may_move_in_cost);
1690   memset (x_ira_may_move_out_cost, 0, sizeof x_ira_may_move_out_cost);
1691   last_mode_for_init_move_cost = -1;
1692 }
1693 
1694 target_ira_int::~target_ira_int ()
1695 {
1696   free_ira_costs ();
1697   free_register_move_costs ();
1698 }
1699 
1700 /* This is called every time the register-related information
1701    changes.  */
1702 void
1703 ira_init (void)
1704 {
1705   this_target_ira_int->free_register_move_costs ();
1706   setup_reg_mode_hard_regset ();
1707   setup_alloc_regs (flag_omit_frame_pointer != 0);
1708   setup_class_subset_and_memory_move_costs ();
1709   setup_reg_class_nregs ();
1710   setup_prohibited_class_mode_regs ();
1711   find_reg_classes ();
1712   clarify_prohibited_class_mode_regs ();
1713   setup_hard_regno_aclass ();
1714   ira_init_costs ();
1715 }
1716 
1717 
1718 #define ira_prohibited_mode_move_regs_initialized_p \
1719   (this_target_ira_int->x_ira_prohibited_mode_move_regs_initialized_p)
1720 
1721 /* Set up IRA_PROHIBITED_MODE_MOVE_REGS.  */
1722 static void
1723 setup_prohibited_mode_move_regs (void)
1724 {
1725   int i, j;
1726   rtx test_reg1, test_reg2, move_pat;
1727   rtx_insn *move_insn;
1728 
1729   if (ira_prohibited_mode_move_regs_initialized_p)
1730     return;
1731   ira_prohibited_mode_move_regs_initialized_p = true;
1732   test_reg1 = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);
1733   test_reg2 = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 2);
1734   move_pat = gen_rtx_SET (test_reg1, test_reg2);
1735   move_insn = gen_rtx_INSN (VOIDmode, 0, 0, 0, move_pat, 0, -1, 0);
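  /* The dummy (set reg reg) insn built above is re-targeted to each
     (mode, regno) pair below; recog and constrain_operands then tell us
     whether such a register-to-register move is really supported.  */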
1736   for (i = 0; i < NUM_MACHINE_MODES; i++)
1737     {
1738       SET_HARD_REG_SET (ira_prohibited_mode_move_regs[i]);
1739       for (j = 0; j < FIRST_PSEUDO_REGISTER; j++)
1740 	{
1741 	  if (!targetm.hard_regno_mode_ok (j, (machine_mode) i))
1742 	    continue;
1743 	  set_mode_and_regno (test_reg1, (machine_mode) i, j);
1744 	  set_mode_and_regno (test_reg2, (machine_mode) i, j);
1745 	  INSN_CODE (move_insn) = -1;
1746 	  recog_memoized (move_insn);
1747 	  if (INSN_CODE (move_insn) < 0)
1748 	    continue;
1749 	  extract_insn (move_insn);
1750 	  /* We don't know whether the move will be in code that is optimized
1751 	     for size or speed, so consider all enabled alternatives.  */
1752 	  if (! constrain_operands (1, get_enabled_alternatives (move_insn)))
1753 	    continue;
1754 	  CLEAR_HARD_REG_BIT (ira_prohibited_mode_move_regs[i], j);
1755 	}
1756     }
1757 }
1758 
1759 
1760 
1761 /* Extract INSN and return the set of alternatives that we should consider.
1762    This excludes any alternatives whose constraints are obviously impossible
1763    to meet (e.g. because the constraint requires a constant and the operand
1764    is nonconstant).  It also excludes alternatives that are bound to need
1765    a spill or reload, as long as we have other alternatives that match
1766    exactly.  */
1767 alternative_mask
1768 ira_setup_alts (rtx_insn *insn)
1769 {
1770   int nop, nalt;
1771   bool curr_swapped;
1772   const char *p;
1773   int commutative = -1;
1774 
1775   extract_insn (insn);
1776   preprocess_constraints (insn);
1777   alternative_mask preferred = get_preferred_alternatives (insn);
1778   alternative_mask alts = 0;
1779   alternative_mask exact_alts = 0;
1780   /* Check that the hard reg set is big enough to hold all
1781      alternatives.  It is hard to imagine a situation in which the
1782      assertion would fail.  */
1783   ira_assert (recog_data.n_alternatives
1784 	      <= (int) MAX (sizeof (HARD_REG_ELT_TYPE) * CHAR_BIT,
1785 			    FIRST_PSEUDO_REGISTER));
1786   for (nop = 0; nop < recog_data.n_operands; nop++)
1787     if (recog_data.constraints[nop][0] == '%')
1788       {
1789 	commutative = nop;
1790 	break;
1791       }
1792   for (curr_swapped = false;; curr_swapped = true)
1793     {
1794       for (nalt = 0; nalt < recog_data.n_alternatives; nalt++)
1795 	{
1796 	  if (!TEST_BIT (preferred, nalt) || TEST_BIT (exact_alts, nalt))
1797 	    continue;
1798 
1799 	  const operand_alternative *op_alt
1800 	    = &recog_op_alt[nalt * recog_data.n_operands];
1801 	  int this_reject = 0;
1802 	  for (nop = 0; nop < recog_data.n_operands; nop++)
1803 	    {
1804 	      int c, len;
1805 
1806 	      this_reject += op_alt[nop].reject;
1807 
1808 	      rtx op = recog_data.operand[nop];
1809 	      p = op_alt[nop].constraint;
1810 	      if (*p == 0 || *p == ',')
1811 		continue;
1812 
1813 	      bool win_p = false;
1814 	      do
1815 		switch (c = *p, len = CONSTRAINT_LEN (c, p), c)
1816 		  {
1817 		  case '#':
1818 		  case ',':
1819 		    c = '\0';
1820 		    /* FALLTHRU */
1821 		  case '\0':
1822 		    len = 0;
1823 		    break;
1824 
1825 		  case '%':
1826 		    /* The commutative modifier is handled above.  */
1827 		    break;
1828 
1829 		  case '0':  case '1':  case '2':  case '3':  case '4':
1830 		  case '5':  case '6':  case '7':  case '8':  case '9':
1831 		    {
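		      /* A matching constraint: this operand must end up the
			 same as operand DUP.  If DUP is a memory operand,
			 only an operand identical to it succeeds outright;
			 otherwise a REG or SUBREG operand succeeds, and
			 anything else may still be matched with a reload
			 (WIN_P).  */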
1832 		      char *end;
1833 		      unsigned long dup = strtoul (p, &end, 10);
1834 		      rtx other = recog_data.operand[dup];
1835 		      len = end - p;
1836 		      if (MEM_P (other)
1837 			  ? rtx_equal_p (other, op)
1838 			  : REG_P (op) || SUBREG_P (op))
1839 			goto op_success;
1840 		      win_p = true;
1841 		    }
1842 		    break;
1843 
1844 		  case 'g':
1845 		    goto op_success;
1846 		    break;
1847 
1848 		  default:
1849 		    {
1850 		      enum constraint_num cn = lookup_constraint (p);
1851 		      rtx mem = NULL;
1852 		      switch (get_constraint_type (cn))
1853 			{
1854 			case CT_REGISTER:
1855 			  if (reg_class_for_constraint (cn) != NO_REGS)
1856 			    {
1857 			      if (REG_P (op) || SUBREG_P (op))
1858 				goto op_success;
1859 			      win_p = true;
1860 			    }
1861 			  break;
1862 
1863 			case CT_CONST_INT:
1864 			  if (CONST_INT_P (op)
1865 			      && (insn_const_int_ok_for_constraint
1866 				  (INTVAL (op), cn)))
1867 			    goto op_success;
1868 			  break;
1869 
1870 			case CT_ADDRESS:
1871 			  goto op_success;
1872 
1873 			case CT_MEMORY:
1874 			case CT_RELAXED_MEMORY:
1875 			  mem = op;
1876 			  /* Fall through.  */
1877 			case CT_SPECIAL_MEMORY:
1878 			  if (!mem)
1879 			    mem = extract_mem_from_operand (op);
1880 			  if (MEM_P (mem))
1881 			    goto op_success;
1882 			  win_p = true;
1883 			  break;
1884 
1885 			case CT_FIXED_FORM:
1886 			  if (constraint_satisfied_p (op, cn))
1887 			    goto op_success;
1888 			  break;
1889 			}
1890 		      break;
1891 		    }
1892 		  }
1893 	      while (p += len, c);
1894 	      if (!win_p)
1895 		break;
1896 	      /* We can make the alternative match by spilling a register
1897 		 to memory or loading something into a register.  Count a
1898 		 cost of one reload (the equivalent of the '?' constraint).  */
1899 	      this_reject += 6;
1900 	    op_success:
1901 	      ;
1902 	    }
1903 
1904 	  if (nop >= recog_data.n_operands)
1905 	    {
1906 	      alts |= ALTERNATIVE_BIT (nalt);
1907 	      if (this_reject == 0)
1908 		exact_alts |= ALTERNATIVE_BIT (nalt);
1909 	    }
1910 	}
1911       if (commutative < 0)
1912 	break;
1913       /* Swap back and forth to avoid changing recog_data.  */
1914       std::swap (recog_data.operand[commutative],
1915 		 recog_data.operand[commutative + 1]);
1916       if (curr_swapped)
1917 	break;
1918     }
1919   return exact_alts ? exact_alts : alts;
1920 }
1921 
1922 /* Return the number of the output non-early-clobber operand which
1923    should be the same in any case as the operand with number OP_NUM
1924    (or a negative value if there is no such operand).  ALTS is the
1925    mask of alternatives that we should consider.
1926    SINGLE_INPUT_OP_HAS_CSTR_P is set by this function; it indicates
1927    whether only a single input operand has the matching constraint on
1928    the output operand at the position given by the return value.  If
1929    the pattern allows any one of several input operands to hold the
1930    matching constraint, it is set to false; one typical case is the
1931    destructive FMA instruction on the rs6000 target.  Note that for a
1932    non-NO_REGS preferred register class with no free register-move
1933    copy, if the parameter PARAM_IRA_CONSIDER_DUP_IN_ALL_ALTS is set
1934    to one, this function checks all available alternatives for
1935    matching constraints, even if it has found or will find an
1936    alternative with a non-NO_REGS regclass, and so respects more
1937    cases with matching constraints.  If
1938    PARAM_IRA_CONSIDER_DUP_IN_ALL_ALTS is set to zero,
1939    SINGLE_INPUT_OP_HAS_CSTR_P is always true and the search stops at
1940    the first alternative with a non-NO_REGS regclass.  */
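/* As a minimal example (hypothetical pattern): with output operand 0
   constrained as "=r" and input operand 1 constrained as "0", operand 1
   has a matching constraint on output operand 0 and this function
   returns 0 when called with OP_NUM == 1.  */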
1941 int
1942 ira_get_dup_out_num (int op_num, alternative_mask alts,
1943 		     bool &single_input_op_has_cstr_p)
1944 {
1945   int curr_alt, c, original;
1946   bool ignore_p, use_commut_op_p;
1947   const char *str;
1948 
1949   if (op_num < 0 || recog_data.n_alternatives == 0)
1950     return -1;
1951   /* We should find duplications only for input operands.  */
1952   if (recog_data.operand_type[op_num] != OP_IN)
1953     return -1;
1954   str = recog_data.constraints[op_num];
1955   use_commut_op_p = false;
1956   single_input_op_has_cstr_p = true;
1957 
1958   rtx op = recog_data.operand[op_num];
1959   int op_regno = reg_or_subregno (op);
1960   enum reg_class op_pref_cl = reg_preferred_class (op_regno);
1961   machine_mode op_mode = GET_MODE (op);
1962 
1963   ira_init_register_move_cost_if_necessary (op_mode);
1964   /* If the preferred regclass isn't NO_REGS, keep looking for the
1965      matching constraint in all available alternatives with the preferred
1966      regclass, even if we have found or will find an alternative whose
1967      constraint stands for a (non-NO_REGS) REG regclass.  Note that it is
1968      fine not to respect a matching constraint when the register copy is
1969      free, so exclude that case.  */
1970   bool respect_dup_despite_reg_cstr
1971     = param_ira_consider_dup_in_all_alts
1972       && op_pref_cl != NO_REGS
1973       && ira_register_move_cost[op_mode][op_pref_cl][op_pref_cl] > 0;
1974 
1975   /* Record the alternatives whose constraints use the same regclass as
1976      the preferred regclass.  Later, if we find one matching constraint
1977      for this operand with the preferred regclass, we will visit these
1978      recorded alternatives to check whether any of them has no input
1979      operand with a matching constraint the same as our candidate.
1980      If yes, there is an alternative which is perfectly fine without
1981      satisfying this matching constraint.  If no, every such
1982      alternative has some other input operand holding this matching
1983      constraint, so it is fine to respect the matching constraint and
1984      create this constraint copy, since it becomes harmless once some
1985      other operand takes preference and this one is interfered with.  */
1986   alternative_mask pref_cl_alts;
1987 
1988   for (;;)
1989     {
1990       pref_cl_alts = 0;
1991 
1992       for (curr_alt = 0, ignore_p = !TEST_BIT (alts, curr_alt),
1993 	   original = -1;;)
1994 	{
1995 	  c = *str;
1996 	  if (c == '\0')
1997 	    break;
1998 	  if (c == '#')
1999 	    ignore_p = true;
2000 	  else if (c == ',')
2001 	    {
2002 	      curr_alt++;
2003 	      ignore_p = !TEST_BIT (alts, curr_alt);
2004 	    }
2005 	  else if (! ignore_p)
2006 	    switch (c)
2007 	      {
2008 	      case 'g':
2009 		goto fail;
2010 	      default:
2011 		{
2012 		  enum constraint_num cn = lookup_constraint (str);
2013 		  enum reg_class cl = reg_class_for_constraint (cn);
2014 		  if (cl != NO_REGS && !targetm.class_likely_spilled_p (cl))
2015 		    {
2016 		      if (respect_dup_despite_reg_cstr)
2017 			{
2018 			  /* If moving from the preferred class to this
2019 			     constraint's class is free, there is no need to
2020 			     respect this matching constraint at a cost.  */
2021 			  if (cl != op_pref_cl
2022 			      && (ira_reg_class_intersect[cl][op_pref_cl]
2023 				  != NO_REGS)
2024 			      && (ira_may_move_in_cost[op_mode][op_pref_cl][cl]
2025 				  == 0))
2026 			    goto fail;
2027 			  else if (cl == op_pref_cl)
2028 			    pref_cl_alts |= ALTERNATIVE_BIT (curr_alt);
2029 			}
2030 		      else
2031 			goto fail;
2032 		    }
2033 		  if (constraint_satisfied_p (op, cn))
2034 		    goto fail;
2035 		  break;
2036 		}
2037 
2038 	      case '0': case '1': case '2': case '3': case '4':
2039 	      case '5': case '6': case '7': case '8': case '9':
2040 		{
2041 		  char *end;
2042 		  int n = (int) strtoul (str, &end, 10);
2043 		  str = end;
2044 		  if (original != -1 && original != n)
2045 		    goto fail;
2046 		  gcc_assert (n < recog_data.n_operands);
2047 		  if (respect_dup_despite_reg_cstr)
2048 		    {
2049 		      const operand_alternative *op_alt
2050 			= &recog_op_alt[curr_alt * recog_data.n_operands];
2051 		      /* Only respect the alternative with the preferred
2052 			 regclass; without respect_dup_despite_reg_cstr it is
2053 			 possible to pick first an alternative whose regclass
2054 			 isn't preferred, which would then fail because there
2055 			 are other alternatives with the preferred regclass.  */
2056 		      if (op_alt[n].cl == op_pref_cl)
2057 			original = n;
2058 		    }
2059 		  else
2060 		    original = n;
2061 		  continue;
2062 		}
2063 	      }
2064 	  str += CONSTRAINT_LEN (c, str);
2065 	}
2066       if (original == -1)
2067 	goto fail;
2068       if (recog_data.operand_type[original] == OP_OUT)
2069 	{
2070 	  if (pref_cl_alts == 0)
2071 	    return original;
2072 	  /* Visit the recorded alternatives to check whether there is
2073 	     one alternative in which no input operand has a matching
2074 	     constraint the same as our candidate.  Give up on this
2075 	     candidate if so.  */
2076 	  int nop, nalt;
2077 	  for (nalt = 0; nalt < recog_data.n_alternatives; nalt++)
2078 	    {
2079 	      if (!TEST_BIT (pref_cl_alts, nalt))
2080 		continue;
2081 	      const operand_alternative *op_alt
2082 		= &recog_op_alt[nalt * recog_data.n_operands];
2083 	      bool dup_in_other = false;
2084 	      for (nop = 0; nop < recog_data.n_operands; nop++)
2085 		{
2086 		  if (recog_data.operand_type[nop] != OP_IN)
2087 		    continue;
2088 		  if (nop == op_num)
2089 		    continue;
2090 		  if (op_alt[nop].matches == original)
2091 		    {
2092 		      dup_in_other = true;
2093 		      break;
2094 		    }
2095 		}
2096 	      if (!dup_in_other)
2097 		return -1;
2098 	    }
2099 	  single_input_op_has_cstr_p = false;
2100 	  return original;
2101 	}
2102     fail:
2103       if (use_commut_op_p)
2104 	break;
2105       use_commut_op_p = true;
2106       if (recog_data.constraints[op_num][0] == '%')
2107 	str = recog_data.constraints[op_num + 1];
2108       else if (op_num > 0 && recog_data.constraints[op_num - 1][0] == '%')
2109 	str = recog_data.constraints[op_num - 1];
2110       else
2111 	break;
2112     }
2113   return -1;
2114 }
2115 
2116 
2117 
2118 /* Search forward to see if the source register of a copy insn dies
2119    before either it or the destination register is modified, but don't
2120    scan past the end of the basic block.  If so, we can replace the
2121    source with the destination and let the source die in the copy
2122    insn.
2123 
2124    This will reduce the number of registers live in that range and may
2125    enable coalescing of the destination and the source, thus often
2126    saving one register in addition to a register-register copy.  */
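/* Schematically (hypothetical pseudos r1, r2):
     r2 = r1; ... use r1 ... (r1 dies)
   is rewritten as
     r2 = r1; ... use r2 ...
   with r1's death note moved up to the copy insn.  */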
2127 
2128 static void
2129 decrease_live_ranges_number (void)
2130 {
2131   basic_block bb;
2132   rtx_insn *insn;
2133   rtx set, src, dest, dest_death, note;
2134   rtx_insn *p, *q;
2135   int sregno, dregno;
2136 
2137   if (! flag_expensive_optimizations)
2138     return;
2139 
2140   if (ira_dump_file)
2141     fprintf (ira_dump_file, "Starting decreasing number of live ranges...\n");
2142 
2143   FOR_EACH_BB_FN (bb, cfun)
2144     FOR_BB_INSNS (bb, insn)
2145       {
2146 	set = single_set (insn);
2147 	if (! set)
2148 	  continue;
2149 	src = SET_SRC (set);
2150 	dest = SET_DEST (set);
2151 	if (! REG_P (src) || ! REG_P (dest)
2152 	    || find_reg_note (insn, REG_DEAD, src))
2153 	  continue;
2154 	sregno = REGNO (src);
2155 	dregno = REGNO (dest);
2156 
2157 	/* We don't want to mess with hard regs if register classes
2158 	   are small.  */
2159 	if (sregno == dregno
2160 	    || (targetm.small_register_classes_for_mode_p (GET_MODE (src))
2161 		&& (sregno < FIRST_PSEUDO_REGISTER
2162 		    || dregno < FIRST_PSEUDO_REGISTER))
2163 	    /* We don't see all updates to SP if they are in an
2164 	       auto-inc memory reference, so we must disallow this
2165 	       optimization on them.  */
2166 	    || sregno == STACK_POINTER_REGNUM
2167 	    || dregno == STACK_POINTER_REGNUM)
2168 	  continue;
2169 
2170 	dest_death = NULL_RTX;
2171 
2172 	for (p = NEXT_INSN (insn); p; p = NEXT_INSN (p))
2173 	  {
2174 	    if (! INSN_P (p))
2175 	      continue;
2176 	    if (BLOCK_FOR_INSN (p) != bb)
2177 	      break;
2178 
2179 	    if (reg_set_p (src, p) || reg_set_p (dest, p)
2180 		/* If SRC is an asm-declared register, it must not be
2181 		   replaced in any asm.  Unfortunately, the REG_EXPR
2182 		   tree for the asm variable may be absent in the SRC
2183 		   rtx, so we can't check the actual register
2184 		   declaration easily (the asm operand will have it,
2185 		   though).  To avoid complicating the test for a rare
2186 		   case, we just don't perform register replacement
2187 		   for a hard reg mentioned in an asm.  */
2188 		|| (sregno < FIRST_PSEUDO_REGISTER
2189 		    && asm_noperands (PATTERN (p)) >= 0
2190 		    && reg_overlap_mentioned_p (src, PATTERN (p)))
2191 		/* Don't change hard registers used by a call.  */
2192 		|| (CALL_P (p) && sregno < FIRST_PSEUDO_REGISTER
2193 		    && find_reg_fusage (p, USE, src))
2194 		/* Don't change a USE of a register.  */
2195 		|| (GET_CODE (PATTERN (p)) == USE
2196 		    && reg_overlap_mentioned_p (src, XEXP (PATTERN (p), 0))))
2197 	      break;
2198 
2199 	    /* See if all of SRC dies in P.  This test is slightly
2200 	       more conservative than it needs to be.  */
2201 	    if ((note = find_regno_note (p, REG_DEAD, sregno))
2202 		&& GET_MODE (XEXP (note, 0)) == GET_MODE (src))
2203 	      {
2204 		int failed = 0;
2205 
2206 		/* We can do the optimization.  Scan forward from INSN
2207 		   again, replacing regs as we go.  Set FAILED if a
2208 		   replacement can't be done.  In that case, we can't
2209 		   move the death note for SRC.  This should be
2210 		   rare.  */
2211 
2212 		/* Set to stop at next insn.  */
2213 		for (q = next_real_insn (insn);
2214 		     q != next_real_insn (p);
2215 		     q = next_real_insn (q))
2216 		  {
2217 		    if (reg_overlap_mentioned_p (src, PATTERN (q)))
2218 		      {
2219 			/* If SRC is a hard register, we might miss
2220 			   some overlapping registers with
2221 			   validate_replace_rtx, so we would have to
2222 			   undo it.  We can't if DEST is present in
2223 			   the insn, so fail in that combination of
2224 			   cases.  */
2225 			if (sregno < FIRST_PSEUDO_REGISTER
2226 			    && reg_mentioned_p (dest, PATTERN (q)))
2227 			  failed = 1;
2228 
2229 			/* Attempt to replace all uses.  */
2230 			else if (!validate_replace_rtx (src, dest, q))
2231 			  failed = 1;
2232 
2233 			/* If this succeeded, but some part of the
2234 			   register is still present, undo the
2235 			   replacement.  */
2236 			else if (sregno < FIRST_PSEUDO_REGISTER
2237 				 && reg_overlap_mentioned_p (src, PATTERN (q)))
2238 			  {
2239 			    validate_replace_rtx (dest, src, q);
2240 			    failed = 1;
2241 			  }
2242 		      }
2243 
2244 		    /* If DEST dies here, remove the death note and
2245 		       save it for later.  Make sure ALL of DEST dies
2246 		       here; again, this is overly conservative.  */
2247 		    if (! dest_death
2248 			&& (dest_death = find_regno_note (q, REG_DEAD, dregno)))
2249 		      {
2250 			if (GET_MODE (XEXP (dest_death, 0)) == GET_MODE (dest))
2251 			  remove_note (q, dest_death);
2252 			else
2253 			  {
2254 			    failed = 1;
2255 			    dest_death = 0;
2256 			  }
2257 		      }
2258 		  }
2259 
2260 		if (! failed)
2261 		  {
2262 		    /* Move death note of SRC from P to INSN.  */
2263 		    remove_note (p, note);
2264 		    XEXP (note, 1) = REG_NOTES (insn);
2265 		    REG_NOTES (insn) = note;
2266 		  }
2267 
2268 		/* DEST is also dead if INSN has a REG_UNUSED note for
2269 		   DEST.  */
2270 		if (! dest_death
2271 		    && (dest_death
2272 			= find_regno_note (insn, REG_UNUSED, dregno)))
2273 		  {
2274 		    PUT_REG_NOTE_KIND (dest_death, REG_DEAD);
2275 		    remove_note (insn, dest_death);
2276 		  }
2277 
2278 		/* Put death note of DEST on P if we saw it die.  */
2279 		if (dest_death)
2280 		  {
2281 		    XEXP (dest_death, 1) = REG_NOTES (p);
2282 		    REG_NOTES (p) = dest_death;
2283 		  }
2284 		break;
2285 	      }
2286 
2287 	    /* If SRC is a hard register which is set or killed in
2288 	       some other way, we can't do this optimization.  */
2289 	    else if (sregno < FIRST_PSEUDO_REGISTER && dead_or_set_p (p, src))
2290 	      break;
2291 	  }
2292       }
2293 }
2294 
2295 
2296 
2297 /* Return nonzero if REGNO is a particularly bad choice for reloading X.  */
2298 static bool
2299 ira_bad_reload_regno_1 (int regno, rtx x)
2300 {
2301   int x_regno, n, i;
2302   ira_allocno_t a;
2303   enum reg_class pref;
2304 
2305   /* We only deal with pseudo regs.  */
2306   if (! x || GET_CODE (x) != REG)
2307     return false;
2308 
2309   x_regno = REGNO (x);
2310   if (x_regno < FIRST_PSEUDO_REGISTER)
2311     return false;
2312 
2313   /* If the pseudo prefers REGNO explicitly, then do not consider
2314      REGNO a bad spill choice.  */
2315   pref = reg_preferred_class (x_regno);
2316   if (reg_class_size[pref] == 1)
2317     return !TEST_HARD_REG_BIT (reg_class_contents[pref], regno);
2318 
2319   /* If the pseudo conflicts with REGNO, then we consider REGNO a
2320      poor choice for a reload regno.  */
2321   a = ira_regno_allocno_map[x_regno];
2322   n = ALLOCNO_NUM_OBJECTS (a);
2323   for (i = 0; i < n; i++)
2324     {
2325       ira_object_t obj = ALLOCNO_OBJECT (a, i);
2326       if (TEST_HARD_REG_BIT (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), regno))
2327 	return true;
2328     }
2329   return false;
2330 }
2331 
2332 /* Return nonzero if REGNO is a particularly bad choice for reloading
2333    IN or OUT.  */
2334 bool
2335 ira_bad_reload_regno (int regno, rtx in, rtx out)
2336 {
2337   return (ira_bad_reload_regno_1 (regno, in)
2338 	  || ira_bad_reload_regno_1 (regno, out));
2339 }
2340 
2341 /* Add register clobbers from asm statements.  */
2342 static void
2343 compute_regs_asm_clobbered (void)
2344 {
2345   basic_block bb;
2346 
2347   FOR_EACH_BB_FN (bb, cfun)
2348     {
2349       rtx_insn *insn;
2350       FOR_BB_INSNS_REVERSE (bb, insn)
2351 	{
2352 	  df_ref def;
2353 
2354 	  if (NONDEBUG_INSN_P (insn) && asm_noperands (PATTERN (insn)) >= 0)
2355 	    FOR_EACH_INSN_DEF (def, insn)
2356 	      {
2357 		unsigned int dregno = DF_REF_REGNO (def);
2358 		if (HARD_REGISTER_NUM_P (dregno))
2359 		  add_to_hard_reg_set (&crtl->asm_clobbers,
2360 				       GET_MODE (DF_REF_REAL_REG (def)),
2361 				       dregno);
2362 	      }
2363 	}
2364     }
2365 }
2366 
2367 
2368 /* Set up ELIMINABLE_REGSET, IRA_NO_ALLOC_REGS, and
2369    REGS_EVER_LIVE.  */
2370 void
2371 ira_setup_eliminable_regset (void)
2372 {
2373   int i;
2374   static const struct {const int from, to; } eliminables[] = ELIMINABLE_REGS;
2375   int fp_reg_count = hard_regno_nregs (HARD_FRAME_POINTER_REGNUM, Pmode);
2376 
2377   /* Setup is_leaf as frame_pointer_required may use it.  This function
2378      is called by sched_init before ira if scheduling is enabled.  */
2379   crtl->is_leaf = leaf_function_p ();
2380 
2381   /* FIXME: If EXIT_IGNORE_STACK is set, we will not save and restore
2382      sp for alloca.  So we can't eliminate the frame pointer in that
2383      case.  At some point, we should improve this by emitting the
2384      sp-adjusting insns for this case.  */
2385   frame_pointer_needed
2386     = (! flag_omit_frame_pointer
2387        || (cfun->calls_alloca && EXIT_IGNORE_STACK)
2388        /* We need the frame pointer to catch stack overflow exceptions if
2389 	  the stack pointer is moving (as for the alloca case just above).  */
2390        || (STACK_CHECK_MOVING_SP
2391 	   && flag_stack_check
2392 	   && flag_exceptions
2393 	   && cfun->can_throw_non_call_exceptions)
2394        || crtl->accesses_prior_frames
2395        || (SUPPORTS_STACK_ALIGNMENT && crtl->stack_realign_needed)
2396        || targetm.frame_pointer_required ());
2397 
2398     /* The chance that FRAME_POINTER_NEEDED is changed from inspecting
2399        RTL is very small.  So if we use frame pointer for RA and RTL
2400        actually prevents this, we will spill pseudos assigned to the
2401        frame pointer in LRA.  */
2402 
2403   if (frame_pointer_needed)
2404     for (i = 0; i < fp_reg_count; i++)
2405       df_set_regs_ever_live (HARD_FRAME_POINTER_REGNUM + i, true);
2406 
2407   ira_no_alloc_regs = no_unit_alloc_regs;
2408   CLEAR_HARD_REG_SET (eliminable_regset);
2409 
2410   compute_regs_asm_clobbered ();
2411 
2412   /* Build the regset of all eliminable registers and show we can't
2413      use those that we already know won't be eliminated.  */
2414   for (i = 0; i < (int) ARRAY_SIZE (eliminables); i++)
2415     {
2416       bool cannot_elim
2417 	= (! targetm.can_eliminate (eliminables[i].from, eliminables[i].to)
2418 	   || (eliminables[i].to == STACK_POINTER_REGNUM && frame_pointer_needed));
2419 
2420       if (!TEST_HARD_REG_BIT (crtl->asm_clobbers, eliminables[i].from))
2421 	{
2422 	    SET_HARD_REG_BIT (eliminable_regset, eliminables[i].from);
2423 
2424 	    if (cannot_elim)
2425 	      SET_HARD_REG_BIT (ira_no_alloc_regs, eliminables[i].from);
2426 	}
2427       else if (cannot_elim)
2428 	error ("%s cannot be used in %<asm%> here",
2429 	       reg_names[eliminables[i].from]);
2430       else
2431 	df_set_regs_ever_live (eliminables[i].from, true);
2432     }
2433   if (!HARD_FRAME_POINTER_IS_FRAME_POINTER)
2434     {
2435       for (i = 0; i < fp_reg_count; i++)
2436 	if (global_regs[HARD_FRAME_POINTER_REGNUM + i])
2437 	  /* Nothing to do: the register is already treated as live
2438 	     where appropriate, and cannot be eliminated.  */
2439 	  ;
2440 	else if (!TEST_HARD_REG_BIT (crtl->asm_clobbers,
2441 				     HARD_FRAME_POINTER_REGNUM + i))
2442 	  {
2443 	    SET_HARD_REG_BIT (eliminable_regset,
2444 			      HARD_FRAME_POINTER_REGNUM + i);
2445 	    if (frame_pointer_needed)
2446 	      SET_HARD_REG_BIT (ira_no_alloc_regs,
2447 				HARD_FRAME_POINTER_REGNUM + i);
2448 	  }
2449 	else if (frame_pointer_needed)
2450 	  error ("%s cannot be used in %<asm%> here",
2451 		 reg_names[HARD_FRAME_POINTER_REGNUM + i]);
2452 	else
2453 	  df_set_regs_ever_live (HARD_FRAME_POINTER_REGNUM + i, true);
2454     }
2455 }
2456 
2457 
2458 
2459 /* Vector of substitutions of register numbers,
2460    used to map pseudo regs into hardware regs.
2461    This is set up as a result of register allocation.
2462    Element N is the hard reg assigned to pseudo reg N,
2463    or is -1 if no hard reg was assigned.
2464    If N is a hard reg number, element N is N.  */
2465 short *reg_renumber;
2466 
2467 /* Set up REG_RENUMBER and CALLER_SAVE_NEEDED (used by reload) from
2468    the allocation found by IRA.  */
2469 static void
2470 setup_reg_renumber (void)
2471 {
2472   int regno, hard_regno;
2473   ira_allocno_t a;
2474   ira_allocno_iterator ai;
2475 
2476   caller_save_needed = 0;
2477   FOR_EACH_ALLOCNO (a, ai)
2478     {
2479       if (ira_use_lra_p && ALLOCNO_CAP_MEMBER (a) != NULL)
2480 	continue;
2481       /* There are no caps at this point.  */
2482       ira_assert (ALLOCNO_CAP_MEMBER (a) == NULL);
2483       if (! ALLOCNO_ASSIGNED_P (a))
2484 	/* It can happen if A is not referenced but partially anticipated
2485 	   somewhere in a region.  */
2486 	ALLOCNO_ASSIGNED_P (a) = true;
2487       ira_free_allocno_updated_costs (a);
2488       hard_regno = ALLOCNO_HARD_REGNO (a);
2489       regno = ALLOCNO_REGNO (a);
2490       reg_renumber[regno] = (hard_regno < 0 ? -1 : hard_regno);
2491       if (hard_regno >= 0)
2492 	{
2493 	  int i, nwords;
2494 	  enum reg_class pclass;
2495 	  ira_object_t obj;
2496 
2497 	  pclass = ira_pressure_class_translate[REGNO_REG_CLASS (hard_regno)];
2498 	  nwords = ALLOCNO_NUM_OBJECTS (a);
2499 	  for (i = 0; i < nwords; i++)
2500 	    {
2501 	      obj = ALLOCNO_OBJECT (a, i);
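	      /* Record every hard register outside the allocno's pressure
		 class as conflicting with this object.  */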
2502 	      OBJECT_TOTAL_CONFLICT_HARD_REGS (obj)
2503 		|= ~reg_class_contents[pclass];
2504 	    }
2505 	  if (ira_need_caller_save_p (a, hard_regno))
2506 	    {
2507 	      ira_assert (!optimize || flag_caller_saves
2508 			  || (ALLOCNO_CALLS_CROSSED_NUM (a)
2509 			      == ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a))
2510 			  || regno >= ira_reg_equiv_len
2511 			  || ira_equiv_no_lvalue_p (regno));
2512 	      caller_save_needed = 1;
2513 	    }
2514 	}
2515     }
2516 }
2517 
2518 /* Set up allocno assignment flags for further allocation
2519    improvements.  */
2520 static void
2521 setup_allocno_assignment_flags (void)
2522 {
2523   int hard_regno;
2524   ira_allocno_t a;
2525   ira_allocno_iterator ai;
2526 
2527   FOR_EACH_ALLOCNO (a, ai)
2528     {
2529       if (! ALLOCNO_ASSIGNED_P (a))
2530 	/* It can happen if A is not referenced but partially anticipated
2531 	   somewhere in a region.  */
2532 	ira_free_allocno_updated_costs (a);
2533       hard_regno = ALLOCNO_HARD_REGNO (a);
2534       /* Don't assign hard registers to allocnos which are the
2535 	 destinations of stores removed at the end of a loop.  It makes
2536 	 no sense to keep the same value in different hard registers.
2537 	 It is also impossible to assign hard registers correctly to
2538 	 such allocnos because the cost info and the info about
2539 	 intersected calls are incorrect for them.  */
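      /* Likewise keep an allocno marked as assigned when spilling it to
	 memory is already cheaper than its best register-class cost.  */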
2540       ALLOCNO_ASSIGNED_P (a) = (hard_regno >= 0
2541 				|| ALLOCNO_EMIT_DATA (a)->mem_optimized_dest_p
2542 				|| (ALLOCNO_MEMORY_COST (a)
2543 				    - ALLOCNO_CLASS_COST (a)) < 0);
2544       ira_assert
2545 	(hard_regno < 0
2546 	 || ira_hard_reg_in_set_p (hard_regno, ALLOCNO_MODE (a),
2547 				   reg_class_contents[ALLOCNO_CLASS (a)]));
2548     }
2549 }
2550 
2551 /* Evaluate overall allocation cost and the costs for using hard
2552    registers and memory for allocnos.  */
2553 static void
2554 calculate_allocation_cost (void)
2555 {
2556   int hard_regno, cost;
2557   ira_allocno_t a;
2558   ira_allocno_iterator ai;
2559 
2560   ira_overall_cost = ira_reg_cost = ira_mem_cost = 0;
2561   FOR_EACH_ALLOCNO (a, ai)
2562     {
2563       hard_regno = ALLOCNO_HARD_REGNO (a);
2564       ira_assert (hard_regno < 0
2565 		  || (ira_hard_reg_in_set_p
2566 		      (hard_regno, ALLOCNO_MODE (a),
2567 		       reg_class_contents[ALLOCNO_CLASS (a)])));
2568       if (hard_regno < 0)
2569 	{
2570 	  cost = ALLOCNO_MEMORY_COST (a);
2571 	  ira_mem_cost += cost;
2572 	}
2573       else if (ALLOCNO_HARD_REG_COSTS (a) != NULL)
2574 	{
2575 	  cost = (ALLOCNO_HARD_REG_COSTS (a)
2576 		  [ira_class_hard_reg_index
2577 		   [ALLOCNO_CLASS (a)][hard_regno]]);
2578 	  ira_reg_cost += cost;
2579 	}
2580       else
2581 	{
2582 	  cost = ALLOCNO_CLASS_COST (a);
2583 	  ira_reg_cost += cost;
2584 	}
2585       ira_overall_cost += cost;
2586     }
2587 
2588   if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
2589     {
2590       fprintf (ira_dump_file,
2591 	       "+++Costs: overall %" PRId64
2592 	       ", reg %" PRId64
2593 	       ", mem %" PRId64
2594 	       ", ld %" PRId64
2595 	       ", st %" PRId64
2596 	       ", move %" PRId64,
2597 	       ira_overall_cost, ira_reg_cost, ira_mem_cost,
2598 	       ira_load_cost, ira_store_cost, ira_shuffle_cost);
2599       fprintf (ira_dump_file, "\n+++       move loops %d, new jumps %d\n",
2600 	       ira_move_loops_num, ira_additional_jumps_num);
2601     }
2602 
2603 }
2604 
2605 #ifdef ENABLE_IRA_CHECKING
2606 /* Check the correctness of the allocation.  We need this because of
2607    the complicated code that transforms the multi-region internal
2608    representation into the one-region representation.  */
2609 static void
2610 check_allocation (void)
2611 {
2612   ira_allocno_t a;
2613   int hard_regno, nregs, conflict_nregs;
2614   ira_allocno_iterator ai;
2615 
2616   FOR_EACH_ALLOCNO (a, ai)
2617     {
2618       int n = ALLOCNO_NUM_OBJECTS (a);
2619       int i;
2620 
2621       if (ALLOCNO_CAP_MEMBER (a) != NULL
2622 	  || (hard_regno = ALLOCNO_HARD_REGNO (a)) < 0)
2623 	continue;
2624       nregs = hard_regno_nregs (hard_regno, ALLOCNO_MODE (a));
2625       if (nregs == 1)
2626 	/* We allocated a single hard register.  */
2627 	n = 1;
2628       else if (n > 1)
2629 	/* We allocated multiple hard registers, and we will test
2630 	   conflicts at the granularity of single hard regs.  */
2631 	nregs = 1;
2632 
2633       for (i = 0; i < n; i++)
2634 	{
2635 	  ira_object_t obj = ALLOCNO_OBJECT (a, i);
2636 	  ira_object_t conflict_obj;
2637 	  ira_object_conflict_iterator oci;
2638 	  int this_regno = hard_regno;
2639 	  if (n > 1)
2640 	    {
2641 	      if (REG_WORDS_BIG_ENDIAN)
2642 		this_regno += n - i - 1;
2643 	      else
2644 		this_regno += i;
2645 	    }
2646 	  FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
2647 	    {
2648 	      ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
2649 	      int conflict_hard_regno = ALLOCNO_HARD_REGNO (conflict_a);
2650 	      if (conflict_hard_regno < 0)
2651 		continue;
2652 
2653 	      conflict_nregs = hard_regno_nregs (conflict_hard_regno,
2654 						 ALLOCNO_MODE (conflict_a));
2655 
2656 	      if (ALLOCNO_NUM_OBJECTS (conflict_a) > 1
2657 		  && conflict_nregs == ALLOCNO_NUM_OBJECTS (conflict_a))
2658 		{
2659 		  if (REG_WORDS_BIG_ENDIAN)
2660 		    conflict_hard_regno += (ALLOCNO_NUM_OBJECTS (conflict_a)
2661 					    - OBJECT_SUBWORD (conflict_obj) - 1);
2662 		  else
2663 		    conflict_hard_regno += OBJECT_SUBWORD (conflict_obj);
2664 		  conflict_nregs = 1;
2665 		}
2666 
2667 	      if ((conflict_hard_regno <= this_regno
2668 		 && this_regno < conflict_hard_regno + conflict_nregs)
2669 		|| (this_regno <= conflict_hard_regno
2670 		    && conflict_hard_regno < this_regno + nregs))
2671 		{
2672 		  fprintf (stderr, "bad allocation for %d and %d\n",
2673 			   ALLOCNO_REGNO (a), ALLOCNO_REGNO (conflict_a));
2674 		  gcc_unreachable ();
2675 		}
2676 	    }
2677 	}
2678     }
2679 }
2680 #endif
2681 
2682 /* Allocate REG_EQUIV_INIT.  Set it up from IRA_REG_EQUIV, which
2683    should already be calculated.  */
2684 static void
2685 setup_reg_equiv_init (void)
2686 {
2687   int i;
2688   int max_regno = max_reg_num ();
2689 
2690   for (i = 0; i < max_regno; i++)
2691     reg_equiv_init (i) = ira_reg_equiv[i].init_insns;
2692 }
2693 
2694 /* Update equiv regno from movement of FROM_REGNO to TO_REGNO.  INSNS
2695    are insns which were generated for such movement.  It is assumed
2696    that FROM_REGNO and TO_REGNO always have the same value at the
2697    point of any move containing such registers. This function is used
2698    to update equiv info for register shuffles on the region borders
2699    and for caller save/restore insns.  */
2700 void
2701 ira_update_equiv_info_by_shuffle_insn (int to_regno, int from_regno, rtx_insn *insns)
2702 {
2703   rtx_insn *insn;
2704   rtx x, note;
2705 
2706   if (! ira_reg_equiv[from_regno].defined_p
2707       && (! ira_reg_equiv[to_regno].defined_p
2708 	  || ((x = ira_reg_equiv[to_regno].memory) != NULL_RTX
2709 	      && ! MEM_READONLY_P (x))))
2710     return;
2711   insn = insns;
2712   if (NEXT_INSN (insn) != NULL_RTX)
2713     {
2714       if (! ira_reg_equiv[to_regno].defined_p)
2715 	{
2716 	  ira_assert (ira_reg_equiv[to_regno].init_insns == NULL_RTX);
2717 	  return;
2718 	}
2719       ira_reg_equiv[to_regno].defined_p = false;
2720       ira_reg_equiv[to_regno].memory
2721 	= ira_reg_equiv[to_regno].constant
2722 	= ira_reg_equiv[to_regno].invariant
2723 	= ira_reg_equiv[to_regno].init_insns = NULL;
2724       if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2725 	fprintf (ira_dump_file,
2726 		 "      Invalidating equiv info for reg %d\n", to_regno);
2727       return;
2728     }
2729   /* It is possible that FROM_REGNO still has no equivalence because
2730      in shuffles to_regno<-from_regno and from_regno<-to_regno the 2nd
2731      insn was not processed yet.  */
2732   if (ira_reg_equiv[from_regno].defined_p)
2733     {
2734       ira_reg_equiv[to_regno].defined_p = true;
2735       if ((x = ira_reg_equiv[from_regno].memory) != NULL_RTX)
2736 	{
2737 	  ira_assert (ira_reg_equiv[from_regno].invariant == NULL_RTX
2738 		      && ira_reg_equiv[from_regno].constant == NULL_RTX);
2739 	  ira_assert (ira_reg_equiv[to_regno].memory == NULL_RTX
2740 		      || rtx_equal_p (ira_reg_equiv[to_regno].memory, x));
2741 	  ira_reg_equiv[to_regno].memory = x;
2742 	  if (! MEM_READONLY_P (x))
2743 	    /* We don't add the insn to the init insn list because a memory
2744 	       equivalence only says which memory is better to use when the
2745 	       pseudo is spilled.  */
2746 	    return;
2747 	}
2748       else if ((x = ira_reg_equiv[from_regno].constant) != NULL_RTX)
2749 	{
2750 	  ira_assert (ira_reg_equiv[from_regno].invariant == NULL_RTX);
2751 	  ira_assert (ira_reg_equiv[to_regno].constant == NULL_RTX
2752 		      || rtx_equal_p (ira_reg_equiv[to_regno].constant, x));
2753 	  ira_reg_equiv[to_regno].constant = x;
2754 	}
2755       else
2756 	{
2757 	  x = ira_reg_equiv[from_regno].invariant;
2758 	  ira_assert (x != NULL_RTX);
2759 	  ira_assert (ira_reg_equiv[to_regno].invariant == NULL_RTX
2760 		      || rtx_equal_p (ira_reg_equiv[to_regno].invariant, x));
2761 	  ira_reg_equiv[to_regno].invariant = x;
2762 	}
2763       if (find_reg_note (insn, REG_EQUIV, x) == NULL_RTX)
2764 	{
2765 	  note = set_unique_reg_note (insn, REG_EQUIV, copy_rtx (x));
2766 	  gcc_assert (note != NULL_RTX);
2767 	  if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2768 	    {
2769 	      fprintf (ira_dump_file,
2770 		       "      Adding equiv note to insn %u for reg %d ",
2771 		       INSN_UID (insn), to_regno);
2772 	      dump_value_slim (ira_dump_file, x, 1);
2773 	      fprintf (ira_dump_file, "\n");
2774 	    }
2775 	}
2776     }
2777   ira_reg_equiv[to_regno].init_insns
2778     = gen_rtx_INSN_LIST (VOIDmode, insn,
2779 			 ira_reg_equiv[to_regno].init_insns);
2780   if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
2781     fprintf (ira_dump_file,
2782 	     "      Adding equiv init move insn %u to reg %d\n",
2783 	     INSN_UID (insn), to_regno);
2784 }
2785 
2786 /* Fix values of array REG_EQUIV_INIT after live range splitting done
2787    by IRA.  */
2788 static void
2789 fix_reg_equiv_init (void)
2790 {
2791   int max_regno = max_reg_num ();
2792   int i, new_regno, max;
2793   rtx set;
2794   rtx_insn_list *x, *next, *prev;
2795   rtx_insn *insn;
2796 
2797   if (max_regno_before_ira < max_regno)
2798     {
2799       max = vec_safe_length (reg_equivs);
2800       grow_reg_equivs ();
2801       for (i = FIRST_PSEUDO_REGISTER; i < max; i++)
2802 	for (prev = NULL, x = reg_equiv_init (i);
2803 	     x != NULL_RTX;
2804 	     x = next)
2805 	  {
2806 	    next = x->next ();
2807 	    insn = x->insn ();
2808 	    set = single_set (insn);
2809 	    ira_assert (set != NULL_RTX
2810 			&& (REG_P (SET_DEST (set)) || REG_P (SET_SRC (set))));
2811 	    if (REG_P (SET_DEST (set))
2812 		&& ((int) REGNO (SET_DEST (set)) == i
2813 		    || (int) ORIGINAL_REGNO (SET_DEST (set)) == i))
2814 	      new_regno = REGNO (SET_DEST (set));
2815 	    else if (REG_P (SET_SRC (set))
2816 		     && ((int) REGNO (SET_SRC (set)) == i
2817 			 || (int) ORIGINAL_REGNO (SET_SRC (set)) == i))
2818 	      new_regno = REGNO (SET_SRC (set));
2819 	    else
2820  	      gcc_unreachable ();
2821 	    if (new_regno == i)
2822 	      prev = x;
2823 	    else
2824 	      {
2825 		/* Remove the wrong list element.  */
2826 		if (prev == NULL_RTX)
2827 		  reg_equiv_init (i) = next;
2828 		else
2829 		  XEXP (prev, 1) = next;
2830 		XEXP (x, 1) = reg_equiv_init (new_regno);
2831 		reg_equiv_init (new_regno) = x;
2832 	      }
2833 	  }
2834     }
2835 }
2836 
2837 #ifdef ENABLE_IRA_CHECKING
2838 /* Print redundant memory-memory copies.  */
2839 static void
2840 print_redundant_copies (void)
2841 {
2842   int hard_regno;
2843   ira_allocno_t a;
2844   ira_copy_t cp, next_cp;
2845   ira_allocno_iterator ai;
2846 
2847   FOR_EACH_ALLOCNO (a, ai)
2848     {
2849       if (ALLOCNO_CAP_MEMBER (a) != NULL)
2850 	/* It is a cap.  */
2851 	continue;
2852       hard_regno = ALLOCNO_HARD_REGNO (a);
2853       if (hard_regno >= 0)
2854 	continue;
2855       for (cp = ALLOCNO_COPIES (a); cp != NULL; cp = next_cp)
2856 	if (cp->first == a)
2857 	  next_cp = cp->next_first_allocno_copy;
2858 	else
2859 	  {
2860 	    next_cp = cp->next_second_allocno_copy;
2861 	    if (internal_flag_ira_verbose > 4 && ira_dump_file != NULL
2862 		&& cp->insn != NULL_RTX
2863 		&& ALLOCNO_HARD_REGNO (cp->first) == hard_regno)
2864 	      fprintf (ira_dump_file,
2865 		       "        Redundant move from %d(freq %d):%d\n",
2866 		       INSN_UID (cp->insn), cp->freq, hard_regno);
2867 	  }
2868     }
2869 }
2870 #endif
2871 
2872 /* Set up preferred and alternative classes for new pseudo-registers
2873    created by IRA starting with START.  */
2874 static void
2875 setup_preferred_alternate_classes_for_new_pseudos (int start)
2876 {
2877   int i, old_regno;
2878   int max_regno = max_reg_num ();
2879 
2880   for (i = start; i < max_regno; i++)
2881     {
2882       old_regno = ORIGINAL_REGNO (regno_reg_rtx[i]);
2883       ira_assert (i != old_regno);
2884       setup_reg_classes (i, reg_preferred_class (old_regno),
2885 			 reg_alternate_class (old_regno),
2886 			 reg_allocno_class (old_regno));
2887       if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
2888 	fprintf (ira_dump_file,
2889 		 "    New r%d: setting preferred %s, alternative %s\n",
2890 		 i, reg_class_names[reg_preferred_class (old_regno)],
2891 		 reg_class_names[reg_alternate_class (old_regno)]);
2892     }
2893 }
2894 
2895 
2896 /* The number of entries allocated in reg_info.  */
2897 static int allocated_reg_info_size;
2898 
2899 /* Regional allocation can create new pseudo-registers.  This function
2900    expands some arrays for pseudo-registers.  */
2901 static void
2902 expand_reg_info (void)
2903 {
2904   int i;
2905   int size = max_reg_num ();
2906 
2907   resize_reg_info ();
2908   for (i = allocated_reg_info_size; i < size; i++)
2909     setup_reg_classes (i, GENERAL_REGS, ALL_REGS, GENERAL_REGS);
2910   setup_preferred_alternate_classes_for_new_pseudos (allocated_reg_info_size);
2911   allocated_reg_info_size = size;
2912 }
2913 
2914 /* Return TRUE if there is too high register pressure in the function.
2915    It is used to decide when stack slot sharing is worth doing.  */
2916 static bool
2917 too_high_register_pressure_p (void)
2918 {
2919   int i;
2920   enum reg_class pclass;
2921 
2922   for (i = 0; i < ira_pressure_classes_num; i++)
2923     {
2924       pclass = ira_pressure_classes[i];
2925       if (ira_loop_tree_root->reg_pressure[pclass] > 10000)
2926 	return true;
2927     }
2928   return false;
2929 }
2930 
2931 
2932 
2933 /* Indicate that hard register number FROM was eliminated and replaced with
2934    an offset from hard register number TO.  The status of hard registers live
2935    at the start of a basic block is updated by replacing a use of FROM with
2936    a use of TO.  */
2937 
2938 void
2939 mark_elimination (int from, int to)
2940 {
2941   basic_block bb;
2942   bitmap r;
2943 
2944   FOR_EACH_BB_FN (bb, cfun)
2945     {
2946       r = DF_LR_IN (bb);
2947       if (bitmap_bit_p (r, from))
2948 	{
2949 	  bitmap_clear_bit (r, from);
2950 	  bitmap_set_bit (r, to);
2951 	}
2952       if (! df_live)
2953         continue;
2954       r = DF_LIVE_IN (bb);
2955       if (bitmap_bit_p (r, from))
2956 	{
2957 	  bitmap_clear_bit (r, from);
2958 	  bitmap_set_bit (r, to);
2959 	}
2960     }
2961 }
2962 
2963 
2964 
2965 /* The length of the following array.  */
2966 int ira_reg_equiv_len;
2967 
2968 /* Info about equiv. info for each register.  */
2969 struct ira_reg_equiv_s *ira_reg_equiv;
2970 
2971 /* Expand ira_reg_equiv if necessary.  */
2972 void
2973 ira_expand_reg_equiv (void)
2974 {
2975   int old = ira_reg_equiv_len;
2976 
2977   if (ira_reg_equiv_len > max_reg_num ())
2978     return;
2979   ira_reg_equiv_len = max_reg_num () * 3 / 2 + 1;
2980   ira_reg_equiv
2981     = (struct ira_reg_equiv_s *) xrealloc (ira_reg_equiv,
2982 					 ira_reg_equiv_len
2983 					 * sizeof (struct ira_reg_equiv_s));
2984   gcc_assert (old < ira_reg_equiv_len);
2985   memset (ira_reg_equiv + old, 0,
2986 	  sizeof (struct ira_reg_equiv_s) * (ira_reg_equiv_len - old));
2987 }
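/* Illustrative sketch, not part of IRA: ira_expand_reg_equiv grows the
   vector to 1.5x the current pseudo count and zero-fills only the new
   tail, so existing entries survive the reallocation.  A generic form
   of the same pattern, with hypothetical names:  */
static void *
example_grow_zeroed (void *vec, size_t *len_p, size_t elt_size,
		     size_t needed)
{
  size_t old_len = *len_p;

  if (old_len > needed)
    return vec;			/* Already big enough.  */
  *len_p = needed * 3 / 2 + 1;
  vec = xrealloc (vec, *len_p * elt_size);
  memset ((char *) vec + old_len * elt_size, 0,
	  (*len_p - old_len) * elt_size);
  return vec;
}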
2988 
2989 static void
2990 init_reg_equiv (void)
2991 {
2992   ira_reg_equiv_len = 0;
2993   ira_reg_equiv = NULL;
2994   ira_expand_reg_equiv ();
2995 }
2996 
2997 static void
2998 finish_reg_equiv (void)
2999 {
3000   free (ira_reg_equiv);
3001 }
3002 
3003 
3004 
3005 struct equivalence
3006 {
3007   /* Set when a REG_EQUIV note is found or created.  Used to
3008      keep track of what memory accesses might be created later,
3009      e.g. by reload.  */
3010   rtx replacement;
3011   rtx *src_p;
3012 
3013   /* The list of each instruction which initializes this register.
3014 
3015      NULL indicates we know nothing about this register's equivalence
3016      properties.
3017 
3018      An INSN_LIST with a NULL insn indicates this pseudo is already
3019      known to not have a valid equivalence.  */
3020   rtx_insn_list *init_insns;
3021 
3022   /* Loop depth is used to recognize equivalences which appear
3023      to be present within the same loop (or in an inner loop).  */
3024   short loop_depth;
3025   /* Nonzero if this had a preexisting REG_EQUIV note.  */
3026   unsigned char is_arg_equivalence : 1;
3027   /* Set when an attempt should be made to replace a register
3028      with the associated src_p entry.  */
3029   unsigned char replace : 1;
3030   /* Set if this register has no known equivalence.  */
3031   unsigned char no_equiv : 1;
3032   /* Set if this register is mentioned in a paradoxical subreg.  */
3033   unsigned char pdx_subregs : 1;
3034 };
3035 
3036 /* reg_equiv[N] (where N is a pseudo reg number) is the equivalence
3037    structure for that register.  */
3038 static struct equivalence *reg_equiv;
3039 
3040 /* Used for communication between the following two functions.  */
3041 struct equiv_mem_data
3042 {
3043   /* A MEM that we wish to ensure remains unchanged.  */
3044   rtx equiv_mem;
3045 
3046   /* Set true if EQUIV_MEM is modified.  */
3047   bool equiv_mem_modified;
3048 };
3049 
3050 /* If EQUIV_MEM is modified by modifying DEST, indicate that it is modified.
3051    Called via note_stores.  */
3052 static void
3053 validate_equiv_mem_from_store (rtx dest, const_rtx set ATTRIBUTE_UNUSED,
3054 			       void *data)
3055 {
3056   struct equiv_mem_data *info = (struct equiv_mem_data *) data;
3057 
3058   if ((REG_P (dest)
3059        && reg_overlap_mentioned_p (dest, info->equiv_mem))
3060       || (MEM_P (dest)
3061 	  && anti_dependence (info->equiv_mem, dest)))
3062     info->equiv_mem_modified = true;
3063 }
3064 
3065 enum valid_equiv { valid_none, valid_combine, valid_reload };
3066 
3067 /* Verify that no store between START and the death of REG invalidates
3068    MEMREF.  MEMREF is invalidated by modifying a register used in MEMREF,
3069    by storing into an overlapping memory location, or with a non-const
3070    CALL_INSN.
3071 
3072    Return VALID_RELOAD if MEMREF remains valid for both reload and
3073    combine_and_move insns, VALID_COMBINE if only valid for
3074    combine_and_move_insns, and VALID_NONE otherwise.  */
3075 static enum valid_equiv
3076 validate_equiv_mem (rtx_insn *start, rtx reg, rtx memref)
3077 {
3078   rtx_insn *insn;
3079   rtx note;
3080   struct equiv_mem_data info = { memref, false };
3081   enum valid_equiv ret = valid_reload;
3082 
3083   /* If the memory reference has side effects or is volatile, it isn't a
3084      valid equivalence.  */
3085   if (side_effects_p (memref))
3086     return valid_none;
3087 
3088   for (insn = start; insn; insn = NEXT_INSN (insn))
3089     {
3090       if (!INSN_P (insn))
3091 	continue;
3092 
3093       if (find_reg_note (insn, REG_DEAD, reg))
3094 	return ret;
3095 
3096       if (CALL_P (insn))
3097 	{
3098 	  /* We can combine a reg def from one insn into a reg use in
3099 	     another over a call if the memory is readonly or the call
3100 	     const/pure.  However, we can't set reg_equiv notes up for
3101 	     reload over any call.  The problem is the equivalent form
3102 	     may reference a pseudo which gets assigned a call
3103 	     clobbered hard reg.  When we later replace REG with its
3104 	     equivalent form, the value in the call-clobbered reg has
3105 	     been changed and all hell breaks loose.  */
3106 	  ret = valid_combine;
3107 	  if (!MEM_READONLY_P (memref)
3108 	      && !RTL_CONST_OR_PURE_CALL_P (insn))
3109 	    return valid_none;
3110 	}
3111 
3112       note_stores (insn, validate_equiv_mem_from_store, &info);
3113       if (info.equiv_mem_modified)
3114 	return valid_none;
3115 
3116       /* If a register mentioned in MEMREF is modified via an
3117 	 auto-increment, we lose the equivalence.  Do the same if one
3118 	 dies; although we could extend the life, it doesn't seem worth
3119 	 the trouble.  */
3120 
3121       for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
3122 	if ((REG_NOTE_KIND (note) == REG_INC
3123 	     || REG_NOTE_KIND (note) == REG_DEAD)
3124 	    && REG_P (XEXP (note, 0))
3125 	    && reg_overlap_mentioned_p (XEXP (note, 0), memref))
3126 	  return valid_none;
3127     }
3128 
3129   return valid_none;
3130 }
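/* Illustrative sketch, not part of IRA: callers treat the three results
   of validate_equiv_mem as a small lattice.  VALID_RELOAD permits both
   attaching a REG_EQUIV note and combining/moving the defining insn;
   VALID_COMBINE permits only the latter.  Hypothetical helpers showing
   the intended interpretation:  */
static inline bool
example_can_attach_reg_equiv_p (enum valid_equiv v)
{
  return v == valid_reload;
}

static inline bool
example_can_combine_and_move_p (enum valid_equiv v)
{
  return v != valid_none;
}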
3131 
3132 /* Returns zero if X is known to be invariant.  */
3133 static int
3134 equiv_init_varies_p (rtx x)
3135 {
3136   RTX_CODE code = GET_CODE (x);
3137   int i;
3138   const char *fmt;
3139 
3140   switch (code)
3141     {
3142     case MEM:
3143       return !MEM_READONLY_P (x) || equiv_init_varies_p (XEXP (x, 0));
3144 
3145     case CONST:
3146     CASE_CONST_ANY:
3147     case SYMBOL_REF:
3148     case LABEL_REF:
3149       return 0;
3150 
3151     case REG:
3152       return reg_equiv[REGNO (x)].replace == 0 && rtx_varies_p (x, 0);
3153 
3154     case ASM_OPERANDS:
3155       if (MEM_VOLATILE_P (x))
3156 	return 1;
3157 
3158       /* Fall through.  */
3159 
3160     default:
3161       break;
3162     }
3163 
3164   fmt = GET_RTX_FORMAT (code);
3165   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3166     if (fmt[i] == 'e')
3167       {
3168 	if (equiv_init_varies_p (XEXP (x, i)))
3169 	  return 1;
3170       }
3171     else if (fmt[i] == 'E')
3172       {
3173 	int j;
3174 	for (j = 0; j < XVECLEN (x, i); j++)
3175 	  if (equiv_init_varies_p (XVECEXP (x, i, j)))
3176 	    return 1;
3177       }
3178 
3179   return 0;
3180 }
3181 
3182 /* Returns nonzero if X (used to initialize register REGNO) is movable.
3183    X is only movable if the registers it uses have equivalent initializations
3184    that appear to be within the same loop (or in an inner loop) and are
3185    movable, or if they are not candidates for local_alloc and don't vary.  */
3186 static int
3187 equiv_init_movable_p (rtx x, int regno)
3188 {
3189   int i, j;
3190   const char *fmt;
3191   enum rtx_code code = GET_CODE (x);
3192 
3193   switch (code)
3194     {
3195     case SET:
3196       return equiv_init_movable_p (SET_SRC (x), regno);
3197 
3198     case CLOBBER:
3199       return 0;
3200 
3201     case PRE_INC:
3202     case PRE_DEC:
3203     case POST_INC:
3204     case POST_DEC:
3205     case PRE_MODIFY:
3206     case POST_MODIFY:
3207       return 0;
3208 
3209     case REG:
3210       return ((reg_equiv[REGNO (x)].loop_depth >= reg_equiv[regno].loop_depth
3211 	       && reg_equiv[REGNO (x)].replace)
3212 	      || (REG_BASIC_BLOCK (REGNO (x)) < NUM_FIXED_BLOCKS
3213 		  && ! rtx_varies_p (x, 0)));
3214 
3215     case UNSPEC_VOLATILE:
3216       return 0;
3217 
3218     case ASM_OPERANDS:
3219       if (MEM_VOLATILE_P (x))
3220 	return 0;
3221 
3222       /* Fall through.  */
3223 
3224     default:
3225       break;
3226     }
3227 
3228   fmt = GET_RTX_FORMAT (code);
3229   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3230     switch (fmt[i])
3231       {
3232       case 'e':
3233 	if (! equiv_init_movable_p (XEXP (x, i), regno))
3234 	  return 0;
3235 	break;
3236       case 'E':
3237 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3238 	  if (! equiv_init_movable_p (XVECEXP (x, i, j), regno))
3239 	    return 0;
3240 	break;
3241       }
3242 
3243   return 1;
3244 }
3245 
3246 static bool memref_referenced_p (rtx memref, rtx x, bool read_p);
3247 
3248 /* Auxiliary function for memref_referenced_p.  Process setting X for
3249    MEMREF store.  */
3250 static bool
3251 process_set_for_memref_referenced_p (rtx memref, rtx x)
3252 {
3253   /* If we are setting a MEM, it doesn't count (its address does), but any
3254      other SET_DEST that has a MEM in it is referencing the MEM.  */
3255   if (MEM_P (x))
3256     {
3257       if (memref_referenced_p (memref, XEXP (x, 0), true))
3258 	return true;
3259     }
3260   else if (memref_referenced_p (memref, x, false))
3261     return true;
3262 
3263   return false;
3264 }
3265 
3266 /* TRUE if X references a memory location (as a read if READ_P) that
3267    would be affected by a store to MEMREF.  */
3268 static bool
3269 memref_referenced_p (rtx memref, rtx x, bool read_p)
3270 {
3271   int i, j;
3272   const char *fmt;
3273   enum rtx_code code = GET_CODE (x);
3274 
3275   switch (code)
3276     {
3277     case CONST:
3278     case LABEL_REF:
3279     case SYMBOL_REF:
3280     CASE_CONST_ANY:
3281     case PC:
3282     case HIGH:
3283     case LO_SUM:
3284       return false;
3285 
3286     case REG:
3287       return (reg_equiv[REGNO (x)].replacement
3288 	      && memref_referenced_p (memref,
3289 				      reg_equiv[REGNO (x)].replacement, read_p));
3290 
3291     case MEM:
3292       /* Memory X might have a different effective type than MEMREF.  */
3293       if (read_p || true_dependence (memref, VOIDmode, x))
3294 	return true;
3295       break;
3296 
3297     case SET:
3298       if (process_set_for_memref_referenced_p (memref, SET_DEST (x)))
3299 	return true;
3300 
3301       return memref_referenced_p (memref, SET_SRC (x), true);
3302 
3303     case CLOBBER:
3304       if (process_set_for_memref_referenced_p (memref, XEXP (x, 0)))
3305 	return true;
3306 
3307       return false;
3308 
3309     case PRE_DEC:
3310     case POST_DEC:
3311     case PRE_INC:
3312     case POST_INC:
3313       if (process_set_for_memref_referenced_p (memref, XEXP (x, 0)))
3314 	return true;
3315 
3316       return memref_referenced_p (memref, XEXP (x, 0), true);
3317 
3318     case POST_MODIFY:
3319     case PRE_MODIFY:
3320       /* op0 = op0 + op1 */
3321       if (process_set_for_memref_referenced_p (memref, XEXP (x, 0)))
3322 	return true;
3323 
3324       if (memref_referenced_p (memref, XEXP (x, 0), true))
3325 	return true;
3326 
3327       return memref_referenced_p (memref, XEXP (x, 1), true);
3328 
3329     default:
3330       break;
3331     }
3332 
3333   fmt = GET_RTX_FORMAT (code);
3334   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3335     switch (fmt[i])
3336       {
3337       case 'e':
3338 	if (memref_referenced_p (memref, XEXP (x, i), read_p))
3339 	  return true;
3340 	break;
3341       case 'E':
3342 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3343 	  if (memref_referenced_p (memref, XVECEXP (x, i, j), read_p))
3344 	    return true;
3345 	break;
3346       }
3347 
3348   return false;
3349 }
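/* Illustrative sketch, not part of IRA: the 'e'/'E' loops above (and in
   equiv_init_varies_p and equiv_init_movable_p) are the standard way to
   recurse over an rtx guided by its format string, where 'e' marks a
   sub-expression and 'E' a vector of them.  A generic walker following
   the same pattern, with a hypothetical name:  */
static bool
example_any_subrtx_p (rtx x, bool (*pred) (rtx))
{
  if (pred (x))
    return true;

  enum rtx_code code = GET_CODE (x);
  const char *fmt = GET_RTX_FORMAT (code);
  for (int i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
	if (example_any_subrtx_p (XEXP (x, i), pred))
	  return true;
      }
    else if (fmt[i] == 'E')
      {
	for (int j = XVECLEN (x, i) - 1; j >= 0; j--)
	  if (example_any_subrtx_p (XVECEXP (x, i, j), pred))
	    return true;
      }
  return false;
}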
3350 
3351 /* TRUE if some insn in the range (START, END] references a memory location
3352    that would be affected by a store to MEMREF.
3353 
3354    Callers should not call this routine if START is after END in the
3355    RTL chain.  */
3356 
3357 static int
3358 memref_used_between_p (rtx memref, rtx_insn *start, rtx_insn *end)
3359 {
3360   rtx_insn *insn;
3361 
3362   for (insn = NEXT_INSN (start);
3363        insn && insn != NEXT_INSN (end);
3364        insn = NEXT_INSN (insn))
3365     {
3366       if (!NONDEBUG_INSN_P (insn))
3367 	continue;
3368 
3369       if (memref_referenced_p (memref, PATTERN (insn), false))
3370 	return 1;
3371 
3372       /* Nonconst functions may access memory.  */
3373       if (CALL_P (insn) && (! RTL_CONST_CALL_P (insn)))
3374 	return 1;
3375     }
3376 
3377   gcc_assert (insn == NEXT_INSN (end));
3378   return 0;
3379 }
3380 
3381 /* Mark REG as having no known equivalence.
3382    Some instructions might have been processed before and furnished
3383    with REG_EQUIV notes for this register; these notes will have to be
3384    removed.
3385    STORE is the piece of RTL that does the non-constant / conflicting
3386    assignment - a SET, CLOBBER or REG_INC note.  It is currently not used,
3387    but needs to be there because this function is called from note_stores.  */
3388 static void
3389 no_equiv (rtx reg, const_rtx store ATTRIBUTE_UNUSED,
3390 	  void *data ATTRIBUTE_UNUSED)
3391 {
3392   int regno;
3393   rtx_insn_list *list;
3394 
3395   if (!REG_P (reg))
3396     return;
3397   regno = REGNO (reg);
3398   reg_equiv[regno].no_equiv = 1;
3399   list = reg_equiv[regno].init_insns;
3400   if (list && list->insn () == NULL)
3401     return;
3402   reg_equiv[regno].init_insns = gen_rtx_INSN_LIST (VOIDmode, NULL_RTX, NULL);
3403   reg_equiv[regno].replacement = NULL_RTX;
3404   /* This doesn't matter for equivalences made for argument registers; we
3405      should keep their initialization insns.  */
3406   if (reg_equiv[regno].is_arg_equivalence)
3407     return;
3408   ira_reg_equiv[regno].defined_p = false;
3409   ira_reg_equiv[regno].init_insns = NULL;
3410   for (; list; list = list->next ())
3411     {
3412       rtx_insn *insn = list->insn ();
3413       remove_note (insn, find_reg_note (insn, REG_EQUIV, NULL_RTX));
3414     }
3415 }
3416 
3417 /* For each paradoxical SUBREG in INSN, mark the inner register's
3418    pdx_subregs flag in reg_equiv.  */
3419 
3420 static void
3421 set_paradoxical_subreg (rtx_insn *insn)
3422 {
3423   subrtx_iterator::array_type array;
3424   FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
3425     {
3426       const_rtx subreg = *iter;
3427       if (GET_CODE (subreg) == SUBREG)
3428 	{
3429 	  const_rtx reg = SUBREG_REG (subreg);
3430 	  if (REG_P (reg) && paradoxical_subreg_p (subreg))
3431 	    reg_equiv[REGNO (reg)].pdx_subregs = true;
3432 	}
3433     }
3434 }
3435 
3436 /* In a DEBUG_INSN location, replace REGs from the CLEARED_REGS bitmap
3437    with their equivalent replacement.  */
3438 
3439 static rtx
3440 adjust_cleared_regs (rtx loc, const_rtx old_rtx ATTRIBUTE_UNUSED, void *data)
3441 {
3442   if (REG_P (loc))
3443     {
3444       bitmap cleared_regs = (bitmap) data;
3445       if (bitmap_bit_p (cleared_regs, REGNO (loc)))
3446 	return simplify_replace_fn_rtx (copy_rtx (*reg_equiv[REGNO (loc)].src_p),
3447 					NULL_RTX, adjust_cleared_regs, data);
3448     }
3449   return NULL_RTX;
3450 }
3451 
3452 /* Given register REGNO is set only once, return true if the defining
3453    insn dominates all uses.  */
3454 
3455 static bool
3456 def_dominates_uses (int regno)
3457 {
3458   df_ref def = DF_REG_DEF_CHAIN (regno);
3459 
3460   struct df_insn_info *def_info = DF_REF_INSN_INFO (def);
3461   /* If this is an artificial def (eh handler regs, hard frame pointer
3462      for non-local goto, regs defined on function entry) then def_info
3463      is NULL and the reg is always live before any use.  We might
3464      reasonably return true in that case, but since the only call
3465      of this function is currently here in ira.c when we are looking
3466      at a defining insn we can't have an artificial def as that would
3467      bump DF_REG_DEF_COUNT.  */
3468   gcc_assert (DF_REG_DEF_COUNT (regno) == 1 && def_info != NULL);
3469 
3470   rtx_insn *def_insn = DF_REF_INSN (def);
3471   basic_block def_bb = BLOCK_FOR_INSN (def_insn);
3472 
3473   for (df_ref use = DF_REG_USE_CHAIN (regno);
3474        use;
3475        use = DF_REF_NEXT_REG (use))
3476     {
3477       struct df_insn_info *use_info = DF_REF_INSN_INFO (use);
3478       /* Only check real uses, not artificial ones.  */
3479       if (use_info)
3480 	{
3481 	  rtx_insn *use_insn = DF_REF_INSN (use);
3482 	  if (!DEBUG_INSN_P (use_insn))
3483 	    {
3484 	      basic_block use_bb = BLOCK_FOR_INSN (use_insn);
3485 	      if (use_bb != def_bb
3486 		  ? !dominated_by_p (CDI_DOMINATORS, use_bb, def_bb)
3487 		  : DF_INSN_INFO_LUID (use_info) < DF_INSN_INFO_LUID (def_info))
3488 		return false;
3489 	    }
3490 	}
3491     }
3492   return true;
3493 }
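/* Illustrative sketch, not part of IRA: the check above boils down to
   "does the defining insn dominate the using insn?".  Across basic
   blocks that is a dominator-tree query; within a block the df LUIDs
   give insn order.  A hypothetical standalone predicate with the same
   shape (it assumes dominance info has been computed, as above):  */
static bool
example_insn_dominates_insn_p (rtx_insn *def_insn, rtx_insn *use_insn)
{
  basic_block def_bb = BLOCK_FOR_INSN (def_insn);
  basic_block use_bb = BLOCK_FOR_INSN (use_insn);

  if (use_bb != def_bb)
    return dominated_by_p (CDI_DOMINATORS, use_bb, def_bb);
  return DF_INSN_LUID (def_insn) <= DF_INSN_LUID (use_insn);
}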
3494 
3495 /* Scan the instructions before update_equiv_regs.  Record which registers
3496    are referenced as paradoxical subregs.  Also check for cases in which
3497    the current function needs to save a register that one of its call
3498    instructions clobbers.
3499 
3500    These things are logically unrelated, but it's more efficient to do
3501    them together.  */
3502 
3503 static void
3504 update_equiv_regs_prescan (void)
3505 {
3506   basic_block bb;
3507   rtx_insn *insn;
3508   function_abi_aggregator callee_abis;
3509 
3510   FOR_EACH_BB_FN (bb, cfun)
3511     FOR_BB_INSNS (bb, insn)
3512       if (NONDEBUG_INSN_P (insn))
3513 	{
3514 	  set_paradoxical_subreg (insn);
3515 	  if (CALL_P (insn))
3516 	    callee_abis.note_callee_abi (insn_callee_abi (insn));
3517 	}
3518 
3519   HARD_REG_SET extra_caller_saves = callee_abis.caller_save_regs (*crtl->abi);
3520   if (!hard_reg_set_empty_p (extra_caller_saves))
3521     for (unsigned int regno = 0; regno < FIRST_PSEUDO_REGISTER; ++regno)
3522       if (TEST_HARD_REG_BIT (extra_caller_saves, regno))
3523 	df_set_regs_ever_live (regno, true);
3524 }
3525 
3526 /* Find registers that are equivalent to a single value throughout the
3527    compilation (either because they can be referenced in memory or are
3528    set once from a single constant).  Lower their priority for a
3529    register.
3530 
3531    If such a register is only referenced once, try substituting its
3532    value into the using insn.  If it succeeds, we can eliminate the
3533    register completely.
3534 
3535    Initialize init_insns in ira_reg_equiv array.  */
3536 static void
3537 update_equiv_regs (void)
3538 {
3539   rtx_insn *insn;
3540   basic_block bb;
3541 
3542   /* Scan the insns and find which registers have equivalences.  Do this
3543      in a separate scan of the insns because (due to -fcse-follow-jumps)
3544      a register can be set below its use.  */
3545   bitmap setjmp_crosses = regstat_get_setjmp_crosses ();
3546   FOR_EACH_BB_FN (bb, cfun)
3547     {
3548       int loop_depth = bb_loop_depth (bb);
3549 
3550       for (insn = BB_HEAD (bb);
3551 	   insn != NEXT_INSN (BB_END (bb));
3552 	   insn = NEXT_INSN (insn))
3553 	{
3554 	  rtx note;
3555 	  rtx set;
3556 	  rtx dest, src;
3557 	  int regno;
3558 
3559 	  if (! INSN_P (insn))
3560 	    continue;
3561 
3562 	  for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
3563 	    if (REG_NOTE_KIND (note) == REG_INC)
3564 	      no_equiv (XEXP (note, 0), note, NULL);
3565 
3566 	  set = single_set (insn);
3567 
3568 	  /* If this insn contains more (or less) than a single SET,
3569 	     only mark all destinations as having no known equivalence.  */
3570 	  if (set == NULL_RTX
3571 	      || side_effects_p (SET_SRC (set)))
3572 	    {
3573 	      note_pattern_stores (PATTERN (insn), no_equiv, NULL);
3574 	      continue;
3575 	    }
3576 	  else if (GET_CODE (PATTERN (insn)) == PARALLEL)
3577 	    {
3578 	      int i;
3579 
3580 	      for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
3581 		{
3582 		  rtx part = XVECEXP (PATTERN (insn), 0, i);
3583 		  if (part != set)
3584 		    note_pattern_stores (part, no_equiv, NULL);
3585 		}
3586 	    }
3587 
3588 	  dest = SET_DEST (set);
3589 	  src = SET_SRC (set);
3590 
3591 	  /* See if this is setting up the equivalence between an argument
3592 	     register and its stack slot.  */
3593 	  note = find_reg_note (insn, REG_EQUIV, NULL_RTX);
3594 	  if (note)
3595 	    {
3596 	      gcc_assert (REG_P (dest));
3597 	      regno = REGNO (dest);
3598 
3599 	      /* Note that we don't want to clear init_insns in
3600 		 ira_reg_equiv even if there are multiple sets of this
3601 		 register.  */
3602 	      reg_equiv[regno].is_arg_equivalence = 1;
3603 
3604 	      /* The insn result can have an equivalent memory location
3605 		 even though the equivalence is not set up by the insn
3606 		 itself.  We add this insn to the init insns for now as
3607 		 a flag that regno has an equivalence.  We will remove
3608 		 the insn from the init insn list later.  */
3609 	      if (rtx_equal_p (src, XEXP (note, 0)) || MEM_P (XEXP (note, 0)))
3610 		ira_reg_equiv[regno].init_insns
3611 		  = gen_rtx_INSN_LIST (VOIDmode, insn,
3612 				       ira_reg_equiv[regno].init_insns);
3613 
3614 	      /* Continue normally in case this is a candidate for
3615 		 replacements.  */
3616 	    }
3617 
3618 	  if (!optimize)
3619 	    continue;
3620 
3621 	  /* We only handle the case of a pseudo register being set
3622 	     once, or always to the same value.  */
3623 	  /* ??? The mn10200 port breaks if we add equivalences for
3624 	     values that need an ADDRESS_REGS register and set them equivalent
3625 	     to a MEM of a pseudo.  The actual problem is in the over-conservative
3626 	     handling of INPADDR_ADDRESS / INPUT_ADDRESS / INPUT triples in
3627 	     calculate_needs, but we traditionally work around this problem
3628 	     here by rejecting equivalences when the destination is in a register
3629 	     that's likely spilled.  This is fragile, of course, since the
3630 	     preferred class of a pseudo depends on all instructions that set
3631 	     or use it.  */
3632 
3633 	  if (!REG_P (dest)
3634 	      || (regno = REGNO (dest)) < FIRST_PSEUDO_REGISTER
3635 	      || (reg_equiv[regno].init_insns
3636 		  && reg_equiv[regno].init_insns->insn () == NULL)
3637 	      || (targetm.class_likely_spilled_p (reg_preferred_class (regno))
3638 		  && MEM_P (src) && ! reg_equiv[regno].is_arg_equivalence))
3639 	    {
3640 	      /* This might be setting a SUBREG of a pseudo, a pseudo that is
3641 		 also set somewhere else to a constant.  */
3642 	      note_pattern_stores (set, no_equiv, NULL);
3643 	      continue;
3644 	    }
3645 
3646 	  /* Don't set reg mentioned in a paradoxical subreg
3647 	     equivalent to a mem.  */
3648 	  if (MEM_P (src) && reg_equiv[regno].pdx_subregs)
3649 	    {
3650 	      note_pattern_stores (set, no_equiv, NULL);
3651 	      continue;
3652 	    }
3653 
3654 	  note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
3655 
3656 	  /* cse sometimes generates function invariants, but doesn't put a
3657 	     REG_EQUAL note on the insn.  Since this note would be redundant,
3658 	     there's no point creating it earlier than here.  */
3659 	  if (! note && ! rtx_varies_p (src, 0))
3660 	    note = set_unique_reg_note (insn, REG_EQUAL, copy_rtx (src));
3661 
3662 	  /* Don't bother considering a REG_EQUAL note containing an EXPR_LIST
3663 	     since it represents a function call.  */
3664 	  if (note && GET_CODE (XEXP (note, 0)) == EXPR_LIST)
3665 	    note = NULL_RTX;
3666 
3667 	  if (DF_REG_DEF_COUNT (regno) != 1)
3668 	    {
3669 	      bool equal_p = true;
3670 	      rtx_insn_list *list;
3671 
3672 	      /* If we have already processed this pseudo and determined it
3673 		 cannot have an equivalence, then honor that decision.  */
3674 	      if (reg_equiv[regno].no_equiv)
3675 		continue;
3676 
3677 	      if (! note
3678 		  || rtx_varies_p (XEXP (note, 0), 0)
3679 		  || (reg_equiv[regno].replacement
3680 		      && ! rtx_equal_p (XEXP (note, 0),
3681 					reg_equiv[regno].replacement)))
3682 		{
3683 		  no_equiv (dest, set, NULL);
3684 		  continue;
3685 		}
3686 
3687 	      list = reg_equiv[regno].init_insns;
3688 	      for (; list; list = list->next ())
3689 		{
3690 		  rtx note_tmp;
3691 		  rtx_insn *insn_tmp;
3692 
3693 		  insn_tmp = list->insn ();
3694 		  note_tmp = find_reg_note (insn_tmp, REG_EQUAL, NULL_RTX);
3695 		  gcc_assert (note_tmp);
3696 		  if (! rtx_equal_p (XEXP (note, 0), XEXP (note_tmp, 0)))
3697 		    {
3698 		      equal_p = false;
3699 		      break;
3700 		    }
3701 		}
3702 
3703 	      if (! equal_p)
3704 		{
3705 		  no_equiv (dest, set, NULL);
3706 		  continue;
3707 		}
3708 	    }
3709 
3710 	  /* Record this insn as initializing this register.  */
3711 	  reg_equiv[regno].init_insns
3712 	    = gen_rtx_INSN_LIST (VOIDmode, insn, reg_equiv[regno].init_insns);
3713 
3714 	  /* If this register is known to be equal to a constant, record that
3715 	     it is always equivalent to the constant.
3716 	     Note that it is possible to have a register use before
3717 	     the def in loops (see gcc.c-torture/execute/pr79286.c)
3718 	     where the reg is undefined on first use.  If the def insn
3719 	     won't trap we can use it as an equivalence, effectively
3720 	     choosing the "undefined" value for the reg to be the
3721 	     same as the value set by the def.  */
3722 	  if (DF_REG_DEF_COUNT (regno) == 1
3723 	      && note
3724 	      && !rtx_varies_p (XEXP (note, 0), 0)
3725 	      && (!may_trap_or_fault_p (XEXP (note, 0))
3726 		  || def_dominates_uses (regno)))
3727 	    {
3728 	      rtx note_value = XEXP (note, 0);
3729 	      remove_note (insn, note);
3730 	      set_unique_reg_note (insn, REG_EQUIV, note_value);
3731 	    }
3732 
3733 	  /* If this insn introduces a "constant" register, decrease the priority
3734 	     of that register.  Record this insn if the register is only used once
3735 	     more and the equivalence value is the same as our source.
3736 
3737 	     The latter condition is checked for two reasons:  First, it is an
3738 	     indication that it may be more efficient to actually emit the insn
3739 	     as written (if no registers are available, reload will substitute
3740 	     the equivalence).  Secondly, it avoids problems with any registers
3741 	     dying in this insn whose death notes would be missed.
3742 
3743 	     If we don't have a REG_EQUIV note, see if this insn is loading
3744 	     a register used only in one basic block from a MEM.  If so, and the
3745 	     MEM remains unchanged for the life of the register, add a REG_EQUIV
3746 	     note.  */
3747 	  note = find_reg_note (insn, REG_EQUIV, NULL_RTX);
3748 
3749 	  rtx replacement = NULL_RTX;
3750 	  if (note)
3751 	    replacement = XEXP (note, 0);
3752 	  else if (REG_BASIC_BLOCK (regno) >= NUM_FIXED_BLOCKS
3753 		   && MEM_P (SET_SRC (set)))
3754 	    {
3755 	      enum valid_equiv validity;
3756 	      validity = validate_equiv_mem (insn, dest, SET_SRC (set));
3757 	      if (validity != valid_none)
3758 		{
3759 		  replacement = copy_rtx (SET_SRC (set));
3760 		  if (validity == valid_reload)
3761 		    note = set_unique_reg_note (insn, REG_EQUIV, replacement);
3762 		}
3763 	    }
3764 
3765 	  /* If we haven't done so, record for reload that this is an
3766 	     equivalencing insn.  */
3767 	  if (note && !reg_equiv[regno].is_arg_equivalence)
3768 	    ira_reg_equiv[regno].init_insns
3769 	      = gen_rtx_INSN_LIST (VOIDmode, insn,
3770 				   ira_reg_equiv[regno].init_insns);
3771 
3772 	  if (replacement)
3773 	    {
3774 	      reg_equiv[regno].replacement = replacement;
3775 	      reg_equiv[regno].src_p = &SET_SRC (set);
3776 	      reg_equiv[regno].loop_depth = (short) loop_depth;
3777 
3778 	      /* Don't mess with things live during setjmp.  */
3779 	      if (optimize && !bitmap_bit_p (setjmp_crosses, regno))
3780 		{
3781 		  /* If the register is referenced exactly twice, meaning it is
3782 		     set once and used once, indicate that the reference may be
3783 		     replaced by the equivalence we computed above.  Do this
3784 		     even if the register is only used in one block so that
3785 		     dependencies can be handled where the last register is
3786 		     used in a different block (i.e. HIGH / LO_SUM sequences)
3787 		     and to reduce the number of registers alive across
3788 		     calls.  */
3789 
3790 		  if (REG_N_REFS (regno) == 2
3791 		      && (rtx_equal_p (replacement, src)
3792 			  || ! equiv_init_varies_p (src))
3793 		      && NONJUMP_INSN_P (insn)
3794 		      && equiv_init_movable_p (PATTERN (insn), regno))
3795 		    reg_equiv[regno].replace = 1;
3796 		}
3797 	    }
3798 	}
3799     }
3800 }
3801 
3802 /* For insns that set a MEM to the contents of a REG that is only used
3803    in a single basic block, see if the register is always equivalent
3804    to that memory location and if moving the store from INSN to the
3805    insn that sets REG is safe.  If so, put a REG_EQUIV note on the
3806    initializing insn.  */
3807 static void
3808 add_store_equivs (void)
3809 {
3810   auto_bitmap seen_insns;
3811 
3812   for (rtx_insn *insn = get_insns (); insn; insn = NEXT_INSN (insn))
3813     {
3814       rtx set, src, dest;
3815       unsigned regno;
3816       rtx_insn *init_insn;
3817 
3818       bitmap_set_bit (seen_insns, INSN_UID (insn));
3819 
3820       if (! INSN_P (insn))
3821 	continue;
3822 
3823       set = single_set (insn);
3824       if (! set)
3825 	continue;
3826 
3827       dest = SET_DEST (set);
3828       src = SET_SRC (set);
3829 
3830       /* Don't add a REG_EQUIV note if the insn already has one.  The existing
3831 	 REG_EQUIV is likely more useful than the one we are adding.  */
3832       if (MEM_P (dest) && REG_P (src)
3833 	  && (regno = REGNO (src)) >= FIRST_PSEUDO_REGISTER
3834 	  && REG_BASIC_BLOCK (regno) >= NUM_FIXED_BLOCKS
3835 	  && DF_REG_DEF_COUNT (regno) == 1
3836 	  && ! reg_equiv[regno].pdx_subregs
3837 	  && reg_equiv[regno].init_insns != NULL
3838 	  && (init_insn = reg_equiv[regno].init_insns->insn ()) != 0
3839 	  && bitmap_bit_p (seen_insns, INSN_UID (init_insn))
3840 	  && ! find_reg_note (init_insn, REG_EQUIV, NULL_RTX)
3841 	  && validate_equiv_mem (init_insn, src, dest) == valid_reload
3842 	  && ! memref_used_between_p (dest, init_insn, insn)
3843 	  /* Attaching a REG_EQUIV note will fail if INIT_INSN has
3844 	     multiple sets.  */
3845 	  && set_unique_reg_note (init_insn, REG_EQUIV, copy_rtx (dest)))
3846 	{
3847 	  /* This insn makes the equivalence, not the one initializing
3848 	     the register.  */
3849 	  ira_reg_equiv[regno].init_insns
3850 	    = gen_rtx_INSN_LIST (VOIDmode, insn, NULL_RTX);
3851 	  df_notes_rescan (init_insn);
3852 	  if (dump_file)
3853 	    fprintf (dump_file,
3854 		     "Adding REG_EQUIV to insn %d for source of insn %d\n",
3855 		     INSN_UID (init_insn),
3856 		     INSN_UID (insn));
3857 	}
3858     }
3859 }
3860 
3861 /* Scan all regs killed in an insn to see if any of them are registers
3862    only used that once.  If so, see if we can replace the reference
3863    with the equivalent form.  If we can, delete the initializing
3864    reference and this register will go away.  If we can't replace the
3865    reference, and the initializing reference is within the same loop
3866    (or in an inner loop), then move the register initialization just
3867    before the use, so that they are in the same basic block.  */
3868 static void
3869 combine_and_move_insns (void)
3870 {
3871   auto_bitmap cleared_regs;
3872   int max = max_reg_num ();
3873 
3874   for (int regno = FIRST_PSEUDO_REGISTER; regno < max; regno++)
3875     {
3876       if (!reg_equiv[regno].replace)
3877 	continue;
3878 
3879       rtx_insn *use_insn = 0;
3880       for (df_ref use = DF_REG_USE_CHAIN (regno);
3881 	   use;
3882 	   use = DF_REF_NEXT_REG (use))
3883 	if (DF_REF_INSN_INFO (use))
3884 	  {
3885 	    if (DEBUG_INSN_P (DF_REF_INSN (use)))
3886 	      continue;
3887 	    gcc_assert (!use_insn);
3888 	    use_insn = DF_REF_INSN (use);
3889 	  }
3890       gcc_assert (use_insn);
3891 
3892       /* Don't substitute into jumps.  indirect_jump_optimize does
3893 	 this for anything we are prepared to handle.  */
3894       if (JUMP_P (use_insn))
3895 	continue;
3896 
3897       /* Also don't substitute into a conditional trap insn -- it can become
3898 	 an unconditional trap, and that is a flow control insn.  */
3899       if (GET_CODE (PATTERN (use_insn)) == TRAP_IF)
3900 	continue;
3901 
3902       df_ref def = DF_REG_DEF_CHAIN (regno);
3903       gcc_assert (DF_REG_DEF_COUNT (regno) == 1 && DF_REF_INSN_INFO (def));
3904       rtx_insn *def_insn = DF_REF_INSN (def);
3905 
3906       /* We may not move instructions that can throw, since that
3907 	 changes basic block boundaries and we are not prepared to
3908 	 adjust the CFG to match.  */
3909       if (can_throw_internal (def_insn))
3910 	continue;
3911 
3912       /* Instructions with multiple sets can only be moved if DF analysis is
3913 	 performed for all of the registers set.  See PR91052.  */
3914       if (multiple_sets (def_insn))
3915 	continue;
3916 
3917       basic_block use_bb = BLOCK_FOR_INSN (use_insn);
3918       basic_block def_bb = BLOCK_FOR_INSN (def_insn);
3919       if (bb_loop_depth (use_bb) > bb_loop_depth (def_bb))
3920 	continue;
3921 
3922       if (asm_noperands (PATTERN (def_insn)) < 0
3923 	  && validate_replace_rtx (regno_reg_rtx[regno],
3924 				   *reg_equiv[regno].src_p, use_insn))
3925 	{
3926 	  rtx link;
3927 	  /* Append the REG_DEAD notes from def_insn.  */
3928 	  for (rtx *p = &REG_NOTES (def_insn); (link = *p) != 0; )
3929 	    {
3930 	      if (REG_NOTE_KIND (XEXP (link, 0)) == REG_DEAD)
3931 		{
3932 		  *p = XEXP (link, 1);
3933 		  XEXP (link, 1) = REG_NOTES (use_insn);
3934 		  REG_NOTES (use_insn) = link;
3935 		}
3936 	      else
3937 		p = &XEXP (link, 1);
3938 	    }
3939 
3940 	  remove_death (regno, use_insn);
3941 	  SET_REG_N_REFS (regno, 0);
3942 	  REG_FREQ (regno) = 0;
3943 	  df_ref use;
3944 	  FOR_EACH_INSN_USE (use, def_insn)
3945 	    {
3946 	      unsigned int use_regno = DF_REF_REGNO (use);
3947 	      if (!HARD_REGISTER_NUM_P (use_regno))
3948 		reg_equiv[use_regno].replace = 0;
3949 	    }
3950 
3951 	  delete_insn (def_insn);
3952 
3953 	  reg_equiv[regno].init_insns = NULL;
3954 	  ira_reg_equiv[regno].init_insns = NULL;
3955 	  bitmap_set_bit (cleared_regs, regno);
3956 	}
3957 
3958       /* Move the initialization of the register to just before
3959 	 USE_INSN.  Update the flow information.  */
3960       else if (prev_nondebug_insn (use_insn) != def_insn)
3961 	{
3962 	  rtx_insn *new_insn;
3963 
3964 	  new_insn = emit_insn_before (PATTERN (def_insn), use_insn);
3965 	  REG_NOTES (new_insn) = REG_NOTES (def_insn);
3966 	  REG_NOTES (def_insn) = 0;
3967 	  /* Rescan it to process the notes.  */
3968 	  df_insn_rescan (new_insn);
3969 
3970 	  /* Make sure this insn is recognized before reload begins,
3971 	     otherwise eliminate_regs_in_insn will die.  */
3972 	  INSN_CODE (new_insn) = INSN_CODE (def_insn);
3973 
3974 	  delete_insn (def_insn);
3975 
3976 	  XEXP (reg_equiv[regno].init_insns, 0) = new_insn;
3977 
3978 	  REG_BASIC_BLOCK (regno) = use_bb->index;
3979 	  REG_N_CALLS_CROSSED (regno) = 0;
3980 
3981 	  if (use_insn == BB_HEAD (use_bb))
3982 	    BB_HEAD (use_bb) = new_insn;
3983 
3984 	  /* We know regno dies in use_insn, but inside a loop
3985 	     REG_DEAD notes might be missing when def_insn was in
3986 	     another basic block.  However, when we move def_insn into
3987 	     this bb we'll definitely get a REG_DEAD note and reload
3988 	     will see the death.  It's possible that update_equiv_regs
3989 	     set up an equivalence referencing regno for a reg set by
3990 	     use_insn, when regno was seen as non-local.  Now that
3991 	     regno is local to this block, and dies, such an
3992 	     equivalence is invalid.  */
3993 	  if (find_reg_note (use_insn, REG_EQUIV, regno_reg_rtx[regno]))
3994 	    {
3995 	      rtx set = single_set (use_insn);
3996 	      if (set && REG_P (SET_DEST (set)))
3997 		no_equiv (SET_DEST (set), set, NULL);
3998 	    }
3999 
4000 	  ira_reg_equiv[regno].init_insns
4001 	    = gen_rtx_INSN_LIST (VOIDmode, new_insn, NULL_RTX);
4002 	  bitmap_set_bit (cleared_regs, regno);
4003 	}
4004     }
4005 
4006   if (!bitmap_empty_p (cleared_regs))
4007     {
4008       basic_block bb;
4009 
4010       FOR_EACH_BB_FN (bb, cfun)
4011 	{
4012 	  bitmap_and_compl_into (DF_LR_IN (bb), cleared_regs);
4013 	  bitmap_and_compl_into (DF_LR_OUT (bb), cleared_regs);
4014 	  if (!df_live)
4015 	    continue;
4016 	  bitmap_and_compl_into (DF_LIVE_IN (bb), cleared_regs);
4017 	  bitmap_and_compl_into (DF_LIVE_OUT (bb), cleared_regs);
4018 	}
4019 
4020       /* Last pass - adjust debug insns referencing cleared regs.  */
4021       if (MAY_HAVE_DEBUG_BIND_INSNS)
4022 	for (rtx_insn *insn = get_insns (); insn; insn = NEXT_INSN (insn))
4023 	  if (DEBUG_BIND_INSN_P (insn))
4024 	    {
4025 	      rtx old_loc = INSN_VAR_LOCATION_LOC (insn);
4026 	      INSN_VAR_LOCATION_LOC (insn)
4027 		= simplify_replace_fn_rtx (old_loc, NULL_RTX,
4028 					   adjust_cleared_regs,
4029 					   (void *) cleared_regs);
4030 	      if (old_loc != INSN_VAR_LOCATION_LOC (insn))
4031 		df_insn_rescan (insn);
4032 	    }
4033     }
4034 }
4035 
4036 /* A pass over indirect jumps, converting simple cases to direct jumps.
4037    Combine does this optimization too, but only within a basic block.  */
4038 static void
4039 indirect_jump_optimize (void)
4040 {
4041   basic_block bb;
4042   bool rebuild_p = false;
4043 
4044   FOR_EACH_BB_REVERSE_FN (bb, cfun)
4045     {
4046       rtx_insn *insn = BB_END (bb);
4047       if (!JUMP_P (insn)
4048 	  || find_reg_note (insn, REG_NON_LOCAL_GOTO, NULL_RTX))
4049 	continue;
4050 
4051       rtx x = pc_set (insn);
4052       if (!x || !REG_P (SET_SRC (x)))
4053 	continue;
4054 
4055       int regno = REGNO (SET_SRC (x));
4056       if (DF_REG_DEF_COUNT (regno) == 1)
4057 	{
4058 	  df_ref def = DF_REG_DEF_CHAIN (regno);
4059 	  if (!DF_REF_IS_ARTIFICIAL (def))
4060 	    {
4061 	      rtx_insn *def_insn = DF_REF_INSN (def);
4062 	      rtx lab = NULL_RTX;
4063 	      rtx set = single_set (def_insn);
4064 	      if (set && GET_CODE (SET_SRC (set)) == LABEL_REF)
4065 		lab = SET_SRC (set);
4066 	      else
4067 		{
4068 		  rtx eqnote = find_reg_note (def_insn, REG_EQUAL, NULL_RTX);
4069 		  if (eqnote && GET_CODE (XEXP (eqnote, 0)) == LABEL_REF)
4070 		    lab = XEXP (eqnote, 0);
4071 		}
4072 	      if (lab && validate_replace_rtx (SET_SRC (x), lab, insn))
4073 		rebuild_p = true;
4074 	    }
4075 	}
4076     }
4077 
4078   if (rebuild_p)
4079     {
4080       timevar_push (TV_JUMP);
4081       rebuild_jump_labels (get_insns ());
4082       if (purge_all_dead_edges ())
4083 	delete_unreachable_blocks ();
4084       timevar_pop (TV_JUMP);
4085     }
4086 }
4087 
4088 /* Set up fields memory, constant, and invariant from init_insns in
4089    the structures of array ira_reg_equiv.  */
4090 static void
4091 setup_reg_equiv (void)
4092 {
4093   int i;
4094   rtx_insn_list *elem, *prev_elem, *next_elem;
4095   rtx_insn *insn;
4096   rtx set, x;
4097 
4098   for (i = FIRST_PSEUDO_REGISTER; i < ira_reg_equiv_len; i++)
4099     for (prev_elem = NULL, elem = ira_reg_equiv[i].init_insns;
4100 	 elem;
4101 	 prev_elem = elem, elem = next_elem)
4102       {
4103 	next_elem = elem->next ();
4104 	insn = elem->insn ();
4105 	set = single_set (insn);
4106 
4107 	/* Init insns can set up equivalence when the reg is a destination or
4108 	   a source (in this case the destination is memory).  */
4109 	if (set != 0 && (REG_P (SET_DEST (set)) || REG_P (SET_SRC (set))))
4110 	  {
4111 	    if ((x = find_reg_note (insn, REG_EQUIV, NULL_RTX)) != NULL)
4112 	      {
4113 		x = XEXP (x, 0);
4114 		if (REG_P (SET_DEST (set))
4115 		    && REGNO (SET_DEST (set)) == (unsigned int) i
4116 		    && ! rtx_equal_p (SET_SRC (set), x) && MEM_P (x))
4117 		  {
4118 		    /* This insn reports the equivalence but does not
4119 		       actually set it.  Remove it from the
4120 		       list.  */
4121 		    if (prev_elem == NULL)
4122 		      ira_reg_equiv[i].init_insns = next_elem;
4123 		    else
4124 		      XEXP (prev_elem, 1) = next_elem;
4125 		    elem = prev_elem;
4126 		  }
4127 	      }
4128 	    else if (REG_P (SET_DEST (set))
4129 		     && REGNO (SET_DEST (set)) == (unsigned int) i)
4130 	      x = SET_SRC (set);
4131 	    else
4132 	      {
4133 		gcc_assert (REG_P (SET_SRC (set))
4134 			    && REGNO (SET_SRC (set)) == (unsigned int) i);
4135 		x = SET_DEST (set);
4136 	      }
4137 	    if (! function_invariant_p (x)
4138 		|| ! flag_pic
4139 		/* A function invariant is often CONSTANT_P but may
4140 		   include a register.  We promise to only pass
4141 		   CONSTANT_P objects to LEGITIMATE_PIC_OPERAND_P.  */
4142 		|| (CONSTANT_P (x) && LEGITIMATE_PIC_OPERAND_P (x)))
4143 	      {
4144 		/* It can happen that a REG_EQUIV note contains a MEM
4145 		   that is not a legitimate memory operand.  As later
4146 		   stages of reload assume that all addresses found in
4147 		   the lra_regno_equiv_* arrays were originally
4148 		   legitimate, we ignore such REG_EQUIV notes.  */
4149 		if (memory_operand (x, VOIDmode))
4150 		  {
4151 		    ira_reg_equiv[i].defined_p = true;
4152 		    ira_reg_equiv[i].memory = x;
4153 		    continue;
4154 		  }
4155 		else if (function_invariant_p (x))
4156 		  {
4157 		    machine_mode mode;
4158 
4159 		    mode = GET_MODE (SET_DEST (set));
4160 		    if (GET_CODE (x) == PLUS
4161 			|| x == frame_pointer_rtx || x == arg_pointer_rtx)
4162 		      /* This is PLUS of frame pointer and a constant,
4163 			 or fp, or argp.  */
4164 		      ira_reg_equiv[i].invariant = x;
4165 		    else if (targetm.legitimate_constant_p (mode, x))
4166 		      ira_reg_equiv[i].constant = x;
4167 		    else
4168 		      {
4169 			ira_reg_equiv[i].memory = force_const_mem (mode, x);
4170 			if (ira_reg_equiv[i].memory == NULL_RTX)
4171 			  {
4172 			    ira_reg_equiv[i].defined_p = false;
4173 			    ira_reg_equiv[i].init_insns = NULL;
4174 			    break;
4175 			  }
4176 		      }
4177 		    ira_reg_equiv[i].defined_p = true;
4178 		    continue;
4179 		  }
4180 	      }
4181 	  }
4182 	ira_reg_equiv[i].defined_p = false;
4183 	ira_reg_equiv[i].init_insns = NULL;
4184 	break;
4185       }
4186 }
4187 
4188 
4189 
4190 /* Print chain C to FILE.  */
4191 static void
4192 print_insn_chain (FILE *file, class insn_chain *c)
4193 {
4194   fprintf (file, "insn=%d, ", INSN_UID (c->insn));
4195   bitmap_print (file, &c->live_throughout, "live_throughout: ", ", ");
4196   bitmap_print (file, &c->dead_or_set, "dead_or_set: ", "\n");
4197 }
4198 
4199 
4200 /* Print all reload_insn_chains to FILE.  */
4201 static void
4202 print_insn_chains (FILE *file)
4203 {
4204   class insn_chain *c;
4205   for (c = reload_insn_chain; c ; c = c->next)
4206     print_insn_chain (file, c);
4207 }
4208 
4209 /* Return true if pseudo REGNO should be added to set live_throughout
4210    or dead_or_set of the insn chains for reload consideration.  */
4211 static bool
4212 pseudo_for_reload_consideration_p (int regno)
4213 {
4214   /* Consider spilled pseudos too for IRA because they still have a
4215      chance to get hard registers during reload when IRA is used.  */
4216   return (reg_renumber[regno] >= 0 || ira_conflicts_p);
4217 }
4218 
4219 /* Return true if we can track the individual bytes of subreg X.
4220    When returning true, set *OUTER_SIZE to the number of bytes in
4221    X itself, *INNER_SIZE to the number of bytes in the inner register
4222    and *START to the offset of the first byte.  */
4223 static bool
4224 get_subreg_tracking_sizes (rtx x, HOST_WIDE_INT *outer_size,
4225 			   HOST_WIDE_INT *inner_size, HOST_WIDE_INT *start)
4226 {
4227   rtx reg = regno_reg_rtx[REGNO (SUBREG_REG (x))];
4228   return (GET_MODE_SIZE (GET_MODE (x)).is_constant (outer_size)
4229 	  && GET_MODE_SIZE (GET_MODE (reg)).is_constant (inner_size)
4230 	  && SUBREG_BYTE (x).is_constant (start));
4231 }
4232 
4233 /* Init LIVE_SUBREGS[ALLOCNUM] and LIVE_SUBREGS_USED[ALLOCNUM] for
4234    a register with SIZE bytes, making the register live if INIT_VALUE.  */
4235 static void
4236 init_live_subregs (bool init_value, sbitmap *live_subregs,
4237 		   bitmap live_subregs_used, int allocnum, int size)
4238 {
4239   gcc_assert (size > 0);
4240 
4241   /* Been there, done that.  */
4242   if (bitmap_bit_p (live_subregs_used, allocnum))
4243     return;
4244 
4245   /* Create a new one.  */
4246   if (live_subregs[allocnum] == NULL)
4247     live_subregs[allocnum] = sbitmap_alloc (size);
4248 
4249   /* If the entire reg was live before blasting into subregs, we need
4250      to init all of the subregs to ones; otherwise init them to 0.  */
4251   if (init_value)
4252     bitmap_ones (live_subregs[allocnum]);
4253   else
4254     bitmap_clear (live_subregs[allocnum]);
4255 
4256   bitmap_set_bit (live_subregs_used, allocnum);
4257 }
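/* Illustrative sketch, not part of IRA: when build_insn_chain below
   processes a non-STRICT_LOW_PART partial definition, it widens the
   stored byte range to whole words before clearing liveness bits,
   because the extra bytes in those words are "don't care".  The
   rounding itself, factored out with hypothetical names:  */
static void
example_round_range_to_words (HOST_WIDE_INT *start_p, HOST_WIDE_INT *last_p)
{
  *start_p = *start_p / UNITS_PER_WORD * UNITS_PER_WORD;
  *last_p = ((*last_p + UNITS_PER_WORD - 1)
	     / UNITS_PER_WORD * UNITS_PER_WORD);
}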
4258 
4259 /* Walk the insns of the current function and build reload_insn_chain,
4260    and record register life information.  */
4261 static void
4262 build_insn_chain (void)
4263 {
4264   unsigned int i;
4265   class insn_chain **p = &reload_insn_chain;
4266   basic_block bb;
4267   class insn_chain *c = NULL;
4268   class insn_chain *next = NULL;
4269   auto_bitmap live_relevant_regs;
4270   auto_bitmap elim_regset;
4271   /* live_subregs is a vector used to keep accurate information about
4272      which hardregs are live in multiword pseudos.  live_subregs and
4273      live_subregs_used are indexed by pseudo number.  The live_subreg
4274      entry for a particular pseudo is only used if the corresponding
4275      element is non zero in live_subregs_used.  The sbitmap size of
4276      live_subreg[allocno] is number of bytes that the pseudo can
4277      occupy.  */
4278   sbitmap *live_subregs = XCNEWVEC (sbitmap, max_regno);
4279   auto_bitmap live_subregs_used;
4280 
4281   for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4282     if (TEST_HARD_REG_BIT (eliminable_regset, i))
4283       bitmap_set_bit (elim_regset, i);
4284   FOR_EACH_BB_REVERSE_FN (bb, cfun)
4285     {
4286       bitmap_iterator bi;
4287       rtx_insn *insn;
4288 
4289       CLEAR_REG_SET (live_relevant_regs);
4290       bitmap_clear (live_subregs_used);
4291 
4292       EXECUTE_IF_SET_IN_BITMAP (df_get_live_out (bb), 0, i, bi)
4293 	{
4294 	  if (i >= FIRST_PSEUDO_REGISTER)
4295 	    break;
4296 	  bitmap_set_bit (live_relevant_regs, i);
4297 	}
4298 
4299       EXECUTE_IF_SET_IN_BITMAP (df_get_live_out (bb),
4300 				FIRST_PSEUDO_REGISTER, i, bi)
4301 	{
4302 	  if (pseudo_for_reload_consideration_p (i))
4303 	    bitmap_set_bit (live_relevant_regs, i);
4304 	}
4305 
4306       FOR_BB_INSNS_REVERSE (bb, insn)
4307 	{
4308 	  if (!NOTE_P (insn) && !BARRIER_P (insn))
4309 	    {
4310 	      struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
4311 	      df_ref def, use;
4312 
4313 	      c = new_insn_chain ();
4314 	      c->next = next;
4315 	      next = c;
4316 	      *p = c;
4317 	      p = &c->prev;
4318 
4319 	      c->insn = insn;
4320 	      c->block = bb->index;
4321 
4322 	      if (NONDEBUG_INSN_P (insn))
4323 		FOR_EACH_INSN_INFO_DEF (def, insn_info)
4324 		  {
4325 		    unsigned int regno = DF_REF_REGNO (def);
4326 
4327 		    /* Ignore may clobbers because these are generated
4328 		       from calls. However, every other kind of def is
4329 		       added to dead_or_set.  */
4330 		    if (!DF_REF_FLAGS_IS_SET (def, DF_REF_MAY_CLOBBER))
4331 		      {
4332 			if (regno < FIRST_PSEUDO_REGISTER)
4333 			  {
4334 			    if (!fixed_regs[regno])
4335 			      bitmap_set_bit (&c->dead_or_set, regno);
4336 			  }
4337 			else if (pseudo_for_reload_consideration_p (regno))
4338 			  bitmap_set_bit (&c->dead_or_set, regno);
4339 		      }
4340 
4341 		    if ((regno < FIRST_PSEUDO_REGISTER
4342 			 || reg_renumber[regno] >= 0
4343 			 || ira_conflicts_p)
4344 			&& (!DF_REF_FLAGS_IS_SET (def, DF_REF_CONDITIONAL)))
4345 		      {
4346 			rtx reg = DF_REF_REG (def);
4347 			HOST_WIDE_INT outer_size, inner_size, start;
4348 
4349 			/* We can usually track the liveness of individual
4350 			   bytes within a subreg.  The only exceptions are
4351 			   subregs wrapped in ZERO_EXTRACTs and subregs whose
4352 			   size is not known; in those cases we need to be
4353 			   conservative and treat the definition as a partial
4354 			   definition of the full register rather than a full
4355 			   definition of a specific part of the register.  */
4356 			if (GET_CODE (reg) == SUBREG
4357 			    && !DF_REF_FLAGS_IS_SET (def, DF_REF_ZERO_EXTRACT)
4358 			    && get_subreg_tracking_sizes (reg, &outer_size,
4359 							  &inner_size, &start))
4360 			  {
4361 			    HOST_WIDE_INT last = start + outer_size;
4362 
4363 			    init_live_subregs
4364 			      (bitmap_bit_p (live_relevant_regs, regno),
4365 			       live_subregs, live_subregs_used, regno,
4366 			       inner_size);
4367 
4368 			    if (!DF_REF_FLAGS_IS_SET
4369 				(def, DF_REF_STRICT_LOW_PART))
4370 			      {
4371 				/* Expand the range to cover entire words.
4372 				   Bytes added here are "don't care".  */
4373 				start
4374 				  = start / UNITS_PER_WORD * UNITS_PER_WORD;
4375 				last = ((last + UNITS_PER_WORD - 1)
4376 					/ UNITS_PER_WORD * UNITS_PER_WORD);
4377 			      }
4378 
4379 			    /* Ignore the paradoxical bits.  */
4380 			    if (last > SBITMAP_SIZE (live_subregs[regno]))
4381 			      last = SBITMAP_SIZE (live_subregs[regno]);
4382 
4383 			    while (start < last)
4384 			      {
4385 				bitmap_clear_bit (live_subregs[regno], start);
4386 				start++;
4387 			      }
4388 
4389 			    if (bitmap_empty_p (live_subregs[regno]))
4390 			      {
4391 				bitmap_clear_bit (live_subregs_used, regno);
4392 				bitmap_clear_bit (live_relevant_regs, regno);
4393 			      }
4394 			    else
4395 			      /* Set live_relevant_regs here because
4396 				 that bit has to be true to get us to
4397 				 look at the live_subregs fields.  */
4398 			      bitmap_set_bit (live_relevant_regs, regno);
4399 			  }
4400 			else
4401 			  {
4402 			    /* DF_REF_PARTIAL is generated for
4403 			       subregs, STRICT_LOW_PART, and
4404 			       ZERO_EXTRACT.  We handle the subreg
4405 			       case above so here we have to keep from
4406 			       modeling the def as a killing def.  */
4407 			    if (!DF_REF_FLAGS_IS_SET (def, DF_REF_PARTIAL))
4408 			      {
4409 				bitmap_clear_bit (live_subregs_used, regno);
4410 				bitmap_clear_bit (live_relevant_regs, regno);
4411 			      }
4412 			  }
4413 		      }
4414 		  }
4415 
4416 	      bitmap_and_compl_into (live_relevant_regs, elim_regset);
4417 	      bitmap_copy (&c->live_throughout, live_relevant_regs);
4418 
4419 	      if (NONDEBUG_INSN_P (insn))
4420 		FOR_EACH_INSN_INFO_USE (use, insn_info)
4421 		  {
4422 		    unsigned int regno = DF_REF_REGNO (use);
4423 		    rtx reg = DF_REF_REG (use);
4424 
4425 		    /* DF_REF_READ_WRITE on a use means that this use
4426 		       is fabricated from a def that is a partial set
4427 		       to a multiword reg.  Here, we only model the
4428 		       subreg case that is not wrapped in ZERO_EXTRACT
4429 		       precisely so we do not need to look at the
4430 		       fabricated use.  */
4431 		    if (DF_REF_FLAGS_IS_SET (use, DF_REF_READ_WRITE)
4432 			&& !DF_REF_FLAGS_IS_SET (use, DF_REF_ZERO_EXTRACT)
4433 			&& DF_REF_FLAGS_IS_SET (use, DF_REF_SUBREG))
4434 		      continue;
4435 
4436 		    /* Add the last use of each var to dead_or_set.  */
4437 		    if (!bitmap_bit_p (live_relevant_regs, regno))
4438 		      {
4439 			if (regno < FIRST_PSEUDO_REGISTER)
4440 			  {
4441 			    if (!fixed_regs[regno])
4442 			      bitmap_set_bit (&c->dead_or_set, regno);
4443 			  }
4444 			else if (pseudo_for_reload_consideration_p (regno))
4445 			  bitmap_set_bit (&c->dead_or_set, regno);
4446 		      }
4447 
4448 		    if (regno < FIRST_PSEUDO_REGISTER
4449 			|| pseudo_for_reload_consideration_p (regno))
4450 		      {
4451 			HOST_WIDE_INT outer_size, inner_size, start;
4452 			if (GET_CODE (reg) == SUBREG
4453 			    && !DF_REF_FLAGS_IS_SET (use,
4454 						     DF_REF_SIGN_EXTRACT
4455 						     | DF_REF_ZERO_EXTRACT)
4456 			    && get_subreg_tracking_sizes (reg, &outer_size,
4457 							  &inner_size, &start))
4458 			  {
4459 			    HOST_WIDE_INT last = start + outer_size;
4460 
4461 			    init_live_subregs
4462 			      (bitmap_bit_p (live_relevant_regs, regno),
4463 			       live_subregs, live_subregs_used, regno,
4464 			       inner_size);
4465 
4466 			    /* Ignore the paradoxical bits.  */
4467 			    if (last > SBITMAP_SIZE (live_subregs[regno]))
4468 			      last = SBITMAP_SIZE (live_subregs[regno]);
4469 
4470 			    while (start < last)
4471 			      {
4472 				bitmap_set_bit (live_subregs[regno], start);
4473 				start++;
4474 			      }
4475 			  }
4476 			else
4477 			  /* Resetting the live_subregs_used is
4478 			     effectively saying do not use the subregs
4479 			     because we are reading the whole
4480 			     pseudo.  */
4481 			  bitmap_clear_bit (live_subregs_used, regno);
4482 			bitmap_set_bit (live_relevant_regs, regno);
4483 		      }
4484 		  }
4485 	    }
4486 	}
4487 
4488       /* FIXME!! The following code is a disaster.  Reload needs to see the
4489 	 labels and jump tables that are just hanging out in between
4490 	 the basic blocks.  See pr33676.  */
4491       insn = BB_HEAD (bb);
4492 
4493       /* Skip over the barriers and cruft.  */
4494       while (insn && (BARRIER_P (insn) || NOTE_P (insn)
4495 		      || BLOCK_FOR_INSN (insn) == bb))
4496 	insn = PREV_INSN (insn);
4497 
4498       /* While we add anything except barriers and notes, the focus is
4499 	 to get the labels and jump tables into the
4500 	 reload_insn_chain.  */
4501       while (insn)
4502 	{
4503 	  if (!NOTE_P (insn) && !BARRIER_P (insn))
4504 	    {
4505 	      if (BLOCK_FOR_INSN (insn))
4506 		break;
4507 
4508 	      c = new_insn_chain ();
4509 	      c->next = next;
4510 	      next = c;
4511 	      *p = c;
4512 	      p = &c->prev;
4513 
4514 	      /* The block makes no sense here, but it is what the old
4515 		 code did.  */
4516 	      c->block = bb->index;
4517 	      c->insn = insn;
4518 	      bitmap_copy (&c->live_throughout, live_relevant_regs);
4519 	    }
4520 	  insn = PREV_INSN (insn);
4521 	}
4522     }
4523 
4524   reload_insn_chain = c;
4525   *p = NULL;
4526 
4527   for (i = 0; i < (unsigned int) max_regno; i++)
4528     if (live_subregs[i] != NULL)
4529       sbitmap_free (live_subregs[i]);
4530   free (live_subregs);
4531 
4532   if (dump_file)
4533     print_insn_chains (dump_file);
4534 }
4535 
4536 /* Examine the rtx found in *LOC, which is read or written to as determined
4537    by TYPE.  Return false if we find a reason why an insn containing this
4538    rtx should not be moved (such as accesses to non-constant memory), true
4539    otherwise.  */
4540 static bool
4541 rtx_moveable_p (rtx *loc, enum op_type type)
4542 {
4543   const char *fmt;
4544   rtx x = *loc;
4545   int i, j;
4546 
4547   enum rtx_code code = GET_CODE (x);
4548   switch (code)
4549     {
4550     case CONST:
4551     CASE_CONST_ANY:
4552     case SYMBOL_REF:
4553     case LABEL_REF:
4554       return true;
4555 
4556     case PC:
4557       return type == OP_IN;
4558 
4559     case REG:
4560       if (x == frame_pointer_rtx)
4561 	return true;
4562       if (HARD_REGISTER_P (x))
4563 	return false;
4564 
4565       return true;
4566 
4567     case MEM:
4568       if (type == OP_IN && MEM_READONLY_P (x))
4569 	return rtx_moveable_p (&XEXP (x, 0), OP_IN);
4570       return false;
4571 
4572     case SET:
4573       return (rtx_moveable_p (&SET_SRC (x), OP_IN)
4574 	      && rtx_moveable_p (&SET_DEST (x), OP_OUT));
4575 
4576     case STRICT_LOW_PART:
4577       return rtx_moveable_p (&XEXP (x, 0), OP_OUT);
4578 
4579     case ZERO_EXTRACT:
4580     case SIGN_EXTRACT:
4581       return (rtx_moveable_p (&XEXP (x, 0), type)
4582 	      && rtx_moveable_p (&XEXP (x, 1), OP_IN)
4583 	      && rtx_moveable_p (&XEXP (x, 2), OP_IN));
4584 
4585     case CLOBBER:
4586       return rtx_moveable_p (&SET_DEST (x), OP_OUT);
4587 
4588     case UNSPEC_VOLATILE:
4589       /* It is a bad idea to consider insns with such rtl
4590 	 as moveable.  The insn scheduler also treats them as
4591 	 barriers for a reason.  */
4592       return false;
4593 
4594     case ASM_OPERANDS:
4595       /* The same is true for volatile asm: it has unknown side effects
4596          and cannot be moved at will.  */
4597       if (MEM_VOLATILE_P (x))
4598 	return false;
4599 
4600     default:
4601       break;
4602     }
4603 
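  /* For any other code, recurse on all sub-expressions.  */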
4604   fmt = GET_RTX_FORMAT (code);
4605   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4606     {
4607       if (fmt[i] == 'e')
4608 	{
4609 	  if (!rtx_moveable_p (&XEXP (x, i), type))
4610 	    return false;
4611 	}
4612       else if (fmt[i] == 'E')
4613 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4614 	  {
4615 	    if (!rtx_moveable_p (&XVECEXP (x, i, j), type))
4616 	      return false;
4617 	  }
4618     }
4619   return true;
4620 }
4621 
4622 /* A wrapper around dominated_by_p, which uses the information in UID_LUID
4623    to give dominance relationships between two insns I1 and I2.  */
4624 static bool
4625 insn_dominated_by_p (rtx i1, rtx i2, int *uid_luid)
4626 {
4627   basic_block bb1 = BLOCK_FOR_INSN (i1);
4628   basic_block bb2 = BLOCK_FOR_INSN (i2);
4629 
4630   if (bb1 == bb2)
4631     return uid_luid[INSN_UID (i2)] < uid_luid[INSN_UID (i1)];
4632   return dominated_by_p (CDI_DOMINATORS, bb1, bb2);
4633 }
4634 
4635 /* Record the range of register numbers added by find_moveable_pseudos.  */
4636 int first_moveable_pseudo, last_moveable_pseudo;
4637 
4638 /* This vector holds data for every register added by
4639    find_moveable_pseudos, with index 0 holding data for the
4640    first_moveable_pseudo.  */
4641 /* The original home register.  */
4642 static vec<rtx> pseudo_replaced_reg;
4643 
4644 /* Look for instances where we have an instruction that is known to increase
4645    register pressure, and whose result is not used immediately.  If it is
4646    possible to move the instruction downwards to just before its first use,
4647    split its lifetime into two ranges.  We create a new pseudo to compute the
4648    value, and emit a move instruction just before the first use.  If, after
4649    register allocation, the new pseudo remains unallocated, the function
4650    move_unallocated_pseudos then deletes the move instruction and places
4651    the computation just before the first use.
4652 
4653    Such a move is safe and profitable if all the input registers remain live
4654    and unchanged between the original computation and its first use.  In such
4655    a situation, the computation is known to increase register pressure, and
4656    moving it is known to at least not worsen it.
4657 
4658    We restrict moves to only those cases where a register remains unallocated,
4659    in order to avoid interfering too much with the instruction schedule.  As
4660    an exception, we may move insns which only modify their input register
4661    (typically induction variables), as this increases the freedom for our
4662    intended transformation, and does not limit the second instruction
4663    scheduler pass.  */
4664 
4665 static void
4666 find_moveable_pseudos (void)
4667 {
4668   unsigned i;
4669   int max_regs = max_reg_num ();
4670   int max_uid = get_max_uid ();
4671   basic_block bb;
4672   int *uid_luid = XNEWVEC (int, max_uid);
4673   rtx_insn **closest_uses = XNEWVEC (rtx_insn *, max_regs);
4674   /* A set of registers which are live but not modified throughout a block.  */
4675   bitmap_head *bb_transp_live = XNEWVEC (bitmap_head,
4676 					 last_basic_block_for_fn (cfun));
4677   /* A set of registers which only exist in a given basic block.  */
4678   bitmap_head *bb_local = XNEWVEC (bitmap_head,
4679 				   last_basic_block_for_fn (cfun));
4680   /* A set of registers which are set once, in an instruction that can be
4681      moved freely downwards, but are otherwise transparent to a block.  */
4682   bitmap_head *bb_moveable_reg_sets = XNEWVEC (bitmap_head,
4683 					       last_basic_block_for_fn (cfun));
4684   auto_bitmap live, used, set, interesting, unusable_as_input;
4685   bitmap_iterator bi;
4686 
4687   first_moveable_pseudo = max_regs;
4688   pseudo_replaced_reg.release ();
4689   pseudo_replaced_reg.safe_grow_cleared (max_regs, true);
4690 
4691   df_analyze ();
4692   calculate_dominance_info (CDI_DOMINATORS);
4693 
4694   i = 0;
4695   FOR_EACH_BB_FN (bb, cfun)
4696     {
4697       rtx_insn *insn;
4698       bitmap transp = bb_transp_live + bb->index;
4699       bitmap moveable = bb_moveable_reg_sets + bb->index;
4700       bitmap local = bb_local + bb->index;
4701 
4702       bitmap_initialize (local, 0);
4703       bitmap_initialize (transp, 0);
4704       bitmap_initialize (moveable, 0);
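      /* Start TRANSP as the registers live both into and out of the block;
	 the scan below removes any register that is set here, and any
	 moveable register that is used here.  */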
4705       bitmap_copy (live, df_get_live_out (bb));
4706       bitmap_and_into (live, df_get_live_in (bb));
4707       bitmap_copy (transp, live);
4708       bitmap_clear (moveable);
4709       bitmap_clear (live);
4710       bitmap_clear (used);
4711       bitmap_clear (set);
4712       FOR_BB_INSNS (bb, insn)
4713 	if (NONDEBUG_INSN_P (insn))
4714 	  {
4715 	    df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
4716 	    df_ref def, use;
4717 
4718 	    uid_luid[INSN_UID (insn)] = i++;
4719 
4720 	    def = df_single_def (insn_info);
4721 	    use = df_single_use (insn_info);
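	    /* An insn whose single def and single use are the same register,
	       which has not been set earlier in this block and whose pattern
	       is otherwise moveable, only modifies its input (typically an
	       induction variable update); record it in MOVEABLE.  */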
4722 	    if (use
4723 		&& def
4724 		&& DF_REF_REGNO (use) == DF_REF_REGNO (def)
4725 		&& !bitmap_bit_p (set, DF_REF_REGNO (use))
4726 		&& rtx_moveable_p (&PATTERN (insn), OP_IN))
4727 	      {
4728 		unsigned regno = DF_REF_REGNO (use);
4729 		bitmap_set_bit (moveable, regno);
4730 		bitmap_set_bit (set, regno);
4731 		bitmap_set_bit (used, regno);
4732 		bitmap_clear_bit (transp, regno);
4733 		continue;
4734 	      }
4735 	    FOR_EACH_INSN_INFO_USE (use, insn_info)
4736 	      {
4737 		unsigned regno = DF_REF_REGNO (use);
4738 		bitmap_set_bit (used, regno);
4739 		if (bitmap_clear_bit (moveable, regno))
4740 		  bitmap_clear_bit (transp, regno);
4741 	      }
4742 
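	    /* A definition makes the register neither transparent nor
	       moveable in this block.  */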
4743 	    FOR_EACH_INSN_INFO_DEF (def, insn_info)
4744 	      {
4745 		unsigned regno = DF_REF_REGNO (def);
4746 		bitmap_set_bit (set, regno);
4747 		bitmap_clear_bit (transp, regno);
4748 		bitmap_clear_bit (moveable, regno);
4749 	      }
4750 	  }
4751     }
4752 
4753   FOR_EACH_BB_FN (bb, cfun)
4754     {
4755       bitmap local = bb_local + bb->index;
4756       rtx_insn *insn;
4757 
4758       FOR_BB_INSNS (bb, insn)
4759 	if (NONDEBUG_INSN_P (insn))
4760 	  {
4761 	    df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
4762 	    rtx_insn *def_insn;
4763 	    rtx closest_use, note;
4764 	    df_ref def, use;
4765 	    unsigned regno;
4766 	    bool all_dominated, all_local;
4767 	    machine_mode mode;
4768 
4769 	    def = df_single_def (insn_info);
4770 	    /* There must be exactly one def in this insn.  */
4771 	    if (!def || !single_set (insn))
4772 	      continue;
4773 	    /* This must be the only definition of the reg.  We also limit
4774 	       which modes we deal with so that we can assume we can generate
4775 	       move instructions.  */
4776 	    regno = DF_REF_REGNO (def);
4777 	    mode = GET_MODE (DF_REF_REG (def));
4778 	    if (DF_REG_DEF_COUNT (regno) != 1
4779 		|| !DF_REF_INSN_INFO (def)
4780 		|| HARD_REGISTER_NUM_P (regno)
4781 		|| DF_REG_EQ_USE_COUNT (regno) > 0
4782 		|| (!INTEGRAL_MODE_P (mode)
4783 		    && !FLOAT_MODE_P (mode)
4784 		    && !OPAQUE_MODE_P (mode)))
4785 	      continue;
4786 	    def_insn = DF_REF_INSN (def);
4787 
4788 	    for (note = REG_NOTES (def_insn); note; note = XEXP (note, 1))
4789 	      if (REG_NOTE_KIND (note) == REG_EQUIV && MEM_P (XEXP (note, 0)))
4790 		break;
4791 
4792 	    if (note)
4793 	      {
4794 		if (dump_file)
4795 		  fprintf (dump_file, "Ignoring reg %d, has equiv memory\n",
4796 			   regno);
4797 		bitmap_set_bit (unusable_as_input, regno);
4798 		continue;
4799 	      }
4800 
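	    /* Walk all uses of REGNO to check that the def dominates every
	       use, whether all uses are in the def's block, and to find a
	       unique first use if one exists.  */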
4801 	    use = DF_REG_USE_CHAIN (regno);
4802 	    all_dominated = true;
4803 	    all_local = true;
4804 	    closest_use = NULL_RTX;
4805 	    for (; use; use = DF_REF_NEXT_REG (use))
4806 	      {
4807 		rtx_insn *insn;
4808 		if (!DF_REF_INSN_INFO (use))
4809 		  {
4810 		    all_dominated = false;
4811 		    all_local = false;
4812 		    break;
4813 		  }
4814 		insn = DF_REF_INSN (use);
4815 		if (DEBUG_INSN_P (insn))
4816 		  continue;
4817 		if (BLOCK_FOR_INSN (insn) != BLOCK_FOR_INSN (def_insn))
4818 		  all_local = false;
4819 		if (!insn_dominated_by_p (insn, def_insn, uid_luid))
4820 		  all_dominated = false;
4821 		if (closest_use != insn && closest_use != const0_rtx)
4822 		  {
4823 		    if (closest_use == NULL_RTX)
4824 		      closest_use = insn;
4825 		    else if (insn_dominated_by_p (closest_use, insn, uid_luid))
4826 		      closest_use = insn;
4827 		    else if (!insn_dominated_by_p (insn, closest_use, uid_luid))
4828 		      closest_use = const0_rtx;
4829 		  }
4830 	      }
4831 	    if (!all_dominated)
4832 	      {
4833 		if (dump_file)
4834 		  fprintf (dump_file, "Reg %d not all uses dominated by set\n",
4835 			   regno);
4836 		continue;
4837 	      }
4838 	    if (all_local)
4839 	      bitmap_set_bit (local, regno);
4840 	    if (closest_use == const0_rtx || closest_use == NULL
4841 		|| next_nonnote_nondebug_insn (def_insn) == closest_use)
4842 	      {
4843 		if (dump_file)
4844 		  fprintf (dump_file, "Reg %d uninteresting%s\n", regno,
4845 			   closest_use == const0_rtx || closest_use == NULL
4846 			   ? " (no unique first use)" : "");
4847 		continue;
4848 	      }
4849 
4850 	    bitmap_set_bit (interesting, regno);
4851 	    /* If we get here, we know closest_use is a non-NULL insn
4852 	       (as opposed to const0_rtx).  */
4853 	    closest_uses[regno] = as_a <rtx_insn *> (closest_use);
4854 
4855 	    if (dump_file && (all_local || all_dominated))
4856 	      {
4857 		fprintf (dump_file, "Reg %u:", regno);
4858 		if (all_local)
4859 		  fprintf (dump_file, " local to bb %d", bb->index);
4860 		if (all_dominated)
4861 		  fprintf (dump_file, " def dominates all uses");
4862 		if (closest_use != const0_rtx)
4863 		  fprintf (dump_file, " has unique first use");
4864 		fputs ("\n", dump_file);
4865 	      }
4866 	  }
4867     }
4868 
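  /* Second pass over the interesting registers: if all inputs of the
     defining insn stay transparent (or can be made so) up to the first use,
     split the live range by renaming the def to a new pseudo.  */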
4869   EXECUTE_IF_SET_IN_BITMAP (interesting, 0, i, bi)
4870     {
4871       df_ref def = DF_REG_DEF_CHAIN (i);
4872       rtx_insn *def_insn = DF_REF_INSN (def);
4873       basic_block def_block = BLOCK_FOR_INSN (def_insn);
4874       bitmap def_bb_local = bb_local + def_block->index;
4875       bitmap def_bb_moveable = bb_moveable_reg_sets + def_block->index;
4876       bitmap def_bb_transp = bb_transp_live + def_block->index;
4877       bool local_to_bb_p = bitmap_bit_p (def_bb_local, i);
4878       rtx_insn *use_insn = closest_uses[i];
4879       df_ref use;
4880       bool all_ok = true;
4881       bool all_transp = true;
4882 
4883       if (!REG_P (DF_REF_REG (def)))
4884 	continue;
4885 
4886       if (!local_to_bb_p)
4887 	{
4888 	  if (dump_file)
4889 	    fprintf (dump_file, "Reg %u not local to one basic block\n",
4890 		     i);
4891 	  continue;
4892 	}
4893       if (reg_equiv_init (i) != NULL_RTX)
4894 	{
4895 	  if (dump_file)
4896 	    fprintf (dump_file, "Ignoring reg %u with equiv init insn\n",
4897 		     i);
4898 	  continue;
4899 	}
4900       if (!rtx_moveable_p (&PATTERN (def_insn), OP_IN))
4901 	{
4902 	  if (dump_file)
4903 	    fprintf (dump_file, "Found def insn %d for %d to be not moveable\n",
4904 		     INSN_UID (def_insn), i);
4905 	  continue;
4906 	}
4907       if (dump_file)
4908 	fprintf (dump_file, "Examining insn %d, def for %d\n",
4909 		 INSN_UID (def_insn), i);
4910       FOR_EACH_INSN_USE (use, def_insn)
4911 	{
4912 	  unsigned regno = DF_REF_REGNO (use);
4913 	  if (bitmap_bit_p (unusable_as_input, regno))
4914 	    {
4915 	      all_ok = false;
4916 	      if (dump_file)
4917 		fprintf (dump_file, "  found unusable input reg %u.\n", regno);
4918 	      break;
4919 	    }
4920 	  if (!bitmap_bit_p (def_bb_transp, regno))
4921 	    {
4922 	      if (bitmap_bit_p (def_bb_moveable, regno)
4923 		  && !control_flow_insn_p (use_insn))
4924 		{
4925 		  if (modified_between_p (DF_REF_REG (use), def_insn, use_insn))
4926 		    {
4927 		      rtx_insn *x = NEXT_INSN (def_insn);
4928 		      while (!modified_in_p (DF_REF_REG (use), x))
4929 			{
4930 			  gcc_assert (x != use_insn);
4931 			  x = NEXT_INSN (x);
4932 			}
4933 		      if (dump_file)
4934 			fprintf (dump_file, "  input reg %u modified but insn %d moveable\n",
4935 				 regno, INSN_UID (x));
4936 		      emit_insn_after (PATTERN (x), use_insn);
4937 		      set_insn_deleted (x);
4938 		    }
4939 		  else
4940 		    {
4941 		      if (dump_file)
4942 			fprintf (dump_file, "  input reg %u modified between def and use\n",
4943 				 regno);
4944 		      all_transp = false;
4945 		    }
4946 		}
4947 	      else
4948 		all_transp = false;
4949 	    }
4950 	}
4951       if (!all_ok)
4952 	continue;
4953       if (!dbg_cnt (ira_move))
4954 	break;
4955       if (dump_file)
4956 	fprintf (dump_file, "  all ok%s\n", all_transp ? " and transp" : "");
4957 
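      /* All inputs are unchanged up to the first use: rename the def to a
	 fresh pseudo and emit a copy back into the original register just
	 before that use.  move_unallocated_pseudos later undoes this if the
	 new pseudo does not get a hard register.  */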
4958       if (all_transp)
4959 	{
4960 	  rtx def_reg = DF_REF_REG (def);
4961 	  rtx newreg = ira_create_new_reg (def_reg);
4962 	  if (validate_change (def_insn, DF_REF_REAL_LOC (def), newreg, 0))
4963 	    {
4964 	      unsigned nregno = REGNO (newreg);
4965 	      emit_insn_before (gen_move_insn (def_reg, newreg), use_insn);
4966 	      nregno -= max_regs;
4967 	      pseudo_replaced_reg[nregno] = def_reg;
4968 	    }
4969 	}
4970     }
4971 
4972   FOR_EACH_BB_FN (bb, cfun)
4973     {
4974       bitmap_clear (bb_local + bb->index);
4975       bitmap_clear (bb_transp_live + bb->index);
4976       bitmap_clear (bb_moveable_reg_sets + bb->index);
4977     }
4978   free (uid_luid);
4979   free (closest_uses);
4980   free (bb_local);
4981   free (bb_transp_live);
4982   free (bb_moveable_reg_sets);
4983 
4984   last_moveable_pseudo = max_reg_num ();
4985 
4986   fix_reg_equiv_init ();
4987   expand_reg_info ();
4988   regstat_free_n_sets_and_refs ();
4989   regstat_free_ri ();
4990   regstat_init_n_sets_and_refs ();
4991   regstat_compute_ri ();
4992   free_dominance_info (CDI_DOMINATORS);
4993 }
4994 
4995 /* If the SET pattern SET is an assignment from a hard register to a pseudo
4996    which is live at CALL_DOM (if non-NULL, otherwise this check is omitted),
4997    return the destination.  Otherwise return NULL.  */
4998 
4999 static rtx
5000 interesting_dest_for_shprep_1 (rtx set, basic_block call_dom)
5001 {
5002   rtx src = SET_SRC (set);
5003   rtx dest = SET_DEST (set);
5004   if (!REG_P (src) || !HARD_REGISTER_P (src)
5005       || !REG_P (dest) || HARD_REGISTER_P (dest)
5006       || (call_dom && !bitmap_bit_p (df_get_live_in (call_dom), REGNO (dest))))
5007     return NULL;
5008   return dest;
5009 }
5010 
5011 /* If INSN is interesting for parameter range-splitting shrink-wrapping
5012    preparation, i.e. it is a single set from a hard register to a pseudo which
5013    is live at CALL_DOM (if non-NULL, otherwise this check is omitted), or a
5014    PARALLEL containing exactly one such set, return the destination.
5015    Otherwise return NULL.  */
5016 
5017 static rtx
5018 interesting_dest_for_shprep (rtx_insn *insn, basic_block call_dom)
5019 {
5020   if (!INSN_P (insn))
5021     return NULL;
5022   rtx pat = PATTERN (insn);
5023   if (GET_CODE (pat) == SET)
5024     return interesting_dest_for_shprep_1 (pat, call_dom);
5025 
5026   if (GET_CODE (pat) != PARALLEL)
5027     return NULL;
5028   rtx ret = NULL;
5029   for (int i = 0; i < XVECLEN (pat, 0); i++)
5030     {
5031       rtx sub = XVECEXP (pat, 0, i);
5032       if (GET_CODE (sub) == USE || GET_CODE (sub) == CLOBBER)
5033 	continue;
5034       if (GET_CODE (sub) != SET
5035 	  || side_effects_p (sub))
5036 	return NULL;
5037       rtx dest = interesting_dest_for_shprep_1 (sub, call_dom);
5038       if (dest && ret)
5039 	return NULL;
5040       if (dest)
5041 	ret = dest;
5042     }
5043   return ret;
5044 }
5045 
5046 /* Split live ranges of pseudos that are loaded from hard registers in the
5047    first BB, at a BB that dominates all non-sibling calls, if such a BB can
5048    be found and is not in a loop.  Return true if the function has made any
5049    changes.  */
5050 
5051 static bool
5052 split_live_ranges_for_shrink_wrap (void)
5053 {
5054   basic_block bb, call_dom = NULL;
5055   basic_block first = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
5056   rtx_insn *insn, *last_interesting_insn = NULL;
5057   auto_bitmap need_new, reachable;
5058   vec<basic_block> queue;
5059 
5060   if (!SHRINK_WRAPPING_ENABLED)
5061     return false;
5062 
5063   queue.create (n_basic_blocks_for_fn (cfun));
5064 
5065   FOR_EACH_BB_FN (bb, cfun)
5066     FOR_BB_INSNS (bb, insn)
5067       if (CALL_P (insn) && !SIBLING_CALL_P (insn))
5068 	{
5069 	  if (bb == first)
5070 	    {
5071 	      queue.release ();
5072 	      return false;
5073 	    }
5074 
5075 	  bitmap_set_bit (need_new, bb->index);
5076 	  bitmap_set_bit (reachable, bb->index);
5077 	  queue.quick_push (bb);
5078 	  break;
5079 	}
5080 
5081   if (queue.is_empty ())
5082     {
5083       queue.release ();
5084       return false;
5085     }
5086 
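  /* Flood-fill REACHABLE with every block reachable from a block that
     contains a non-sibling call, excluding the exit block.  */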
5087   while (!queue.is_empty ())
5088     {
5089       edge e;
5090       edge_iterator ei;
5091 
5092       bb = queue.pop ();
5093       FOR_EACH_EDGE (e, ei, bb->succs)
5094 	if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
5095 	    && bitmap_set_bit (reachable, e->dest->index))
5096 	  queue.quick_push (e->dest);
5097     }
5098   queue.release ();
5099 
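  /* Scan the first block for single sets of a hard register into a pseudo;
     record the blocks with uses of such pseudos that are reachable from a
     call, and remember the last interesting insn.  */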
5100   FOR_BB_INSNS (first, insn)
5101     {
5102       rtx dest = interesting_dest_for_shprep (insn, NULL);
5103       if (!dest)
5104 	continue;
5105 
5106       if (DF_REG_DEF_COUNT (REGNO (dest)) > 1)
5107 	return false;
5108 
5109       for (df_ref use = DF_REG_USE_CHAIN (REGNO(dest));
5110 	   use;
5111 	   use = DF_REF_NEXT_REG (use))
5112 	{
5113 	  int ubbi = DF_REF_BB (use)->index;
5114 	  if (bitmap_bit_p (reachable, ubbi))
5115 	    bitmap_set_bit (need_new, ubbi);
5116 	}
5117       last_interesting_insn = insn;
5118     }
5119 
5120   if (!last_interesting_insn)
5121     return false;
5122 
5123   call_dom = nearest_common_dominator_for_set (CDI_DOMINATORS, need_new);
5124   if (call_dom == first)
5125     return false;
5126 
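  /* Walk CALL_DOM up the dominator tree until it is no longer inside a
     loop.  */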
5127   loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
5128   while (bb_loop_depth (call_dom) > 0)
5129     call_dom = get_immediate_dominator (CDI_DOMINATORS, call_dom);
5130   loop_optimizer_finalize ();
5131 
5132   if (call_dom == first)
5133     return false;
5134 
5135   calculate_dominance_info (CDI_POST_DOMINATORS);
5136   if (dominated_by_p (CDI_POST_DOMINATORS, first, call_dom))
5137     {
5138       free_dominance_info (CDI_POST_DOMINATORS);
5139       return false;
5140     }
5141   free_dominance_info (CDI_POST_DOMINATORS);
5142 
5143   if (dump_file)
5144     fprintf (dump_file, "Will split live ranges of parameters at BB %i\n",
5145 	     call_dom->index);
5146 
5147   bool ret = false;
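  /* For each interesting destination set in the first block, redirect the
     uses dominated by CALL_DOM to a new pseudo and emit a copy from the
     original register at the start of CALL_DOM.  */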
5148   FOR_BB_INSNS (first, insn)
5149     {
5150       rtx dest = interesting_dest_for_shprep (insn, call_dom);
5151       if (!dest || dest == pic_offset_table_rtx)
5152 	continue;
5153 
5154       bool need_newreg = false;
5155       df_ref use, next;
5156       for (use = DF_REG_USE_CHAIN (REGNO (dest)); use; use = next)
5157 	{
5158 	  rtx_insn *uin = DF_REF_INSN (use);
5159 	  next = DF_REF_NEXT_REG (use);
5160 
5161 	  if (DEBUG_INSN_P (uin))
5162 	    continue;
5163 
5164 	  basic_block ubb = BLOCK_FOR_INSN (uin);
5165 	  if (ubb == call_dom
5166 	      || dominated_by_p (CDI_DOMINATORS, ubb, call_dom))
5167 	    {
5168 	      need_newreg = true;
5169 	      break;
5170 	    }
5171 	}
5172 
5173       if (need_newreg)
5174 	{
5175 	  rtx newreg = ira_create_new_reg (dest);
5176 
5177 	  for (use = DF_REG_USE_CHAIN (REGNO (dest)); use; use = next)
5178 	    {
5179 	      rtx_insn *uin = DF_REF_INSN (use);
5180 	      next = DF_REF_NEXT_REG (use);
5181 
5182 	      basic_block ubb = BLOCK_FOR_INSN (uin);
5183 	      if (ubb == call_dom
5184 		  || dominated_by_p (CDI_DOMINATORS, ubb, call_dom))
5185 		validate_change (uin, DF_REF_REAL_LOC (use), newreg, true);
5186 	    }
5187 
5188 	  rtx_insn *new_move = gen_move_insn (newreg, dest);
5189 	  emit_insn_after (new_move, bb_note (call_dom));
5190 	  if (dump_file)
5191 	    {
5192 	      fprintf (dump_file, "Split live-range of register ");
5193 	      print_rtl_single (dump_file, dest);
5194 	    }
5195 	  ret = true;
5196 	}
5197 
5198       if (insn == last_interesting_insn)
5199 	break;
5200     }
5201   apply_change_group ();
5202   return ret;
5203 }
5204 
5205 /* Perform the second half of the transformation started in
5206    find_moveable_pseudos.  We look for instances where the newly introduced
5207    pseudo remains unallocated, and remove it by moving the definition to
5208    just before its use, replacing the move instruction generated by
5209    find_moveable_pseudos.  */
5210 static void
5211 move_unallocated_pseudos (void)
5212 {
5213   int i;
5214   for (i = first_moveable_pseudo; i < last_moveable_pseudo; i++)
5215     if (reg_renumber[i] < 0)
5216       {
5217 	int idx = i - first_moveable_pseudo;
5218 	rtx other_reg = pseudo_replaced_reg[idx];
5219 	/* The iterating range [first_moveable_pseudo, last_moveable_pseudo)
5220 	   covers every new pseudo created in find_moveable_pseudos,
5221 	   regardless of whether the validation with it succeeded or not.
5222 	   So we need to skip the pseudos which were used in those failed
5223 	   validations to avoid unexpected DF info and a consequent ICE.
5224 	   Since we only set pseudo_replaced_reg[] when the validation is
5225 	   successful in find_moveable_pseudos, it's enough to check it here.  */
5226 	if (!other_reg)
5227 	  continue;
5228 	rtx_insn *def_insn = DF_REF_INSN (DF_REG_DEF_CHAIN (i));
5229 	/* The use must follow all definitions of OTHER_REG, so we can
5230 	   insert the new definition immediately after any of them.  */
5231 	df_ref other_def = DF_REG_DEF_CHAIN (REGNO (other_reg));
5232 	rtx_insn *move_insn = DF_REF_INSN (other_def);
5233 	rtx_insn *newinsn = emit_insn_after (PATTERN (def_insn), move_insn);
5234 	rtx set;
5235 	int success;
5236 
5237 	if (dump_file)
5238 	  fprintf (dump_file, "moving def of %d (insn %d now) ",
5239 		   REGNO (other_reg), INSN_UID (def_insn));
5240 
5241 	delete_insn (move_insn);
5242 	while ((other_def = DF_REG_DEF_CHAIN (REGNO (other_reg))))
5243 	  delete_insn (DF_REF_INSN (other_def));
5244 	delete_insn (def_insn);
5245 
5246 	set = single_set (newinsn);
5247 	success = validate_change (newinsn, &SET_DEST (set), other_reg, 0);
5248 	gcc_assert (success);
5249 	if (dump_file)
5250 	  fprintf (dump_file, " %d) rather than keep unallocated replacement %d\n",
5251 		   INSN_UID (newinsn), i);
5252 	SET_REG_N_REFS (i, 0);
5253       }
5254 
5255   first_moveable_pseudo = last_moveable_pseudo = 0;
5256 }
5257 
5258 
5259 
5260 /* Code dealing with scratches (changing them into
5261    pseudos and restoring them from the pseudos).
5262 
5263    We change scratches into pseudos at the beginning of IRA to
5264    simplify dealing with them (conflicts, hard register assignments).
5265 
5266    If the pseudo denoting a scratch was spilled, it means that we do
5267    not need a hard register for it.  Such pseudos are transformed back to
5268    scratches at the end of LRA.  */
5269 
5270 /* Description of location of a former scratch operand.	 */
5271 struct sloc
5272 {
5273   rtx_insn *insn; /* Insn where the scratch was.  */
5274   int nop;  /* Number of the operand which was a scratch.  */
5275   unsigned regno; /* regno generated instead of the scratch.  */
5276   int icode;  /* Original icode from which scratch was removed.  */
5277 };
5278 
5279 typedef struct sloc *sloc_t;
5280 
5281 /* Locations of the former scratches.  */
5282 static vec<sloc_t> scratches;
5283 
5284 /* Bitmap of scratch regnos.  */
5285 static bitmap_head scratch_bitmap;
5286 
5287 /* Bitmap of scratch operands.	*/
5288 static bitmap_head scratch_operand_bitmap;
5289 
5290 /* Return true if pseudo REGNO is made of SCRATCH.  */
5291 bool
5292 ira_former_scratch_p (int regno)
5293 {
5294   return bitmap_bit_p (&scratch_bitmap, regno);
5295 }
5296 
5297 /* Return true if the operand NOP of INSN is a former scratch.	*/
5298 bool
5299 ira_former_scratch_operand_p (rtx_insn *insn, int nop)
5300 {
5301   return bitmap_bit_p (&scratch_operand_bitmap,
5302 		       INSN_UID (insn) * MAX_RECOG_OPERANDS + nop) != 0;
5303 }
5304 
5305 /* Register operand NOP in INSN as a former scratch.  It will be
5306    changed back to a scratch, if necessary, at the end of LRA.  */
5307 void
5308 ira_register_new_scratch_op (rtx_insn *insn, int nop, int icode)
5309 {
5310   rtx op = *recog_data.operand_loc[nop];
5311   sloc_t loc = XNEW (struct sloc);
5312   ira_assert (REG_P (op));
5313   loc->insn = insn;
5314   loc->nop = nop;
5315   loc->regno = REGNO (op);
5316   loc->icode = icode;
5317   scratches.safe_push (loc);
5318   bitmap_set_bit (&scratch_bitmap, REGNO (op));
5319   bitmap_set_bit (&scratch_operand_bitmap,
5320 		  INSN_UID (insn) * MAX_RECOG_OPERANDS + nop);
5321   add_reg_note (insn, REG_UNUSED, op);
5322 }
5323 
5324 /* Return true if string STR contains constraint 'X'.  */
5325 static bool
5326 contains_X_constraint_p (const char *str)
5327 {
5328   int c;
5329 
5330   while ((c = *str))
5331     {
5332       str += CONSTRAINT_LEN (c, str);
5333       if (c == 'X') return true;
5334     }
5335   return false;
5336 }
5337 
5338 /* Change INSN's scratches into pseudos and save their location.
5339    Return true if we changed any scratch.  */
5340 bool
5341 ira_remove_insn_scratches (rtx_insn *insn, bool all_p, FILE *dump_file,
5342 			   rtx (*get_reg) (rtx original))
5343 {
5344   int i;
5345   bool insn_changed_p;
5346   rtx reg, *loc;
5347 
5348   extract_insn (insn);
5349   insn_changed_p = false;
5350   for (i = 0; i < recog_data.n_operands; i++)
5351     {
5352       loc = recog_data.operand_loc[i];
5353       if (GET_CODE (*loc) == SCRATCH && GET_MODE (*loc) != VOIDmode)
5354 	{
5355 	  if (! all_p && contains_X_constraint_p (recog_data.constraints[i]))
5356 	    continue;
5357 	  insn_changed_p = true;
5358 	  *loc = reg = get_reg (*loc);
5359 	  ira_register_new_scratch_op (insn, i, INSN_CODE (insn));
5360 	  if (ira_dump_file != NULL)
5361 	    fprintf (dump_file,
5362 		     "Removing SCRATCH to p%u in insn #%u (nop %d)\n",
5363 		     REGNO (reg), INSN_UID (insn), i);
5364 	}
5365     }
5366   return insn_changed_p;
5367 }
5368 
5369 /* Return new register of the same mode as ORIGINAL.  Used in
5370    remove_scratches.  */
5371 static rtx
5372 get_scratch_reg (rtx original)
5373 {
5374   return gen_reg_rtx (GET_MODE (original));
5375 }
5376 
5377 /* Change scratches into pseudos and save their location.  Return true
5378    if we changed any scratch.  */
5379 static bool
5380 remove_scratches (void)
5381 {
5382   bool change_p = false;
5383   basic_block bb;
5384   rtx_insn *insn;
5385 
5386   scratches.create (get_max_uid ());
5387   bitmap_initialize (&scratch_bitmap, &reg_obstack);
5388   bitmap_initialize (&scratch_operand_bitmap, &reg_obstack);
5389   FOR_EACH_BB_FN (bb, cfun)
5390     FOR_BB_INSNS (bb, insn)
5391     if (INSN_P (insn)
5392 	&& ira_remove_insn_scratches (insn, false, ira_dump_file, get_scratch_reg))
5393       {
5394 	/* Because we might use DF, we need to keep DF info up to date.  */
5395 	df_insn_rescan (insn);
5396 	change_p = true;
5397       }
5398   return change_p;
5399 }
5400 
5401 /* Change pseudos created by remove_scratches back into scratches.  */
5402 void
5403 ira_restore_scratches (FILE *dump_file)
5404 {
5405   int regno, n;
5406   unsigned i;
5407   rtx *op_loc;
5408   sloc_t loc;
5409 
5410   for (i = 0; scratches.iterate (i, &loc); i++)
5411     {
5412       /* Ignore already deleted insns.  */
5413       if (NOTE_P (loc->insn)
5414 	  && NOTE_KIND (loc->insn) == NOTE_INSN_DELETED)
5415 	continue;
5416       extract_insn (loc->insn);
5417       if (loc->icode != INSN_CODE (loc->insn))
5418 	{
5419 	  /* The icode doesn't match, which means the insn has been modified
5420 	     (e.g. register elimination).  The scratch cannot be restored.  */
5421 	  continue;
5422 	}
5423       op_loc = recog_data.operand_loc[loc->nop];
5424       if (REG_P (*op_loc)
5425 	  && ((regno = REGNO (*op_loc)) >= FIRST_PSEUDO_REGISTER)
5426 	  && reg_renumber[regno] < 0)
5427 	{
5428 	  /* This should happen only when a scratch register with chosen
5429 	     constraint 'X' did not get memory or a hard register.  */
5430 	  ira_assert (ira_former_scratch_p (regno));
5431 	  *op_loc = gen_rtx_SCRATCH (GET_MODE (*op_loc));
5432 	  for (n = 0; n < recog_data.n_dups; n++)
5433 	    *recog_data.dup_loc[n]
5434 	      = *recog_data.operand_loc[(int) recog_data.dup_num[n]];
5435 	  if (dump_file != NULL)
5436 	    fprintf (dump_file, "Restoring SCRATCH in insn #%u(nop %d)\n",
5437 		     INSN_UID (loc->insn), loc->nop);
5438 	}
5439     }
5440   for (i = 0; scratches.iterate (i, &loc); i++)
5441     free (loc);
5442   scratches.release ();
5443   bitmap_clear (&scratch_bitmap);
5444   bitmap_clear (&scratch_operand_bitmap);
5445 }
5446 
5447 
5448 
5449 /* If the backend knows where to allocate pseudos for hard
5450    register initial values, register these allocations now.  */
5451 static void
5452 allocate_initial_values (void)
5453 {
5454   if (targetm.allocate_initial_value)
5455     {
5456       rtx hreg, preg, x;
5457       int i, regno;
5458 
5459       for (i = 0; HARD_REGISTER_NUM_P (i); i++)
5460 	{
5461 	  if (! initial_value_entry (i, &hreg, &preg))
5462 	    break;
5463 
5464 	  x = targetm.allocate_initial_value (hreg);
5465 	  regno = REGNO (preg);
5466 	  if (x && REG_N_SETS (regno) <= 1)
5467 	    {
5468 	      if (MEM_P (x))
5469 		reg_equiv_memory_loc (regno) = x;
5470 	      else
5471 		{
5472 		  basic_block bb;
5473 		  int new_regno;
5474 
5475 		  gcc_assert (REG_P (x));
5476 		  new_regno = REGNO (x);
5477 		  reg_renumber[regno] = new_regno;
5478 		  /* Poke the regno right into regno_reg_rtx so that even
5479 		     fixed regs are accepted.  */
5480 		  SET_REGNO (preg, new_regno);
5481 		  /* Update global register liveness information.  */
5482 		  FOR_EACH_BB_FN (bb, cfun)
5483 		    {
5484 		      if (REGNO_REG_SET_P (df_get_live_in (bb), regno))
5485 			SET_REGNO_REG_SET (df_get_live_in (bb), new_regno);
5486 		      if (REGNO_REG_SET_P (df_get_live_out (bb), regno))
5487 			SET_REGNO_REG_SET (df_get_live_out (bb), new_regno);
5488 		    }
5489 		}
5490 	    }
5491 	}
5492 
5493       gcc_checking_assert (! initial_value_entry (FIRST_PSEUDO_REGISTER,
5494 						  &hreg, &preg));
5495     }
5496 }
5497 
5498 
5499 
5500 
5501 /* True when we use LRA instead of reload pass for the current
5502    function.  */
5503 bool ira_use_lra_p;
5504 
5505 /* True if we have allocno conflicts.  It is false for non-optimized
5506    mode or when the conflict table is too big.  */
5507 bool ira_conflicts_p;
5508 
5509 /* Saved between IRA and reload.  */
5510 static int saved_flag_ira_share_spill_slots;
5511 
5512 /* This is the main entry of IRA.  */
5513 static void
5514 ira (FILE *f)
5515 {
5516   bool loops_p;
5517   int ira_max_point_before_emit;
5518   bool saved_flag_caller_saves = flag_caller_saves;
5519   enum ira_region saved_flag_ira_region = flag_ira_region;
5520   basic_block bb;
5521   edge_iterator ei;
5522   edge e;
5523   bool output_jump_reload_p = false;
5524 
5525   if (ira_use_lra_p)
5526     {
5527       /* First put potential jump output reloads on the output edges
5528 	 as USE insns which will be removed at the end of LRA.  The
5529 	 major goal is actually to create BBs on critical edges for LRA
5530 	 and populate them later with live info.  It would be difficult
5531 	 to do this in LRA itself.  */
5532       FOR_EACH_BB_FN (bb, cfun)
5533 	{
5534 	  rtx_insn *end = BB_END (bb);
5535 	  if (!JUMP_P (end))
5536 	    continue;
5537 	  extract_insn (end);
5538 	  for (int i = 0; i < recog_data.n_operands; i++)
5539 	    if (recog_data.operand_type[i] != OP_IN)
5540 	      {
5541 		bool skip_p = false;
5542 		FOR_EACH_EDGE (e, ei, bb->succs)
5543 		  if (EDGE_CRITICAL_P (e)
5544 		      && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
5545 		      && (e->flags & EDGE_ABNORMAL))
5546 		    {
5547 		      skip_p = true;
5548 		      break;
5549 		    }
5550 		if (skip_p)
5551 		  break;
5552 		output_jump_reload_p = true;
5553 		FOR_EACH_EDGE (e, ei, bb->succs)
5554 		  if (EDGE_CRITICAL_P (e)
5555 		      && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
5556 		    {
5557 		      start_sequence ();
5558 		      /* We need to put some no-op insn here.  We
5559 			 cannot put a note, as commit_edge_insertions
5560 			 will fail.  */
5561 		      emit_insn (gen_rtx_USE (VOIDmode, const1_rtx));
5562 		      rtx_insn *insns = get_insns ();
5563 		      end_sequence ();
5564 		      insert_insn_on_edge (insns, e);
5565 		    }
5566 		break;
5567 	      }
5568 	}
5569       if (output_jump_reload_p)
5570 	commit_edge_insertions ();
5571     }
5572 
5573   if (flag_ira_verbose < 10)
5574     {
5575       internal_flag_ira_verbose = flag_ira_verbose;
5576       ira_dump_file = f;
5577     }
5578   else
5579     {
5580       internal_flag_ira_verbose = flag_ira_verbose - 10;
5581       ira_dump_file = stderr;
5582     }
5583 
5584   clear_bb_flags ();
5585 
5586   /* Determine if the current function is a leaf before running IRA
5587      since this can impact optimizations done by the prologue and
5588      epilogue thus changing register elimination offsets.
5589      Other target callbacks may use crtl->is_leaf too, including
5590      SHRINK_WRAPPING_ENABLED, so initialize as early as possible.  */
5591   crtl->is_leaf = leaf_function_p ();
5592 
5593   /* Perform target specific PIC register initialization.  */
5594   targetm.init_pic_reg ();
5595 
5596   ira_conflicts_p = optimize > 0;
5597 
5598   /* Determine the number of pseudos actually requiring coloring.  */
5599   unsigned int num_used_regs = 0;
5600   for (unsigned int i = FIRST_PSEUDO_REGISTER; i < DF_REG_SIZE (df); i++)
5601     if (DF_REG_DEF_COUNT (i) || DF_REG_USE_COUNT (i))
5602       num_used_regs++;
5603 
5604   /* If there are too many pseudos and/or basic blocks (e.g. 10K
5605      pseudos and 10K blocks or 100K pseudos and 1K blocks), we will
5606      use simplified and faster algorithms in LRA.  */
5607   lra_simple_p
5608     = ira_use_lra_p
5609       && num_used_regs >= (1U << 26) / last_basic_block_for_fn (cfun);
5610 
5611   if (lra_simple_p)
5612     {
5613       /* This permits skipping live range splitting in LRA.  */
5614       flag_caller_saves = false;
5615       /* There is no sense in doing regional allocation when we use
5616 	the simplified LRA.  */
5617       flag_ira_region = IRA_REGION_ONE;
5618       ira_conflicts_p = false;
5619     }
5620 
5621 #ifndef IRA_NO_OBSTACK
5622   gcc_obstack_init (&ira_obstack);
5623 #endif
5624   bitmap_obstack_initialize (&ira_bitmap_obstack);
5625 
5626   /* LRA uses its own infrastructure to handle caller save registers.  */
5627   if (flag_caller_saves && !ira_use_lra_p)
5628     init_caller_save ();
5629 
5630   setup_prohibited_mode_move_regs ();
5631   decrease_live_ranges_number ();
5632   df_note_add_problem ();
5633 
5634   /* DF_LIVE can't be used in the register allocator, too many other
5635      parts of the compiler depend on using the "classic" liveness
5636      interpretation of the DF_LR problem.  See PR38711.
5637      Remove the problem, so that we don't spend time updating it in
5638      any of the df_analyze() calls during IRA/LRA.  */
5639   if (optimize > 1)
5640     df_remove_problem (df_live);
5641   gcc_checking_assert (df_live == NULL);
5642 
5643   if (flag_checking)
5644     df->changeable_flags |= DF_VERIFY_SCHEDULED;
5645 
5646   df_analyze ();
5647 
5648   init_reg_equiv ();
5649   if (ira_conflicts_p)
5650     {
5651       calculate_dominance_info (CDI_DOMINATORS);
5652 
5653       if (split_live_ranges_for_shrink_wrap ())
5654 	df_analyze ();
5655 
5656       free_dominance_info (CDI_DOMINATORS);
5657     }
5658 
5659   df_clear_flags (DF_NO_INSN_RESCAN);
5660 
5661   indirect_jump_optimize ();
5662   if (delete_trivially_dead_insns (get_insns (), max_reg_num ()))
5663     df_analyze ();
5664 
5665   regstat_init_n_sets_and_refs ();
5666   regstat_compute_ri ();
5667 
5668   /* If we are not optimizing, then this is the only place before
5669      register allocation where dataflow is done.  And that is needed
5670      to generate these warnings.  */
5671   if (warn_clobbered)
5672     generate_setjmp_warnings ();
5673 
5674   /* update_equiv_regs can use the reg classes of pseudos, which are set up
5675      by register pressure sensitive scheduling, loop invariant motion and
5676      live range shrinking.  This info can become obsolete if new pseudos have
5677      been added since the last setup.  Recalculate it if new pseudos were
5678      added.  */
5679   if (resize_reg_info () && (flag_sched_pressure || flag_live_range_shrinkage
5680 			     || flag_ira_loop_pressure))
5681     ira_set_pseudo_classes (true, ira_dump_file);
5682 
5683   init_alias_analysis ();
5684   loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
5685   reg_equiv = XCNEWVEC (struct equivalence, max_reg_num ());
5686   update_equiv_regs_prescan ();
5687   update_equiv_regs ();
5688 
5689   /* Don't move insns if live range shrinkage or register
5690      pressure-sensitive scheduling were done because it will not
5691      improve allocation but likely worsen insn scheduling.  */
5692   if (optimize
5693       && !flag_live_range_shrinkage
5694       && !(flag_sched_pressure && flag_schedule_insns))
5695     combine_and_move_insns ();
5696 
5697   /* Gather additional equivalences with memory.  */
5698   if (optimize)
5699     add_store_equivs ();
5700 
5701   loop_optimizer_finalize ();
5702   free_dominance_info (CDI_DOMINATORS);
5703   end_alias_analysis ();
5704   free (reg_equiv);
5705 
5706   /* Once max_regno changes, we need to free and re-init/re-compute
5707      some data structures like regstat_n_sets_and_refs and reg_info_p.  */
5708   auto regstat_recompute_for_max_regno = []() {
5709     regstat_free_n_sets_and_refs ();
5710     regstat_free_ri ();
5711     regstat_init_n_sets_and_refs ();
5712     regstat_compute_ri ();
5713   };
5714 
5715   int max_regno_before_rm = max_reg_num ();
5716   if (ira_use_lra_p && remove_scratches ())
5717     {
5718       ira_expand_reg_equiv ();
5719       /* For now remove_scratches is supposed to create pseudos when it
5720 	 succeeds; assert that this always happens.  Once it no longer
5721 	 holds, we should guard the regstat recompute so that it only
5722 	 runs when max_regno changes.  */
5723       gcc_assert (max_regno_before_rm != max_reg_num ());
5724       regstat_recompute_for_max_regno ();
5725     }
5726 
5727   setup_reg_equiv ();
5728   grow_reg_equivs ();
5729   setup_reg_equiv_init ();
5730 
5731   allocated_reg_info_size = max_reg_num ();
5732 
5733   /* It is not worth doing such an improvement when we use a simple
5734      allocation because of -O0 usage or because the function is too
5735      big.  */
5736   if (ira_conflicts_p)
5737     find_moveable_pseudos ();
5738 
5739   max_regno_before_ira = max_reg_num ();
5740   ira_setup_eliminable_regset ();
5741 
5742   ira_overall_cost = ira_reg_cost = ira_mem_cost = 0;
5743   ira_load_cost = ira_store_cost = ira_shuffle_cost = 0;
5744   ira_move_loops_num = ira_additional_jumps_num = 0;
5745 
5746   ira_assert (current_loops == NULL);
5747   if (flag_ira_region == IRA_REGION_ALL || flag_ira_region == IRA_REGION_MIXED)
5748     loop_optimizer_init (AVOID_CFG_MODIFICATIONS | LOOPS_HAVE_RECORDED_EXITS);
5749 
5750   if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
5751     fprintf (ira_dump_file, "Building IRA IR\n");
5752   loops_p = ira_build ();
5753 
5754   ira_assert (ira_conflicts_p || !loops_p);
5755 
5756   saved_flag_ira_share_spill_slots = flag_ira_share_spill_slots;
5757   if (too_high_register_pressure_p () || cfun->calls_setjmp)
5758     /* It just wastes the compiler's time to pack spilled pseudos into
5759        stack slots in this case -- prohibit it.  We also do this if
5760        there is a setjmp call, because the compiler is required to
5761        preserve the value of a variable not modified between setjmp and
5762        longjmp, and sharing slots does not guarantee that.  */
5763     flag_ira_share_spill_slots = FALSE;
5764 
5765   ira_color ();
5766 
5767   ira_max_point_before_emit = ira_max_point;
5768 
5769   ira_initiate_emit_data ();
5770 
5771   ira_emit (loops_p);
5772 
5773   max_regno = max_reg_num ();
5774   if (ira_conflicts_p)
5775     {
5776       if (! loops_p)
5777 	{
5778 	  if (! ira_use_lra_p)
5779 	    ira_initiate_assign ();
5780 	}
5781       else
5782 	{
5783 	  expand_reg_info ();
5784 
5785 	  if (ira_use_lra_p)
5786 	    {
5787 	      ira_allocno_t a;
5788 	      ira_allocno_iterator ai;
5789 
5790 	      FOR_EACH_ALLOCNO (a, ai)
5791                 {
5792                   int old_regno = ALLOCNO_REGNO (a);
5793                   int new_regno = REGNO (ALLOCNO_EMIT_DATA (a)->reg);
5794 
5795                   ALLOCNO_REGNO (a) = new_regno;
5796 
5797                   if (old_regno != new_regno)
5798                     setup_reg_classes (new_regno, reg_preferred_class (old_regno),
5799                                        reg_alternate_class (old_regno),
5800                                        reg_allocno_class (old_regno));
5801                 }
5802 	    }
5803 	  else
5804 	    {
5805 	      if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
5806 		fprintf (ira_dump_file, "Flattening IR\n");
5807 	      ira_flattening (max_regno_before_ira, ira_max_point_before_emit);
5808 	    }
5809 	  /* New insns were generated: add notes and recalculate live
5810 	     info.  */
5811 	  df_analyze ();
5812 
5813 	  /* ??? Rebuild the loop tree, but why?  Does the loop tree
5814 	     change if new insns were generated?  Can that be handled
5815 	     by updating the loop tree incrementally?  */
5816 	  loop_optimizer_finalize ();
5817 	  free_dominance_info (CDI_DOMINATORS);
5818 	  loop_optimizer_init (AVOID_CFG_MODIFICATIONS
5819 			       | LOOPS_HAVE_RECORDED_EXITS);
5820 
5821 	  if (! ira_use_lra_p)
5822 	    {
5823 	      setup_allocno_assignment_flags ();
5824 	      ira_initiate_assign ();
5825 	      ira_reassign_conflict_allocnos (max_regno);
5826 	    }
5827 	}
5828     }
5829 
5830   ira_finish_emit_data ();
5831 
5832   setup_reg_renumber ();
5833 
5834   calculate_allocation_cost ();
5835 
5836 #ifdef ENABLE_IRA_CHECKING
5837   if (ira_conflicts_p && ! ira_use_lra_p)
5838     /* Unlike the reload pass, LRA does not use any conflict info
5839        from IRA.  We don't rebuild conflict info for LRA (through an
5840        ira_flattening call) and cannot use the check here.  We could
5841        rebuild this info for LRA in checking mode, but there is a risk
5842        that code generated with the check and without it would differ
5843        slightly.  Calling ira_flattening in either mode would just
5844        waste CPU time.  So do not check the allocation for LRA.  */
5845     check_allocation ();
5846 #endif
5847 
5848   if (max_regno != max_regno_before_ira)
5849     regstat_recompute_for_max_regno ();
5850 
5851   overall_cost_before = ira_overall_cost;
5852   if (! ira_conflicts_p)
5853     grow_reg_equivs ();
5854   else
5855     {
5856       fix_reg_equiv_init ();
5857 
5858 #ifdef ENABLE_IRA_CHECKING
5859       print_redundant_copies ();
5860 #endif
5861       if (! ira_use_lra_p)
5862 	{
5863 	  ira_spilled_reg_stack_slots_num = 0;
5864 	  ira_spilled_reg_stack_slots
5865 	    = ((class ira_spilled_reg_stack_slot *)
5866 	       ira_allocate (max_regno
5867 			     * sizeof (class ira_spilled_reg_stack_slot)));
5868 	  memset ((void *)ira_spilled_reg_stack_slots, 0,
5869 		  max_regno * sizeof (class ira_spilled_reg_stack_slot));
5870 	}
5871     }
5872   allocate_initial_values ();
5873 
5874   /* See comment for find_moveable_pseudos call.  */
5875   if (ira_conflicts_p)
5876     move_unallocated_pseudos ();
5877 
5878   /* Restore original values.  */
5879   if (lra_simple_p)
5880     {
5881       flag_caller_saves = saved_flag_caller_saves;
5882       flag_ira_region = saved_flag_ira_region;
5883     }
5884 }
5885 
5886 /* Modify an asm goto insn to avoid further trouble with it.  We cannot
5887    replace the insn by a USE as we do for other asm insns, because we
5888    still need to keep the CFG consistent.  */
5889 void
5890 ira_nullify_asm_goto (rtx_insn *insn)
5891 {
5892   ira_assert (JUMP_P (insn) && INSN_CODE (insn) < 0);
5893   rtx tmp = extract_asm_operands (PATTERN (insn));
5894   PATTERN (insn) = gen_rtx_ASM_OPERANDS (VOIDmode, ggc_strdup (""), "", 0,
5895 					 rtvec_alloc (0),
5896 					 rtvec_alloc (0),
5897 					 ASM_OPERANDS_LABEL_VEC (tmp),
5898 					 ASM_OPERANDS_SOURCE_LOCATION(tmp));
5899 }
5900 
5901 static void
5902 do_reload (void)
5903 {
5904   basic_block bb;
5905   bool need_dce;
5906   unsigned pic_offset_table_regno = INVALID_REGNUM;
5907 
5908   if (flag_ira_verbose < 10)
5909     ira_dump_file = dump_file;
5910 
5911   /* If pic_offset_table_rtx is a pseudo register, then keep it a
5912      pseudo after reload to avoid possible wrong uses of the hard reg
5913      assigned to it.  */
5914   if (pic_offset_table_rtx
5915       && REGNO (pic_offset_table_rtx) >= FIRST_PSEUDO_REGISTER)
5916     pic_offset_table_regno = REGNO (pic_offset_table_rtx);
5917 
5918   timevar_push (TV_RELOAD);
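  /* With LRA the loop tree and the IRA internal representation are no
     longer needed; tear them down and let LRA do the rest.  Otherwise build
     the insn chain and run the classic reload pass.  */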
5919   if (ira_use_lra_p)
5920     {
5921       if (current_loops != NULL)
5922 	{
5923 	  loop_optimizer_finalize ();
5924 	  free_dominance_info (CDI_DOMINATORS);
5925 	}
5926       FOR_ALL_BB_FN (bb, cfun)
5927 	bb->loop_father = NULL;
5928       current_loops = NULL;
5929 
5930       ira_destroy ();
5931 
5932       lra (ira_dump_file);
5933       /* ???!!! Move it before lra () when we use ira_reg_equiv in
5934 	 LRA.  */
5935       vec_free (reg_equivs);
5936       reg_equivs = NULL;
5937       need_dce = false;
5938     }
5939   else
5940     {
5941       df_set_flags (DF_NO_INSN_RESCAN);
5942       build_insn_chain ();
5943 
5944       need_dce = reload (get_insns (), ira_conflicts_p);
5945     }
5946 
5947   timevar_pop (TV_RELOAD);
5948 
5949   timevar_push (TV_IRA);
5950 
5951   if (ira_conflicts_p && ! ira_use_lra_p)
5952     {
5953       ira_free (ira_spilled_reg_stack_slots);
5954       ira_finish_assign ();
5955     }
5956 
5957   if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL
5958       && overall_cost_before != ira_overall_cost)
5959     fprintf (ira_dump_file, "+++Overall after reload %" PRId64 "\n",
5960 	     ira_overall_cost);
5961 
5962   flag_ira_share_spill_slots = saved_flag_ira_share_spill_slots;
5963 
5964   if (! ira_use_lra_p)
5965     {
5966       ira_destroy ();
5967       if (current_loops != NULL)
5968 	{
5969 	  loop_optimizer_finalize ();
5970 	  free_dominance_info (CDI_DOMINATORS);
5971 	}
5972       FOR_ALL_BB_FN (bb, cfun)
5973 	bb->loop_father = NULL;
5974       current_loops = NULL;
5975 
5976       regstat_free_ri ();
5977       regstat_free_n_sets_and_refs ();
5978     }
5979 
5980   if (optimize)
5981     cleanup_cfg (CLEANUP_EXPENSIVE);
5982 
5983   finish_reg_equiv ();
5984 
5985   bitmap_obstack_release (&ira_bitmap_obstack);
5986 #ifndef IRA_NO_OBSTACK
5987   obstack_free (&ira_obstack, NULL);
5988 #endif
5989 
5990   /* The code after the reload has changed so much that at this point
5991      we might as well just rescan everything.  Note that
5992      df_rescan_all_insns is not going to help here because it does not
5993      touch the artificial uses and defs.  */
5994   df_finish_pass (true);
5995   df_scan_alloc (NULL);
5996   df_scan_blocks ();
5997 
5998   if (optimize > 1)
5999     {
6000       df_live_add_problem ();
6001       df_live_set_all_dirty ();
6002     }
6003 
6004   if (optimize)
6005     df_analyze ();
6006 
6007   if (need_dce && optimize)
6008     run_fast_dce ();
6009 
6010   /* Diagnose uses of the hard frame pointer when it is used as a global
6011      register.  Often we can get away with letting the user appropriate
6012      the frame pointer, but we should let them know when code generation
6013      makes that impossible.  */
6014   if (global_regs[HARD_FRAME_POINTER_REGNUM] && frame_pointer_needed)
6015     {
6016       tree decl = global_regs_decl[HARD_FRAME_POINTER_REGNUM];
6017       error_at (DECL_SOURCE_LOCATION (current_function_decl),
6018                 "frame pointer required, but reserved");
6019       inform (DECL_SOURCE_LOCATION (decl), "for %qD", decl);
6020     }
6021 
6022   /* If we are doing generic stack checking, give a warning if this
6023      function's frame size is larger than we expect.  */
6024   if (flag_stack_check == GENERIC_STACK_CHECK)
6025     {
6026       poly_int64 size = get_frame_size () + STACK_CHECK_FIXED_FRAME_SIZE;
6027 
6028       for (int i = 0; i < FIRST_PSEUDO_REGISTER; i++)
6029 	if (df_regs_ever_live_p (i)
6030 	    && !fixed_regs[i]
6031 	    && !crtl->abi->clobbers_full_reg_p (i))
6032 	  size += UNITS_PER_WORD;
6033 
6034       if (constant_lower_bound (size) > STACK_CHECK_MAX_FRAME_SIZE)
6035 	warning (0, "frame size too large for reliable stack checking");
6036     }
6037 
6038   if (pic_offset_table_regno != INVALID_REGNUM)
6039     pic_offset_table_rtx = gen_rtx_REG (Pmode, pic_offset_table_regno);
6040 
6041   timevar_pop (TV_IRA);
6042 }
6043 
6044 /* Run the integrated register allocator.  */
6045 
6046 namespace {
6047 
6048 const pass_data pass_data_ira =
6049 {
6050   RTL_PASS, /* type */
6051   "ira", /* name */
6052   OPTGROUP_NONE, /* optinfo_flags */
6053   TV_IRA, /* tv_id */
6054   0, /* properties_required */
6055   0, /* properties_provided */
6056   0, /* properties_destroyed */
6057   0, /* todo_flags_start */
6058   TODO_do_not_ggc_collect, /* todo_flags_finish */
6059 };
6060 
6061 class pass_ira : public rtl_opt_pass
6062 {
6063 public:
6064   pass_ira (gcc::context *ctxt)
6065     : rtl_opt_pass (pass_data_ira, ctxt)
6066   {}
6067 
6068   /* opt_pass methods: */
6069   virtual bool gate (function *)
6070     {
6071       return !targetm.no_register_allocation;
6072     }
6073   virtual unsigned int execute (function *)
6074     {
6075       ira (dump_file);
6076       return 0;
6077     }
6078 
6079 }; // class pass_ira
6080 
6081 } // anon namespace
6082 
6083 rtl_opt_pass *
6084 make_pass_ira (gcc::context *ctxt)
6085 {
6086   return new pass_ira (ctxt);
6087 }
6088 
6089 namespace {
6090 
6091 const pass_data pass_data_reload =
6092 {
6093   RTL_PASS, /* type */
6094   "reload", /* name */
6095   OPTGROUP_NONE, /* optinfo_flags */
6096   TV_RELOAD, /* tv_id */
6097   0, /* properties_required */
6098   0, /* properties_provided */
6099   0, /* properties_destroyed */
6100   0, /* todo_flags_start */
6101   0, /* todo_flags_finish */
6102 };
6103 
6104 class pass_reload : public rtl_opt_pass
6105 {
6106 public:
6107   pass_reload (gcc::context *ctxt)
6108     : rtl_opt_pass (pass_data_reload, ctxt)
6109   {}
6110 
6111   /* opt_pass methods: */
6112   virtual bool gate (function *)
6113     {
6114       return !targetm.no_register_allocation;
6115     }
6116   virtual unsigned int execute (function *)
6117     {
6118       do_reload ();
6119       return 0;
6120     }
6121 
6122 }; // class pass_reload
6123 
6124 } // anon namespace
6125 
6126 rtl_opt_pass *
6127 make_pass_reload (gcc::context *ctxt)
6128 {
6129   return new pass_reload (ctxt);
6130 }
6131