1 /* Change pseudos by memory.
2    Copyright (C) 2010-2013 Free Software Foundation, Inc.
3    Contributed by Vladimir Makarov <vmakarov@redhat.com>.
4 
5 This file is part of GCC.
6 
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11 
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
15 for more details.
16 
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3.	If not see
19 <http://www.gnu.org/licenses/>.	 */
20 
21 
22 /* This file contains code for a pass to change spilled pseudos into
23    memory.
24 
25    The pass creates necessary stack slots and assigns spilled pseudos
26    to the stack slots in following way:
27 
28    for all spilled pseudos P most frequently used first do
29      for all stack slots S do
30        if P doesn't conflict with pseudos assigned to S then
31 	 assign S to P and goto to the next pseudo process
32        end
33      end
34      create new stack slot S and assign P to S
35    end
36 
   The actual algorithm is a bit more complicated because of different
   pseudo sizes.
39 
40    After that the code changes spilled pseudos (except ones created
41    from scratches) by corresponding stack slot memory in RTL.
42 
43    If at least one stack slot was created, we need to run more passes
44    because we have new addresses which should be checked and because
45    the old address displacements might change and address constraints
46    (or insn memory constraints) might not be satisfied any more.
47 
48    For some targets, the pass can spill some pseudos into hard
49    registers of different class (usually into vector registers)
50    instead of spilling them into memory if it is possible and
51    profitable.  Spilling GENERAL_REGS pseudo into SSE registers for
52    Intel Corei7 is an example of such optimization.  And this is
53    actually recommended by Intel optimization guide.
54 
55    The file also contains code for final change of pseudos on hard
56    regs correspondingly assigned to them.  */
57 
58 #include "config.h"
59 #include "system.h"
60 #include "coretypes.h"
61 #include "tm.h"
62 #include "rtl.h"
63 #include "tm_p.h"
64 #include "insn-config.h"
65 #include "recog.h"
66 #include "output.h"
67 #include "regs.h"
68 #include "hard-reg-set.h"
69 #include "flags.h"
70 #include "function.h"
71 #include "expr.h"
72 #include "basic-block.h"
73 #include "except.h"
74 #include "timevar.h"
75 #include "target.h"
76 #include "lra-int.h"
77 #include "ira.h"
78 #include "df.h"
79 
80 
81 /* Max regno at the start of the pass.	*/
82 static int regs_num;
83 
84 /* Map spilled regno -> hard regno used instead of memory for
85    spilling.  */
86 static rtx *spill_hard_reg;
87 
/* The structure describes the stack slot of a spilled pseudo.  There
   is one element per pseudo (see PSEUDO_SLOTS below); pseudos sharing
   one stack slot are chained together through these elements.  */
struct pseudo_slot
{
  /* Number (0, 1, ...) of the stack slot to which given pseudo
     belongs.  */
  int slot_num;
  /* Intrusive singly-linked list of the pseudos assigned to the same
     stack slot: NEXT is the next element (NULL at the tail), FIRST
     points at the list head (the element of the first pseudo put into
     the slot).  Maintained by add_pseudo_to_slot.  */
  struct pseudo_slot *next, *first;
  /* Memory (MEM rtx) representing the spilled pseudo; set up by
     assign_mem_slot.  */
  rtx mem;
};
99 
100 /* The stack slots for each spilled pseudo.  Indexed by regnos.	 */
101 static struct pseudo_slot *pseudo_slots;
102 
/* The structure describes a register or a stack slot which can be
   used for several spilled pseudos.  */
struct slot
{
  /* First pseudo with given stack slot; -1 while the slot is empty.
     The other pseudos of the slot are reachable through
     pseudo_slots[regno].next.  */
  int regno;
  /* Hard reg into which the slot pseudos are spilled.	The value is
     negative for pseudos spilled into memory.	*/
  int hard_regno;
  /* Memory representing the whole stack slot.  It can be different
     from the memory representing a pseudo belonging to the given
     stack slot because the pseudo can be placed in a part of the
     corresponding stack slot.  The value is NULL for pseudos spilled
     into a hard reg.  */
  rtx mem;
  /* Combined live ranges of all pseudos belonging to given slot.  It
     is used to figure out that a new spilled pseudo can use given
     stack slot.  */
  lra_live_range_t live_ranges;
};
122 
123 /* Array containing info about the stack slots.	 The array element is
124    indexed by the stack slot number in the range [0..slots_num).  */
125 static struct slot *slots;
126 /* The number of the stack slots currently existing.  */
127 static int slots_num;
128 
/* Set up memory of the spilled pseudo I.  The function can allocate
   the corresponding stack slot if it is not done yet.	*/
static void
assign_mem_slot (int i)
{
  rtx x = NULL_RTX;
  enum machine_mode mode = GET_MODE (regno_reg_rtx[i]);
  /* Size the pseudo needs in its own (inherent) mode.  */
  unsigned int inherent_size = PSEUDO_REGNO_BYTES (i);
  unsigned int inherent_align = GET_MODE_ALIGNMENT (mode);
  /* Width of the widest mode in which the pseudo is ever referenced;
     paradoxical subregs can make this larger than INHERENT_SIZE.  */
  unsigned int max_ref_width = GET_MODE_SIZE (lra_reg_info[i].biggest_mode);
  unsigned int total_size = MAX (inherent_size, max_ref_width);
  unsigned int min_align = max_ref_width * BITS_PER_UNIT;
  /* Byte offset of the pseudo's data within the (possibly wider)
     shared slot.  */
  int adjust = 0;

  /* Only referenced pseudos without an assigned hard reg may reach
     here.  */
  lra_assert (regno_reg_rtx[i] != NULL_RTX && REG_P (regno_reg_rtx[i])
	      && lra_reg_info[i].nrefs != 0 && reg_renumber[i] < 0);

  x = slots[pseudo_slots[i].slot_num].mem;

  /* We can use a slot already allocated because it is guaranteed the
     slot provides both enough inherent space and enough total
     space.  */
  if (x)
    ;
  /* Each pseudo has an inherent size which comes from its own mode,
     and a total size which provides room for paradoxical subregs
     which refer to the pseudo reg in wider modes.  We allocate a new
     slot, making sure that it has enough inherent space and total
     space.  */
  else
    {
      rtx stack_slot;

      /* No known place to spill from => no slot to reuse.  The third
	 argument is -1 when the references need more room or stricter
	 alignment than MODE alone provides — presumably this asks
	 assign_stack_local to pick a suitable alignment itself;
	 confirm against assign_stack_local's contract.  */
      x = assign_stack_local (mode, total_size,
			      min_align > inherent_align
			      || total_size > inherent_size ? -1 : 0);
      /* Substitute eliminable regs (e.g. frame pointer) in the slot
	 address immediately.  */
      x = lra_eliminate_regs_1 (x, GET_MODE (x), false, false, true);
      stack_slot = x;
      /* Cancel the big-endian correction done in assign_stack_local.
	 Get the address of the beginning of the slot.	This is so we
	 can do a big-endian correction unconditionally below.	*/
      if (BYTES_BIG_ENDIAN)
	{
	  adjust = inherent_size - total_size;
	  if (adjust)
	    stack_slot
	      = adjust_address_nv (x,
				   mode_for_size (total_size * BITS_PER_UNIT,
						  MODE_INT, 1),
				   adjust);
	}
      /* Record the whole-slot MEM so later pseudos sharing this slot
	 reuse it (the "if (x)" branch above).  */
      slots[pseudo_slots[i].slot_num].mem = stack_slot;
    }

  /* On a big endian machine, the "address" of the slot is the address
     of the low part that fits its inherent mode.  */
  if (BYTES_BIG_ENDIAN && inherent_size < total_size)
    adjust += (total_size - inherent_size);

  x = adjust_address_nv (x, GET_MODE (regno_reg_rtx[i]), adjust);

  /* Set all of the memory attributes as appropriate for a spill.  */
  set_mem_attrs_for_spill (x);
  pseudo_slots[i].mem = x;
}
195 
196 /* Sort pseudos according their usage frequencies.  */
197 static int
regno_freq_compare(const void * v1p,const void * v2p)198 regno_freq_compare (const void *v1p, const void *v2p)
199 {
200   const int regno1 = *(const int *) v1p;
201   const int regno2 = *(const int *) v2p;
202   int diff;
203 
204   if ((diff = lra_reg_info[regno2].freq - lra_reg_info[regno1].freq) != 0)
205     return diff;
206   return regno1 - regno2;
207 }
208 
209 /* Redefine STACK_GROWS_DOWNWARD in terms of 0 or 1.  */
210 #ifdef STACK_GROWS_DOWNWARD
211 # undef STACK_GROWS_DOWNWARD
212 # define STACK_GROWS_DOWNWARD 1
213 #else
214 # define STACK_GROWS_DOWNWARD 0
215 #endif
216 
217 /* Sort pseudos according to their slots, putting the slots in the order
218    that they should be allocated.  Slots with lower numbers have the highest
219    priority and should get the smallest displacement from the stack or
220    frame pointer (whichever is being used).
221 
222    The first allocated slot is always closest to the frame pointer,
223    so prefer lower slot numbers when frame_pointer_needed.  If the stack
224    and frame grow in the same direction, then the first allocated slot is
225    always closest to the initial stack pointer and furthest away from the
226    final stack pointer, so allocate higher numbers first when using the
227    stack pointer in that case.  The reverse is true if the stack and
228    frame grow in opposite directions.  */
229 static int
pseudo_reg_slot_compare(const void * v1p,const void * v2p)230 pseudo_reg_slot_compare (const void *v1p, const void *v2p)
231 {
232   const int regno1 = *(const int *) v1p;
233   const int regno2 = *(const int *) v2p;
234   int diff, slot_num1, slot_num2;
235   int total_size1, total_size2;
236 
237   slot_num1 = pseudo_slots[regno1].slot_num;
238   slot_num2 = pseudo_slots[regno2].slot_num;
239   if ((diff = slot_num1 - slot_num2) != 0)
240     return (frame_pointer_needed
241 	    || !FRAME_GROWS_DOWNWARD == STACK_GROWS_DOWNWARD ? diff : -diff);
242   total_size1 = GET_MODE_SIZE (lra_reg_info[regno1].biggest_mode);
243   total_size2 = GET_MODE_SIZE (lra_reg_info[regno2].biggest_mode);
244   if ((diff = total_size2 - total_size1) != 0)
245     return diff;
246   return regno1 - regno2;
247 }
248 
249 /* Assign spill hard registers to N pseudos in PSEUDO_REGNOS which is
250    sorted in order of highest frequency first.  Put the pseudos which
251    did not get a spill hard register at the beginning of array
252    PSEUDO_REGNOS.  Return the number of such pseudos.  */
253 static int
assign_spill_hard_regs(int * pseudo_regnos,int n)254 assign_spill_hard_regs (int *pseudo_regnos, int n)
255 {
256   int i, k, p, regno, res, spill_class_size, hard_regno, nr;
257   enum reg_class rclass, spill_class;
258   enum machine_mode mode;
259   lra_live_range_t r;
260   rtx insn, set;
261   basic_block bb;
262   HARD_REG_SET conflict_hard_regs;
263   bitmap_head ok_insn_bitmap;
264   bitmap setjump_crosses = regstat_get_setjmp_crosses ();
265   /* Hard registers which can not be used for any purpose at given
266      program point because they are unallocatable or already allocated
267      for other pseudos.	 */
268   HARD_REG_SET *reserved_hard_regs;
269 
270   if (! lra_reg_spill_p)
271     return n;
272   /* Set up reserved hard regs for every program point.	 */
273   reserved_hard_regs = XNEWVEC (HARD_REG_SET, lra_live_max_point);
274   for (p = 0; p < lra_live_max_point; p++)
275     COPY_HARD_REG_SET (reserved_hard_regs[p], lra_no_alloc_regs);
276   for (i = FIRST_PSEUDO_REGISTER; i < regs_num; i++)
277     if (lra_reg_info[i].nrefs != 0
278 	&& (hard_regno = lra_get_regno_hard_regno (i)) >= 0)
279       for (r = lra_reg_info[i].live_ranges; r != NULL; r = r->next)
280 	for (p = r->start; p <= r->finish; p++)
281 	  add_to_hard_reg_set (&reserved_hard_regs[p],
282 			       lra_reg_info[i].biggest_mode, hard_regno);
283   bitmap_initialize (&ok_insn_bitmap, &reg_obstack);
284   FOR_EACH_BB (bb)
285     FOR_BB_INSNS (bb, insn)
286       if (DEBUG_INSN_P (insn)
287 	  || ((set = single_set (insn)) != NULL_RTX
288 	      && REG_P (SET_SRC (set)) && REG_P (SET_DEST (set))))
289 	bitmap_set_bit (&ok_insn_bitmap, INSN_UID (insn));
290   for (res = i = 0; i < n; i++)
291     {
292       regno = pseudo_regnos[i];
293       rclass = lra_get_allocno_class (regno);
294       if (bitmap_bit_p (setjump_crosses, regno)
295 	  || (spill_class
296 	      = ((enum reg_class)
297 		 targetm.spill_class ((reg_class_t) rclass,
298 				      PSEUDO_REGNO_MODE (regno)))) == NO_REGS
299 	  || bitmap_intersect_compl_p (&lra_reg_info[regno].insn_bitmap,
300 				       &ok_insn_bitmap))
301 	{
302 	  pseudo_regnos[res++] = regno;
303 	  continue;
304 	}
305       lra_assert (spill_class != NO_REGS);
306       COPY_HARD_REG_SET (conflict_hard_regs,
307 			 lra_reg_info[regno].conflict_hard_regs);
308       for (r = lra_reg_info[regno].live_ranges; r != NULL; r = r->next)
309 	for (p = r->start; p <= r->finish; p++)
310 	  IOR_HARD_REG_SET (conflict_hard_regs, reserved_hard_regs[p]);
311       spill_class_size = ira_class_hard_regs_num[spill_class];
312       mode = lra_reg_info[regno].biggest_mode;
313       for (k = 0; k < spill_class_size; k++)
314 	{
315 	  hard_regno = ira_class_hard_regs[spill_class][k];
316 	  if (! overlaps_hard_reg_set_p (conflict_hard_regs, mode, hard_regno))
317 	    break;
318 	}
319       if (k >= spill_class_size)
320 	{
321 	   /* There is no available regs -- assign memory later.  */
322 	  pseudo_regnos[res++] = regno;
323 	  continue;
324 	}
325       if (lra_dump_file != NULL)
326 	fprintf (lra_dump_file, "  Spill r%d into hr%d\n", regno, hard_regno);
327       /* Update reserved_hard_regs.  */
328       for (r = lra_reg_info[regno].live_ranges; r != NULL; r = r->next)
329 	for (p = r->start; p <= r->finish; p++)
330 	  add_to_hard_reg_set (&reserved_hard_regs[p],
331 			       lra_reg_info[regno].biggest_mode, hard_regno);
332       spill_hard_reg[regno]
333 	= gen_raw_REG (PSEUDO_REGNO_MODE (regno), hard_regno);
334       for (nr = 0;
335 	   nr < hard_regno_nregs[hard_regno][lra_reg_info[regno].biggest_mode];
336 	   nr++)
337 	/* Just loop.  */;
338       df_set_regs_ever_live (hard_regno + nr, true);
339     }
340   bitmap_clear (&ok_insn_bitmap);
341   free (reserved_hard_regs);
342   return res;
343 }
344 
345 /* Add pseudo REGNO to slot SLOT_NUM.  */
346 static void
add_pseudo_to_slot(int regno,int slot_num)347 add_pseudo_to_slot (int regno, int slot_num)
348 {
349   struct pseudo_slot *first;
350 
351   if (slots[slot_num].regno < 0)
352     {
353       /* It is the first pseudo in the slot.  */
354       slots[slot_num].regno = regno;
355       pseudo_slots[regno].first = &pseudo_slots[regno];
356       pseudo_slots[regno].next = NULL;
357     }
358   else
359     {
360       first = pseudo_slots[regno].first = &pseudo_slots[slots[slot_num].regno];
361       pseudo_slots[regno].next = first->next;
362       first->next = &pseudo_slots[regno];
363     }
364   pseudo_slots[regno].mem = NULL_RTX;
365   pseudo_slots[regno].slot_num = slot_num;
366   slots[slot_num].live_ranges
367     = lra_merge_live_ranges (slots[slot_num].live_ranges,
368 			     lra_copy_live_range_list
369 			     (lra_reg_info[regno].live_ranges));
370 }
371 
372 /* Assign stack slot numbers to pseudos in array PSEUDO_REGNOS of
373    length N.  Sort pseudos in PSEUDO_REGNOS for subsequent assigning
374    memory stack slots.	*/
375 static void
assign_stack_slot_num_and_sort_pseudos(int * pseudo_regnos,int n)376 assign_stack_slot_num_and_sort_pseudos (int *pseudo_regnos, int n)
377 {
378   int i, j, regno;
379 
380   slots_num = 0;
381   /* Assign stack slot numbers to spilled pseudos, use smaller numbers
382      for most frequently used pseudos.	*/
383   for (i = 0; i < n; i++)
384     {
385       regno = pseudo_regnos[i];
386       if (! flag_ira_share_spill_slots)
387 	j = slots_num;
388       else
389 	{
390 	  for (j = 0; j < slots_num; j++)
391 	    if (slots[j].hard_regno < 0
392 		&& ! (lra_intersected_live_ranges_p
393 		      (slots[j].live_ranges,
394 		       lra_reg_info[regno].live_ranges)))
395 	      break;
396 	}
397       if (j >= slots_num)
398 	{
399 	  /* New slot.	*/
400 	  slots[j].live_ranges = NULL;
401 	  slots[j].regno = slots[j].hard_regno = -1;
402 	  slots[j].mem = NULL_RTX;
403 	  slots_num++;
404 	}
405       add_pseudo_to_slot (regno, j);
406     }
407   /* Sort regnos according to their slot numbers.  */
408   qsort (pseudo_regnos, n, sizeof (int), pseudo_reg_slot_compare);
409 }
410 
411 /* Recursively process LOC in INSN and change spilled pseudos to the
412    corresponding memory or spilled hard reg.  Ignore spilled pseudos
413    created from the scratches.	*/
414 static void
remove_pseudos(rtx * loc,rtx insn)415 remove_pseudos (rtx *loc, rtx insn)
416 {
417   int i;
418   rtx hard_reg;
419   const char *fmt;
420   enum rtx_code code;
421 
422   if (*loc == NULL_RTX)
423     return;
424   code = GET_CODE (*loc);
425   if (code == REG && (i = REGNO (*loc)) >= FIRST_PSEUDO_REGISTER
426       && lra_get_regno_hard_regno (i) < 0
427       /* We do not want to assign memory for former scratches because
428 	 it might result in an address reload for some targets.	 In
429 	 any case we transform such pseudos not getting hard registers
430 	 into scratches back.  */
431       && ! lra_former_scratch_p (i))
432     {
433       hard_reg = spill_hard_reg[i];
434       *loc = copy_rtx (hard_reg != NULL_RTX ? hard_reg : pseudo_slots[i].mem);
435       return;
436     }
437 
438   fmt = GET_RTX_FORMAT (code);
439   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
440     {
441       if (fmt[i] == 'e')
442 	remove_pseudos (&XEXP (*loc, i), insn);
443       else if (fmt[i] == 'E')
444 	{
445 	  int j;
446 
447 	  for (j = XVECLEN (*loc, i) - 1; j >= 0; j--)
448 	    remove_pseudos (&XVECEXP (*loc, i, j), insn);
449 	}
450     }
451 }
452 
453 /* Convert spilled pseudos into their stack slots or spill hard regs,
454    put insns to process on the constraint stack (that is all insns in
455    which pseudos were changed to memory or spill hard regs).   */
456 static void
spill_pseudos(void)457 spill_pseudos (void)
458 {
459   basic_block bb;
460   rtx insn;
461   int i;
462   bitmap_head spilled_pseudos, changed_insns;
463 
464   bitmap_initialize (&spilled_pseudos, &reg_obstack);
465   bitmap_initialize (&changed_insns, &reg_obstack);
466   for (i = FIRST_PSEUDO_REGISTER; i < regs_num; i++)
467     {
468       if (lra_reg_info[i].nrefs != 0 && lra_get_regno_hard_regno (i) < 0
469 	  && ! lra_former_scratch_p (i))
470 	{
471 	  bitmap_set_bit (&spilled_pseudos, i);
472 	  bitmap_ior_into (&changed_insns, &lra_reg_info[i].insn_bitmap);
473 	}
474     }
475   FOR_EACH_BB (bb)
476     {
477       FOR_BB_INSNS (bb, insn)
478 	if (bitmap_bit_p (&changed_insns, INSN_UID (insn)))
479 	  {
480 	    remove_pseudos (&PATTERN (insn), insn);
481 	    if (CALL_P (insn))
482 	      remove_pseudos (&CALL_INSN_FUNCTION_USAGE (insn), insn);
483 	    if (lra_dump_file != NULL)
484 	      fprintf (lra_dump_file,
485 		       "Changing spilled pseudos to memory in insn #%u\n",
486 		       INSN_UID (insn));
487 	    lra_push_insn (insn);
488 	    if (lra_reg_spill_p || targetm.different_addr_displacement_p ())
489 	      lra_set_used_insn_alternative (insn, -1);
490 	  }
491 	else if (CALL_P (insn))
492 	  /* Presence of any pseudo in CALL_INSN_FUNCTION_USAGE does
493 	     not affect value of insn_bitmap of the corresponding
494 	     lra_reg_info.  That is because we don't need to reload
495 	     pseudos in CALL_INSN_FUNCTION_USAGEs.  So if we process
496 	     only insns in the insn_bitmap of given pseudo here, we
497 	     can miss the pseudo in some
498 	     CALL_INSN_FUNCTION_USAGEs.  */
499 	  remove_pseudos (&CALL_INSN_FUNCTION_USAGE (insn), insn);
500       bitmap_and_compl_into (df_get_live_in (bb), &spilled_pseudos);
501       bitmap_and_compl_into (df_get_live_out (bb), &spilled_pseudos);
502     }
503   bitmap_clear (&spilled_pseudos);
504   bitmap_clear (&changed_insns);
505 }
506 
/* Return true if we need to change some pseudos into memory.  */
bool
lra_need_for_spills_p (void)
{
  /* NOTE(review): `int i; max_regno = ...' declares only I and
     assigns the file-external variable max_regno as a side effect --
     confirm this is intentional and not a typo for
     `int i, max_regno = max_reg_num ();'.  */
  int i; max_regno = max_reg_num ();

  /* A pseudo needs memory if it is referenced, received no hard
     register, and was not created from a scratch.  */
  for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
    if (lra_reg_info[i].nrefs != 0 && lra_get_regno_hard_regno (i) < 0
	&& ! lra_former_scratch_p (i))
      return true;
  return false;
}
519 
520 /* Change spilled pseudos into memory or spill hard regs.  Put changed
521    insns on the constraint stack (these insns will be considered on
522    the next constraint pass).  The changed insns are all insns in
523    which pseudos were changed.  */
524 void
lra_spill(void)525 lra_spill (void)
526 {
527   int i, n, curr_regno;
528   int *pseudo_regnos;
529 
530   regs_num = max_reg_num ();
531   spill_hard_reg = XNEWVEC (rtx, regs_num);
532   pseudo_regnos = XNEWVEC (int, regs_num);
533   for (n = 0, i = FIRST_PSEUDO_REGISTER; i < regs_num; i++)
534     if (lra_reg_info[i].nrefs != 0 && lra_get_regno_hard_regno (i) < 0
535 	/* We do not want to assign memory for former scratches.  */
536 	&& ! lra_former_scratch_p (i))
537       {
538 	spill_hard_reg[i] = NULL_RTX;
539 	pseudo_regnos[n++] = i;
540       }
541   lra_assert (n > 0);
542   pseudo_slots = XNEWVEC (struct pseudo_slot, regs_num);
543   slots = XNEWVEC (struct slot, regs_num);
544   /* Sort regnos according their usage frequencies.  */
545   qsort (pseudo_regnos, n, sizeof (int), regno_freq_compare);
546   n = assign_spill_hard_regs (pseudo_regnos, n);
547   assign_stack_slot_num_and_sort_pseudos (pseudo_regnos, n);
548   for (i = 0; i < n; i++)
549     if (pseudo_slots[pseudo_regnos[i]].mem == NULL_RTX)
550       assign_mem_slot (pseudo_regnos[i]);
551   if (lra_dump_file != NULL)
552     {
553       for (i = 0; i < slots_num; i++)
554 	{
555 	  fprintf (lra_dump_file, "  Slot %d regnos (width = %d):", i,
556 		   GET_MODE_SIZE (GET_MODE (slots[i].mem)));
557 	  for (curr_regno = slots[i].regno;;
558 	       curr_regno = pseudo_slots[curr_regno].next - pseudo_slots)
559 	    {
560 	      fprintf (lra_dump_file, "	 %d", curr_regno);
561 	      if (pseudo_slots[curr_regno].next == NULL)
562 		break;
563 	    }
564 	  fprintf (lra_dump_file, "\n");
565 	}
566     }
567   spill_pseudos ();
568   free (slots);
569   free (pseudo_slots);
570   free (pseudo_regnos);
571   free (spill_hard_reg);
572 }
573 
574 /* Apply alter_subreg for subregs of regs in *LOC.  Use FINAL_P for
575    alter_subreg calls. Return true if any subreg of reg is
576    processed.  */
577 static bool
alter_subregs(rtx * loc,bool final_p)578 alter_subregs (rtx *loc, bool final_p)
579 {
580   int i;
581   rtx x = *loc;
582   bool res;
583   const char *fmt;
584   enum rtx_code code;
585 
586   if (x == NULL_RTX)
587     return false;
588   code = GET_CODE (x);
589   if (code == SUBREG && REG_P (SUBREG_REG (x)))
590     {
591       lra_assert (REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER);
592       alter_subreg (loc, final_p);
593       return true;
594     }
595   fmt = GET_RTX_FORMAT (code);
596   res = false;
597   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
598     {
599       if (fmt[i] == 'e')
600 	{
601 	  if (alter_subregs (&XEXP (x, i), final_p))
602 	    res = true;
603 	}
604       else if (fmt[i] == 'E')
605 	{
606 	  int j;
607 
608 	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
609 	    if (alter_subregs (&XVECEXP (x, i, j), final_p))
610 	      res = true;
611 	}
612     }
613   return res;
614 }
615 
/* Final change of pseudos that got hard registers into those hard
   registers, plus removal of the temporary clobbers LRA created.  */
void
lra_final_code_change (void)
{
  int i, hard_regno;
  basic_block bb;
  rtx insn, curr;
  int max_regno = max_reg_num ();

  /* Rewrite each assigned pseudo's REG rtx in place so it now names
     the hard register.  */
  for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
    if (lra_reg_info[i].nrefs != 0
	&& (hard_regno = lra_get_regno_hard_regno (i)) >= 0)
      SET_REGNO (regno_reg_rtx[i], hard_regno);
  /* The _SAFE iterator is required: insns are removed below.  */
  FOR_EACH_BB (bb)
    FOR_BB_INSNS_SAFE (bb, insn, curr)
      if (INSN_P (insn))
	{
	  rtx pat = PATTERN (insn);

	  if (GET_CODE (pat) == CLOBBER && LRA_TEMP_CLOBBER_P (pat))
	    {
	      /* Remove clobbers temporarily created in LRA.  We don't
		 need them anymore and don't want to waste compiler
		 time processing them in a few subsequent passes.  */
	      lra_invalidate_insn_data (insn);
	      remove_insn (insn);
	      continue;
	    }

	  lra_insn_recog_data_t id = lra_get_insn_recog_data (insn);
	  bool insn_change_p = false;

	  /* Resolve subregs of (now) hard regs in every operand and
	     propagate the change to duplicate operands.  */
	  for (i = id->insn_static_data->n_operands - 1; i >= 0; i--)
	    if (alter_subregs (id->operand_loc[i], ! DEBUG_INSN_P (insn)))
	      {
		lra_update_dup (id, i);
		insn_change_p = true;
	      }
	  if (insn_change_p)
	    lra_update_operator_dups (id);
	}
}
659