/* Change pseudos by memory.
   Copyright (C) 2010-2021 Free Software Foundation, Inc.
   Contributed by Vladimir Makarov <vmakarov@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


/* This file contains code for a pass to change spilled pseudos into
   memory.

   The pass creates the necessary stack slots and assigns spilled
   pseudos to the stack slots in the following way:

   for all spilled pseudos P, most frequently used first, do
     for all stack slots S do
       if P doesn't conflict with pseudos assigned to S then
	 assign S to P and go on to process the next pseudo
       end
     end
     create new stack slot S and assign P to S
   end

   The actual algorithm is a bit more complicated because of different
   pseudo sizes.
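
   As a purely illustrative example (the regnos and frequencies are
   made up): suppose pseudos p100 (freq 300), p101 (freq 200) and p102
   (freq 100) were spilled.  p100 is processed first and gets a new
   slot S0.  If p101's live range intersects p100's, it gets a new
   slot S1.  If p102 conflicts with p101 but not with p100, it shares
   S0, so only two slots are created for the three pseudos.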

   After that the code replaces the spilled pseudos (except ones
   created from scratches) with the corresponding stack slot memory
   in RTL.

   If at least one stack slot was created, we need to run more passes
   because we have new addresses which should be checked and because
   the old address displacements might change and address constraints
   (or insn memory constraints) might not be satisfied any more.

   For some targets, the pass can spill some pseudos into hard
   registers of a different class (usually into vector registers)
   instead of spilling them into memory, if it is possible and
   profitable.  Spilling GENERAL_REGS pseudos into SSE registers on
   Intel Core i7 is an example of such an optimization, and it is
   actually recommended by the Intel optimization guide.

   The file also contains code for the final replacement of pseudos
   with the hard regs assigned to them.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "df.h"
#include "insn-config.h"
#include "regs.h"
#include "memmodel.h"
#include "ira.h"
#include "recog.h"
#include "output.h"
#include "cfgrtl.h"
#include "lra.h"
#include "lra-int.h"


/* Max regno at the start of the pass.  */
static int regs_num;

/* Map spilled regno -> hard regno used instead of memory for
   spilling.  */
static rtx *spill_hard_reg;

/* The structure describes the stack slot of a spilled pseudo.  */
struct pseudo_slot
{
  /* Number (0, 1, ...) of the stack slot to which the given pseudo
     belongs.  */
  int slot_num;
  /* First or next slot with the same slot number.  */
  struct pseudo_slot *next, *first;
  /* Memory representing the spilled pseudo.  */
  rtx mem;
};

/* The stack slots for each spilled pseudo.  Indexed by regnos.  */
static struct pseudo_slot *pseudo_slots;
/* The structure describes a register or a stack slot which can be
   used for several spilled pseudos.  */
class slot
{
public:
  /* First pseudo with the given stack slot.  */
  int regno;
  /* Hard reg into which the slot pseudos are spilled.  The value is
     negative for pseudos spilled into memory.  */
  int hard_regno;
  /* Maximum alignment required by all users of the slot.  */
  unsigned int align;
  /* Maximum size required by all users of the slot.  */
  poly_int64 size;
  /* Memory representing the whole stack slot.  It can be different
     from the memory representing a pseudo belonging to the given
     stack slot because a pseudo can be placed in just a part of the
     corresponding stack slot.  The value is NULL for pseudos spilled
     into a hard reg.  */
  rtx mem;
  /* Combined live ranges of all pseudos belonging to the given slot.
     It is used to figure out whether a new spilled pseudo can use the
     given stack slot.  */
  lra_live_range_t live_ranges;
};

/* Array containing info about the stack slots.  The array element is
   indexed by the stack slot number in the range [0..slots_num).  */
static class slot *slots;
/* The number of stack slots currently existing.  */
static int slots_num;
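
/* As an illustration of how the two structures relate (the regnos
   below are hypothetical): if pseudos 105 and 134 share stack slot 0,
   then slots[0].regno == 105, pseudo_slots[105].first and
   pseudo_slots[134].first both point to pseudo_slots[105], and
   pseudo_slots[105].next points to pseudo_slots[134].  Walking the
   NEXT chain from slots[i].regno thus visits every pseudo assigned
   to slot I.  */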

/* Set up memory of the spilled pseudo I.  The function can allocate
   the corresponding stack slot if it is not done yet.  */
static void
assign_mem_slot (int i)
{
  rtx x = NULL_RTX;
  machine_mode mode = GET_MODE (regno_reg_rtx[i]);
  poly_int64 inherent_size = PSEUDO_REGNO_BYTES (i);
  machine_mode wider_mode
    = wider_subreg_mode (mode, lra_reg_info[i].biggest_mode);
  poly_int64 total_size = GET_MODE_SIZE (wider_mode);
  poly_int64 adjust = 0;

  lra_assert (regno_reg_rtx[i] != NULL_RTX && REG_P (regno_reg_rtx[i])
	      && lra_reg_info[i].nrefs != 0 && reg_renumber[i] < 0);

  unsigned int slot_num = pseudo_slots[i].slot_num;
  x = slots[slot_num].mem;
  if (!x)
    {
      x = assign_stack_local (BLKmode, slots[slot_num].size,
			      slots[slot_num].align);
      slots[slot_num].mem = x;
    }

  /* On a big endian machine, the "address" of the slot is the address
     of the low part that fits its inherent mode.  */
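  /* For example (illustrative sizes): a 4-byte pseudo living in an
     8-byte slot gets adjust == 4 on a big endian target, so that the
     pseudo's memory overlays the low-order half of the slot; on a
     little endian target adjust stays 0.  */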
  adjust += subreg_size_lowpart_offset (inherent_size, total_size);
  x = adjust_address_nv (x, GET_MODE (regno_reg_rtx[i]), adjust);

  /* Set all of the memory attributes as appropriate for a spill.  */
  set_mem_attrs_for_spill (x);
  pseudo_slots[i].mem = x;
}

/* Sort pseudos according to their usage frequencies.  */
static int
regno_freq_compare (const void *v1p, const void *v2p)
{
  const int regno1 = *(const int *) v1p;
  const int regno2 = *(const int *) v2p;
  int diff;

  if ((diff = lra_reg_info[regno2].freq - lra_reg_info[regno1].freq) != 0)
    return diff;
  return regno1 - regno2;
}

/* Sort pseudos according to their slots, putting the slots in the order
   that they should be allocated.

   First prefer to group slots with variable sizes together and slots
   with constant sizes together, since that usually makes them easier
   to address from a common anchor point.  E.g. loads of polynomial-sized
   registers tend to take polynomial offsets while loads of constant-sized
   registers tend to take constant (non-polynomial) offsets.

   Next, slots with lower numbers have the highest priority and should
   get the smallest displacement from the stack or frame pointer
   (whichever is being used).

   The first allocated slot is always closest to the frame pointer,
   so prefer lower slot numbers when frame_pointer_needed.  If the stack
   and frame grow in the same direction, then the first allocated slot is
   always closest to the initial stack pointer and furthest away from the
   final stack pointer, so allocate higher numbers first when using the
   stack pointer in that case.  The reverse is true if the stack and
   frame grow in opposite directions.  */
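/* As a sketch of the resulting order (the slot numbers are
   hypothetical): with frame_pointer_needed, pseudos of slots 0, 1, 2
   sort in ascending slot order, so slot 0 is allocated first and
   lands closest to the frame pointer.  Without a frame pointer, on a
   target where the stack and frame grow in the same direction, the
   comparison flips and the pseudos sort as 2, 1, 0, so the
   highest-priority slot 0 is allocated last and ends up closest to
   the final stack pointer.  */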
static int
pseudo_reg_slot_compare (const void *v1p, const void *v2p)
{
  const int regno1 = *(const int *) v1p;
  const int regno2 = *(const int *) v2p;
  int diff, slot_num1, slot_num2;

  slot_num1 = pseudo_slots[regno1].slot_num;
  slot_num2 = pseudo_slots[regno2].slot_num;
  diff = (int (slots[slot_num1].size.is_constant ())
	  - int (slots[slot_num2].size.is_constant ()));
  if (diff != 0)
    return diff;
  if ((diff = slot_num1 - slot_num2) != 0)
    return (frame_pointer_needed
	    || (!FRAME_GROWS_DOWNWARD) == STACK_GROWS_DOWNWARD ? diff : -diff);
  poly_int64 total_size1 = GET_MODE_SIZE (lra_reg_info[regno1].biggest_mode);
  poly_int64 total_size2 = GET_MODE_SIZE (lra_reg_info[regno2].biggest_mode);
  if ((diff = compare_sizes_for_sort (total_size2, total_size1)) != 0)
    return diff;
  return regno1 - regno2;
}

/* Assign spill hard registers to N pseudos in PSEUDO_REGNOS which is
   sorted in order of highest frequency first.  Put the pseudos which
   did not get a spill hard register at the beginning of array
   PSEUDO_REGNOS.  Return the number of such pseudos.  */
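/* For example (the counts are made up): if N is 5 and two of the five
   pseudos can be kept in spill hard regs (e.g. vector regs), the
   remaining three regnos are compacted into PSEUDO_REGNOS[0..2] and 3
   is returned; only those three will later get memory stack slots.  */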
static int
assign_spill_hard_regs (int *pseudo_regnos, int n)
{
  int i, k, p, regno, res, spill_class_size, hard_regno, nr;
  enum reg_class rclass, spill_class;
  machine_mode mode;
  lra_live_range_t r;
  rtx_insn *insn;
  rtx set;
  basic_block bb;
  HARD_REG_SET conflict_hard_regs;
  bitmap setjump_crosses = regstat_get_setjmp_crosses ();
  /* Hard registers which cannot be used for any purpose at a given
     program point because they are unallocatable or already allocated
     for other pseudos.  */
  HARD_REG_SET *reserved_hard_regs;

  if (! lra_reg_spill_p)
    return n;
  /* Set up reserved hard regs for every program point.  */
  reserved_hard_regs = XNEWVEC (HARD_REG_SET, lra_live_max_point);
  for (p = 0; p < lra_live_max_point; p++)
    reserved_hard_regs[p] = lra_no_alloc_regs;
  for (i = FIRST_PSEUDO_REGISTER; i < regs_num; i++)
    if (lra_reg_info[i].nrefs != 0
	&& (hard_regno = lra_get_regno_hard_regno (i)) >= 0)
      for (r = lra_reg_info[i].live_ranges; r != NULL; r = r->next)
	for (p = r->start; p <= r->finish; p++)
	  add_to_hard_reg_set (&reserved_hard_regs[p],
			       lra_reg_info[i].biggest_mode, hard_regno);
  auto_bitmap ok_insn_bitmap (&reg_obstack);
  FOR_EACH_BB_FN (bb, cfun)
    FOR_BB_INSNS (bb, insn)
      if (DEBUG_INSN_P (insn)
	  || ((set = single_set (insn)) != NULL_RTX
	      && REG_P (SET_SRC (set)) && REG_P (SET_DEST (set))))
	bitmap_set_bit (ok_insn_bitmap, INSN_UID (insn));
  for (res = i = 0; i < n; i++)
    {
      regno = pseudo_regnos[i];
      rclass = lra_get_allocno_class (regno);
      if (bitmap_bit_p (setjump_crosses, regno)
	  || (spill_class
	      = ((enum reg_class)
		 targetm.spill_class ((reg_class_t) rclass,
				      PSEUDO_REGNO_MODE (regno)))) == NO_REGS
	  || bitmap_intersect_compl_p (&lra_reg_info[regno].insn_bitmap,
				       ok_insn_bitmap))
	{
	  pseudo_regnos[res++] = regno;
	  continue;
	}
      lra_assert (spill_class != NO_REGS);
      conflict_hard_regs = lra_reg_info[regno].conflict_hard_regs;
      for (r = lra_reg_info[regno].live_ranges; r != NULL; r = r->next)
	for (p = r->start; p <= r->finish; p++)
	  conflict_hard_regs |= reserved_hard_regs[p];
      spill_class_size = ira_class_hard_regs_num[spill_class];
      mode = lra_reg_info[regno].biggest_mode;
      for (k = 0; k < spill_class_size; k++)
	{
	  hard_regno = ira_class_hard_regs[spill_class][k];
	  if (TEST_HARD_REG_BIT (eliminable_regset, hard_regno)
	      || !targetm.hard_regno_mode_ok (hard_regno, mode))
	    continue;
	  if (! overlaps_hard_reg_set_p (conflict_hard_regs, mode, hard_regno))
	    break;
	}
      if (k >= spill_class_size)
	{
	  /* There are no available regs -- assign memory later.  */
	  pseudo_regnos[res++] = regno;
	  continue;
	}
      if (lra_dump_file != NULL)
	fprintf (lra_dump_file, "  Spill r%d into hr%d\n", regno, hard_regno);
      add_to_hard_reg_set (&hard_regs_spilled_into,
			   lra_reg_info[regno].biggest_mode, hard_regno);
      /* Update reserved_hard_regs.  */
      for (r = lra_reg_info[regno].live_ranges; r != NULL; r = r->next)
	for (p = r->start; p <= r->finish; p++)
	  add_to_hard_reg_set (&reserved_hard_regs[p],
			       lra_reg_info[regno].biggest_mode, hard_regno);
      spill_hard_reg[regno]
	= gen_raw_REG (PSEUDO_REGNO_MODE (regno), hard_regno);
      for (nr = 0;
	   nr < hard_regno_nregs (hard_regno,
				  lra_reg_info[regno].biggest_mode);
	   nr++)
	/* Just loop.  */
	df_set_regs_ever_live (hard_regno + nr, true);
    }
  free (reserved_hard_regs);
  return res;
}

/* Add pseudo REGNO to slot SLOT_NUM.  */
static void
add_pseudo_to_slot (int regno, int slot_num)
{
  struct pseudo_slot *first;

  /* Each pseudo has an inherent size which comes from its own mode,
     and a total size which provides room for paradoxical subregs.
     We need to make sure the size and alignment of the slot are
     sufficient for both.  */
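  /* For instance (the modes are illustrative): an SImode pseudo that
     also appears inside a paradoxical (subreg:DI ...) has an inherent
     size of 4 bytes but a total size of 8, so its slot must be at
     least 8 bytes with DImode-sufficient alignment.  */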
  machine_mode mode = wider_subreg_mode (PSEUDO_REGNO_MODE (regno),
					 lra_reg_info[regno].biggest_mode);
  unsigned int align = spill_slot_alignment (mode);
  slots[slot_num].align = MAX (slots[slot_num].align, align);
  slots[slot_num].size = upper_bound (slots[slot_num].size,
				      GET_MODE_SIZE (mode));

  if (slots[slot_num].regno < 0)
    {
      /* It is the first pseudo in the slot.  */
      slots[slot_num].regno = regno;
      pseudo_slots[regno].first = &pseudo_slots[regno];
      pseudo_slots[regno].next = NULL;
    }
  else
    {
      first = pseudo_slots[regno].first = &pseudo_slots[slots[slot_num].regno];
      pseudo_slots[regno].next = first->next;
      first->next = &pseudo_slots[regno];
    }
  pseudo_slots[regno].mem = NULL_RTX;
  pseudo_slots[regno].slot_num = slot_num;
  slots[slot_num].live_ranges
    = lra_merge_live_ranges (slots[slot_num].live_ranges,
			     lra_copy_live_range_list
			     (lra_reg_info[regno].live_ranges));
}

/* Assign stack slot numbers to pseudos in array PSEUDO_REGNOS of
   length N.  Sort pseudos in PSEUDO_REGNOS for subsequently assigning
   memory stack slots.  */
static void
assign_stack_slot_num_and_sort_pseudos (int *pseudo_regnos, int n)
{
  int i, j, regno;

  slots_num = 0;
  /* Assign stack slot numbers to spilled pseudos, using smaller
     numbers for the most frequently used pseudos.  */
  for (i = 0; i < n; i++)
    {
      regno = pseudo_regnos[i];
      if (! flag_ira_share_spill_slots)
	j = slots_num;
      else
	{
	  machine_mode mode
	    = wider_subreg_mode (PSEUDO_REGNO_MODE (regno),
				 lra_reg_info[regno].biggest_mode);
	  for (j = 0; j < slots_num; j++)
	    if (slots[j].hard_regno < 0
		/* Although it's possible to share slots between modes
		   with constant and non-constant widths, we usually
		   get better spill code by keeping the constant and
		   non-constant areas separate.  */
		&& (GET_MODE_SIZE (mode).is_constant ()
		    == slots[j].size.is_constant ())
		&& ! (lra_intersected_live_ranges_p
		      (slots[j].live_ranges,
		       lra_reg_info[regno].live_ranges)))
	      break;
	}
      if (j >= slots_num)
	{
	  /* New slot.  */
	  slots[j].live_ranges = NULL;
	  slots[j].size = 0;
	  slots[j].align = BITS_PER_UNIT;
	  slots[j].regno = slots[j].hard_regno = -1;
	  slots[j].mem = NULL_RTX;
	  slots_num++;
	}
      add_pseudo_to_slot (regno, j);
    }
  /* Sort regnos according to their slot numbers.  */
  qsort (pseudo_regnos, n, sizeof (int), pseudo_reg_slot_compare);
}
/* Recursively process LOC in INSN and change spilled pseudos to the
   corresponding memory or spill hard reg.  Ignore spilled pseudos
   created from scratches.  Return true if the pseudo's nrefs is zero
   (don't change the pseudo in this case); otherwise return false.  */
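/* For example (the regno is hypothetical): if pseudo 100 of mode
   DImode was spilled to a stack slot, a use (subreg:SI (reg:DI 100) 0)
   is first rewritten to (subreg:SI (mem:DI ...) 0) and then
   simplified by alter_subreg into a plain (mem:SI ...), which avoids
   a later subreg-of-memory reload.  */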
static bool
remove_pseudos (rtx *loc, rtx_insn *insn)
{
  int i;
  rtx hard_reg;
  const char *fmt;
  enum rtx_code code;
  bool res = false;

  if (*loc == NULL_RTX)
    return res;
  code = GET_CODE (*loc);
  if (code == SUBREG && REG_P (SUBREG_REG (*loc)))
    {
      /* Try to remove memory subregs to simplify LRA's job
	 and to avoid LRA cycling in the case of a subreg memory
	 reload.  */
      res = remove_pseudos (&SUBREG_REG (*loc), insn);
      if (GET_CODE (SUBREG_REG (*loc)) == MEM)
	{
	  alter_subreg (loc, false);
	  if (GET_CODE (*loc) == MEM)
	    {
	      lra_update_insn_recog_data (insn);
	      if (lra_dump_file != NULL)
		fprintf (lra_dump_file,
			 "Memory subreg was simplified in insn #%u\n",
			 INSN_UID (insn));
	    }
	}
      return res;
    }
  else if (code == REG && (i = REGNO (*loc)) >= FIRST_PSEUDO_REGISTER
      && lra_get_regno_hard_regno (i) < 0
      /* We do not want to assign memory for former scratches because
	 it might result in an address reload for some targets.  In
	 any case we transform such pseudos which did not get hard
	 registers back into scratches.  */
      && ! ira_former_scratch_p (i))
    {
      if (lra_reg_info[i].nrefs == 0
	  && pseudo_slots[i].mem == NULL && spill_hard_reg[i] == NULL)
	return true;
      if ((hard_reg = spill_hard_reg[i]) != NULL_RTX)
	*loc = copy_rtx (hard_reg);
      else
	{
	  rtx x = lra_eliminate_regs_1 (insn, pseudo_slots[i].mem,
					GET_MODE (pseudo_slots[i].mem),
					false, false, 0, true);
	  *loc = x != pseudo_slots[i].mem ? x : copy_rtx (x);
	}
      return res;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	res = remove_pseudos (&XEXP (*loc, i), insn) || res;
      else if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (*loc, i) - 1; j >= 0; j--)
	    res = remove_pseudos (&XVECEXP (*loc, i, j), insn) || res;
	}
    }
  return res;
}

/* Convert spilled pseudos into their stack slots or spill hard regs,
   and put the insns to process on the constraint stack (that is all
   insns in which pseudos were changed to memory or spill hard
   regs).  */
static void
spill_pseudos (void)
{
  basic_block bb;
  rtx_insn *insn, *curr;
  int i;

  auto_bitmap spilled_pseudos (&reg_obstack);
  auto_bitmap changed_insns (&reg_obstack);
  for (i = FIRST_PSEUDO_REGISTER; i < regs_num; i++)
    {
      if (lra_reg_info[i].nrefs != 0 && lra_get_regno_hard_regno (i) < 0
	  && ! ira_former_scratch_p (i))
	{
	  bitmap_set_bit (spilled_pseudos, i);
	  bitmap_ior_into (changed_insns, &lra_reg_info[i].insn_bitmap);
	}
    }
  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_BB_INSNS_SAFE (bb, insn, curr)
	{
	  bool removed_pseudo_p = false;

	  if (bitmap_bit_p (changed_insns, INSN_UID (insn)))
	    {
	      rtx *link_loc, link;

	      removed_pseudo_p = remove_pseudos (&PATTERN (insn), insn);
	      if (CALL_P (insn)
		  && remove_pseudos (&CALL_INSN_FUNCTION_USAGE (insn), insn))
		removed_pseudo_p = true;
	      for (link_loc = &REG_NOTES (insn);
		   (link = *link_loc) != NULL_RTX;
		   link_loc = &XEXP (link, 1))
		{
		  switch (REG_NOTE_KIND (link))
		    {
		    case REG_FRAME_RELATED_EXPR:
		    case REG_CFA_DEF_CFA:
		    case REG_CFA_ADJUST_CFA:
		    case REG_CFA_OFFSET:
		    case REG_CFA_REGISTER:
		    case REG_CFA_EXPRESSION:
		    case REG_CFA_RESTORE:
		    case REG_CFA_SET_VDRAP:
		      if (remove_pseudos (&XEXP (link, 0), insn))
			removed_pseudo_p = true;
		      break;
		    default:
		      break;
		    }
		}
	      if (lra_dump_file != NULL)
		fprintf (lra_dump_file,
			 "Changing spilled pseudos to memory in insn #%u\n",
			 INSN_UID (insn));
	      lra_push_insn (insn);
	      if (lra_reg_spill_p || targetm.different_addr_displacement_p ())
		lra_set_used_insn_alternative (insn, LRA_UNKNOWN_ALT);
	    }
	  else if (CALL_P (insn)
		   /* The presence of a pseudo in CALL_INSN_FUNCTION_USAGE
		      does not affect the insn_bitmap of the corresponding
		      lra_reg_info, because we don't need to reload pseudos
		      in CALL_INSN_FUNCTION_USAGEs.  So if we only processed
		      the insns in a pseudo's insn_bitmap here, we could
		      miss the pseudo in some
		      CALL_INSN_FUNCTION_USAGEs.  */
		   && remove_pseudos (&CALL_INSN_FUNCTION_USAGE (insn), insn))
	    removed_pseudo_p = true;
	  if (removed_pseudo_p)
	    {
	      lra_assert (DEBUG_INSN_P (insn));
	      lra_invalidate_insn_data (insn);
	      INSN_VAR_LOCATION_LOC (insn) = gen_rtx_UNKNOWN_VAR_LOC ();
	      if (lra_dump_file != NULL)
		fprintf (lra_dump_file,
			 "Debug insn #%u is reset because it referenced "
			 "removed pseudo\n", INSN_UID (insn));
	    }
	  bitmap_and_compl_into (df_get_live_in (bb), spilled_pseudos);
	  bitmap_and_compl_into (df_get_live_out (bb), spilled_pseudos);
	}
    }
}

/* Return true if we need scratch reg assignments.  */
bool
lra_need_for_scratch_reg_p (void)
{
  int i, max_regno = max_reg_num ();

  for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
    if (lra_reg_info[i].nrefs != 0 && lra_get_regno_hard_regno (i) < 0
	&& ira_former_scratch_p (i))
      return true;
  return false;
}

/* Return true if we need to change some pseudos into memory.  */
bool
lra_need_for_spills_p (void)
{
  int i, max_regno = max_reg_num ();

  for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
    if (lra_reg_info[i].nrefs != 0 && lra_get_regno_hard_regno (i) < 0
	&& ! ira_former_scratch_p (i))
      return true;
  return false;
}

/* Change spilled pseudos into memory or spill hard regs.  Put changed
   insns on the constraint stack (these insns will be considered on
   the next constraint pass).  The changed insns are all insns in
   which pseudos were changed.  */
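/* A rough sketch of the flow (names as in this file): collect the
   spilled pseudo regnos, sort them by frequency, let
   assign_spill_hard_regs pick off the pseudos that fit in spill hard
   regs, give the rest stack slot numbers via
   assign_stack_slot_num_and_sort_pseudos, materialize the slot memory
   with assign_mem_slot, and finally rewrite the insns in
   spill_pseudos.  */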
void
lra_spill (void)
{
  int i, n, curr_regno;
  int *pseudo_regnos;

  regs_num = max_reg_num ();
  spill_hard_reg = XNEWVEC (rtx, regs_num);
  pseudo_regnos = XNEWVEC (int, regs_num);
  for (n = 0, i = FIRST_PSEUDO_REGISTER; i < regs_num; i++)
    if (lra_reg_info[i].nrefs != 0 && lra_get_regno_hard_regno (i) < 0
	/* We do not want to assign memory for former scratches.  */
	&& ! ira_former_scratch_p (i))
      pseudo_regnos[n++] = i;
  lra_assert (n > 0);
  pseudo_slots = XNEWVEC (struct pseudo_slot, regs_num);
  for (i = FIRST_PSEUDO_REGISTER; i < regs_num; i++)
    {
      spill_hard_reg[i] = NULL_RTX;
      pseudo_slots[i].mem = NULL_RTX;
    }
  slots = XNEWVEC (class slot, regs_num);
  /* Sort regnos according to their usage frequencies.  */
  qsort (pseudo_regnos, n, sizeof (int), regno_freq_compare);
  n = assign_spill_hard_regs (pseudo_regnos, n);
  assign_stack_slot_num_and_sort_pseudos (pseudo_regnos, n);
  for (i = 0; i < n; i++)
    if (pseudo_slots[pseudo_regnos[i]].mem == NULL_RTX)
      assign_mem_slot (pseudo_regnos[i]);
  if (n > 0 && crtl->stack_alignment_needed)
    /* If we have a stack frame, we must align it now.  The stack size
       may be a part of the offset computation for register
       elimination.  */
    assign_stack_local (BLKmode, 0, crtl->stack_alignment_needed);
  if (lra_dump_file != NULL)
    {
      for (i = 0; i < slots_num; i++)
	{
	  fprintf (lra_dump_file, "  Slot %d regnos (width = ", i);
	  print_dec (GET_MODE_SIZE (GET_MODE (slots[i].mem)),
		     lra_dump_file, SIGNED);
	  fprintf (lra_dump_file, "):");
	  for (curr_regno = slots[i].regno;;
	       curr_regno = pseudo_slots[curr_regno].next - pseudo_slots)
	    {
	      fprintf (lra_dump_file, "  %d", curr_regno);
	      if (pseudo_slots[curr_regno].next == NULL)
		break;
	    }
	  fprintf (lra_dump_file, "\n");
	}
    }
  spill_pseudos ();
  free (slots);
  free (pseudo_slots);
  free (pseudo_regnos);
  free (spill_hard_reg);
}

/* Apply alter_subreg for subregs of regs in *LOC.  Use FINAL_P for
   alter_subreg calls.  Return true if any subreg of a reg was
   processed.  */
static bool
alter_subregs (rtx *loc, bool final_p)
{
  int i;
  rtx x = *loc;
  bool res;
  const char *fmt;
  enum rtx_code code;

  if (x == NULL_RTX)
    return false;
  code = GET_CODE (x);
  if (code == SUBREG && REG_P (SUBREG_REG (x)))
    {
      lra_assert (REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER);
      alter_subreg (loc, final_p);
      return true;
    }
  fmt = GET_RTX_FORMAT (code);
  res = false;
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (alter_subregs (&XEXP (x, i), final_p))
	    res = true;
	}
      else if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (alter_subregs (&XVECEXP (x, i, j), final_p))
	      res = true;
	}
    }
  return res;
}

/* Return true if REGNO is used for return in the current
   function.  */
static bool
return_regno_p (unsigned int regno)
{
  rtx outgoing = crtl->return_rtx;

  if (! outgoing)
    return false;

  if (REG_P (outgoing))
    return REGNO (outgoing) == regno;
  else if (GET_CODE (outgoing) == PARALLEL)
    {
      int i;

      for (i = 0; i < XVECLEN (outgoing, 0); i++)
	{
	  rtx x = XEXP (XVECEXP (outgoing, 0, i), 0);

	  if (REG_P (x) && REGNO (x) == regno)
	    return true;
	}
    }
  return false;
}

/* Return true if REGNO is referenced in a subsequent USE after INSN
   in the same BB.  */
static bool
regno_in_use_p (rtx_insn *insn, unsigned int regno)
{
  static lra_insn_recog_data_t id;
  static struct lra_static_insn_data *static_id;
  struct lra_insn_reg *reg;
  int i, arg_regno;
  basic_block bb = BLOCK_FOR_INSN (insn);

  while ((insn = next_nondebug_insn (insn)) != NULL_RTX)
    {
      if (BARRIER_P (insn) || bb != BLOCK_FOR_INSN (insn))
	return false;
      if (! INSN_P (insn))
	continue;
      if (GET_CODE (PATTERN (insn)) == USE
	  && REG_P (XEXP (PATTERN (insn), 0))
	  && regno == REGNO (XEXP (PATTERN (insn), 0)))
	return true;
      /* Check that the regno is not modified.  */
      id = lra_get_insn_recog_data (insn);
      for (reg = id->regs; reg != NULL; reg = reg->next)
	if (reg->type != OP_IN && reg->regno == (int) regno)
	  return false;
      static_id = id->insn_static_data;
      for (reg = static_id->hard_regs; reg != NULL; reg = reg->next)
	if (reg->type != OP_IN && reg->regno == (int) regno)
	  return false;
      if (id->arg_hard_regs != NULL)
	/* Clobbered hard regs are stored biased by
	   FIRST_PSEUDO_REGISTER, so undo the bias before comparing.  */
	for (i = 0; (arg_regno = id->arg_hard_regs[i]) >= 0; i++)
	  if ((int) regno == (arg_regno >= FIRST_PSEUDO_REGISTER
			      ? arg_regno - FIRST_PSEUDO_REGISTER : arg_regno))
	    return false;
    }
  return false;
}

/* Final change of pseudos that got hard registers into those hard
   registers, and removal of temporary clobbers.  */
void
lra_final_code_change (void)
{
  int i, hard_regno;
  basic_block bb;
  rtx_insn *insn, *curr;
  rtx set;
  int max_regno = max_reg_num ();

  for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++)
    if (lra_reg_info[i].nrefs != 0
	&& (hard_regno = lra_get_regno_hard_regno (i)) >= 0)
      SET_REGNO (regno_reg_rtx[i], hard_regno);
  FOR_EACH_BB_FN (bb, cfun)
    FOR_BB_INSNS_SAFE (bb, insn, curr)
      if (INSN_P (insn))
	{
	  rtx pat = PATTERN (insn);

	  if (GET_CODE (pat) == USE && XEXP (pat, 0) == const1_rtx)
	    {
	      /* Remove markers inserted to eliminate critical edges for
		 jump insn output reloads (see the code in ira.c::ira).  */
	      lra_invalidate_insn_data (insn);
	      delete_insn (insn);
	      continue;
	    }
	  if (GET_CODE (pat) == CLOBBER && LRA_TEMP_CLOBBER_P (pat))
	    {
	      /* Remove clobbers temporarily created in LRA.  We don't
		 need them anymore and don't want to waste compiler
		 time processing them in a few subsequent passes.  */
	      lra_invalidate_insn_data (insn);
	      delete_insn (insn);
	      continue;
	    }

	  /* IRA can generate move insns involving pseudos.  It is
	     better to remove them earlier to speed up the compiler a
	     bit.  It is also better to do it here as they might not
	     pass the final RTL check in LRA (e.g. an insn moving a
	     control register into itself).  So remove a useless move
	     insn unless the next insn is a USE marking the return reg
	     (we should save such original insns because some
	     subsequent optimizations assume their presence).  */
	  if (NONJUMP_INSN_P (insn) && GET_CODE (pat) == SET
	      && REG_P (SET_SRC (pat)) && REG_P (SET_DEST (pat))
	      && REGNO (SET_SRC (pat)) == REGNO (SET_DEST (pat))
	      && (! return_regno_p (REGNO (SET_SRC (pat)))
		  || ! regno_in_use_p (insn, REGNO (SET_SRC (pat)))))
	    {
	      lra_invalidate_insn_data (insn);
	      delete_insn (insn);
	      continue;
	    }

	  lra_insn_recog_data_t id = lra_get_insn_recog_data (insn);
	  struct lra_insn_reg *reg;

	  for (reg = id->regs; reg != NULL; reg = reg->next)
	    if (reg->regno >= FIRST_PSEUDO_REGISTER
		&& lra_reg_info [reg->regno].nrefs == 0)
	      break;

	  if (reg != NULL)
	    {
	      /* Pseudos can still occur in debug insns in some very
		 rare and complicated cases, e.g. when the pseudo was
		 removed by inheritance and the debug insn is not in
		 the EBB where the inheritance happened.  It is
		 difficult and time consuming to find what hard
		 register corresponds to the pseudo -- so just remove
		 the debug insn.  Another solution could be assigning
		 the hard reg/memory, but that would be misleading
		 info; it is better to have no info than wrong
		 info.  */
	      lra_assert (DEBUG_INSN_P (insn));
	      lra_invalidate_insn_data (insn);
	      delete_insn (insn);
	      continue;
	    }

	  struct lra_static_insn_data *static_id = id->insn_static_data;
	  bool insn_change_p = false;

	  for (i = id->insn_static_data->n_operands - 1; i >= 0; i--)
	    if ((DEBUG_INSN_P (insn) || ! static_id->operand[i].is_operator)
		&& alter_subregs (id->operand_loc[i], ! DEBUG_INSN_P (insn)))
	      {
		lra_update_dup (id, i);
		insn_change_p = true;
	      }
	  if (insn_change_p)
	    lra_update_operator_dups (id);

	  if ((set = single_set (insn)) != NULL
	      && REG_P (SET_SRC (set)) && REG_P (SET_DEST (set))
	      && REGNO (SET_SRC (set)) == REGNO (SET_DEST (set)))
	    {
	      /* Remove a useless move insn.  IRA can generate move
		 insns involving pseudos.  It is better to remove them
		 earlier to speed up the compiler a bit.  It is also
		 better to do it here as they might not pass the final
		 RTL check in LRA (e.g. an insn moving a control
		 register into itself).  */
	      lra_invalidate_insn_data (insn);
	      delete_insn (insn);
	    }
	}
}