1 /* IRA processing allocno lives to build allocno live ranges.
2    Copyright (C) 2006-2019 Free Software Foundation, Inc.
3    Contributed by Vladimir Makarov <vmakarov@redhat.com>.
4 
5 This file is part of GCC.
6 
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11 
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
15 for more details.
16 
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3.  If not see
19 <http://www.gnu.org/licenses/>.  */
20 
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "predict.h"
28 #include "df.h"
29 #include "memmodel.h"
30 #include "tm_p.h"
31 #include "insn-config.h"
32 #include "regs.h"
33 #include "ira.h"
34 #include "ira-int.h"
35 #include "sparseset.h"
36 
/* The code in this file is similar to one in global but the code
   works on the allocno basis and creates live ranges instead of
   pseudo-register conflicts.  */

/* Program points are enumerated by numbers from range
   0..IRA_MAX_POINT-1.  There are approximately two times more program
   points than insns.  Program points are places in the program where
   liveness info can be changed.  In most general case (there are more
   complicated cases too) some program points correspond to places
   where input operand dies and other ones correspond to places where
   output operands are born.  */
int ira_max_point;

/* Arrays of size IRA_MAX_POINT mapping a program point to the allocno
   live ranges with given start/finish point.  */
live_range_t *ira_start_point_ranges, *ira_finish_point_ranges;

/* Number of the current program point.  */
static int curr_point;

/* Point where register pressure excess started or -1 if there is no
   register pressure excess.  Excess pressure for a register class at
   some point means that there are more allocnos of given register
   class living at the point than number of hard-registers of the
   class available for the allocation.  It is defined only for
   pressure classes.  */
static int high_pressure_start_point[N_REG_CLASSES];

/* Objects live at current point in the scan.  */
static sparseset objects_live;

/* A temporary bitmap used in functions that wish to avoid visiting an allocno
   multiple times.  */
static sparseset allocnos_processed;

/* Set of hard regs (except eliminable ones) currently live.  */
static HARD_REG_SET hard_regs_live;

/* The loop tree node corresponding to the current basic block.  */
static ira_loop_tree_node_t curr_bb_node;

/* The number of the last processed call.  */
static int last_call_num;
/* The number of last call at which given allocno was saved.  */
static int *allocno_saved_at_call;

/* The value of get_preferred_alternatives for the current instruction,
   supplemental to recog_data.  */
static alternative_mask preferred_alternatives;

/* If non-NULL, the source operand of a register to register copy for which
   we should not add a conflict with the copy's destination operand.  */
static rtx ignore_reg_for_conflicts;
90 
/* Record hard register REGNO as now being live.  Only updates the
   hard_regs_live set; conflict recording happens when objects die
   (see make_object_dead).  */
static void
make_hard_regno_live (int regno)
{
  SET_HARD_REG_BIT (hard_regs_live, regno);
}
97 
/* Process the definition of hard register REGNO.  This updates
   hard_regs_live and hard reg conflict information for living allocnos.  */
static void
make_hard_regno_dead (int regno)
{
  unsigned int i;
  /* Record a conflict between REGNO and every object currently live,
     except the allocno of the copy source we were told to ignore
     (see ignore_reg_for_conflicts).  */
  EXECUTE_IF_SET_IN_SPARSESET (objects_live, i)
    {
      ira_object_t obj = ira_object_id_map[i];

      if (ignore_reg_for_conflicts != NULL_RTX
	  && REGNO (ignore_reg_for_conflicts)
	     == (unsigned int) ALLOCNO_REGNO (OBJECT_ALLOCNO (obj)))
	continue;

      SET_HARD_REG_BIT (OBJECT_CONFLICT_HARD_REGS (obj), regno);
      SET_HARD_REG_BIT (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), regno);
    }
  CLEAR_HARD_REG_BIT (hard_regs_live, regno);
}
118 
119 /* Record object OBJ as now being live.  Set a bit for it in objects_live,
120    and start a new live range for it if necessary.  */
121 static void
make_object_live(ira_object_t obj)122 make_object_live (ira_object_t obj)
123 {
124   sparseset_set_bit (objects_live, OBJECT_CONFLICT_ID (obj));
125 
126   live_range_t lr = OBJECT_LIVE_RANGES (obj);
127   if (lr == NULL
128       || (lr->finish != curr_point && lr->finish + 1 != curr_point))
129     ira_add_live_range_to_object (obj, curr_point, -1);
130 }
131 
/* Update ALLOCNO_EXCESS_PRESSURE_POINTS_NUM for the allocno
   associated with object OBJ, accumulating the number of program
   points during which the allocno lived under excess pressure for
   each pressure class containing its class.  */
static void
update_allocno_pressure_excess_length (ira_object_t obj)
{
  ira_allocno_t a = OBJECT_ALLOCNO (obj);
  int start, i;
  enum reg_class aclass, pclass, cl;
  live_range_t p;

  aclass = ALLOCNO_CLASS (a);
  pclass = ira_pressure_class_translate[aclass];
  /* Walk all pressure classes containing PCLASS.  */
  for (i = 0;
       (cl = ira_reg_class_super_classes[pclass][i]) != LIM_REG_CLASSES;
       i++)
    {
      if (! ira_reg_pressure_class_p[cl])
	continue;
      /* No ongoing pressure excess for this class.  */
      if (high_pressure_start_point[cl] < 0)
	continue;
      p = OBJECT_LIVE_RANGES (obj);
      ira_assert (p != NULL);
      /* Count only the overlap of the current (most recent) live
	 range with the excess-pressure region.  */
      start = (high_pressure_start_point[cl] > p->start
	       ? high_pressure_start_point[cl] : p->start);
      ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a) += curr_point - start + 1;
    }
}
159 
/* Process the definition of object OBJ, which is associated with allocno A.
   This finishes the current live range for it.  All currently live hard
   registers are recorded as conflicts, except for the parts of
   IGNORE_REG_FOR_CONFLICTS that did not already conflict with OBJ.  */
static void
make_object_dead (ira_object_t obj)
{
  live_range_t lr;
  int regno;
  int ignore_regno = -1;
  int ignore_total_regno = -1;
  int end_regno = -1;

  sparseset_clear_bit (objects_live, OBJECT_CONFLICT_ID (obj));

  /* Check whether any part of IGNORE_REG_FOR_CONFLICTS already conflicts
     with OBJ.  */
  if (ignore_reg_for_conflicts != NULL_RTX
      && REGNO (ignore_reg_for_conflicts) < FIRST_PSEUDO_REGISTER)
    {
      end_regno = END_REGNO (ignore_reg_for_conflicts);
      ignore_regno = ignore_total_regno = REGNO (ignore_reg_for_conflicts);

      for (regno = ignore_regno; regno < end_regno; regno++)
	{
	  /* If any part already conflicts, do not clear the range
	     below: setting the start to END_REGNO makes the clearing
	     loops no-ops.  */
	  if (TEST_HARD_REG_BIT (OBJECT_CONFLICT_HARD_REGS (obj), regno))
	    ignore_regno = end_regno;
	  if (TEST_HARD_REG_BIT (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), regno))
	    ignore_total_regno = end_regno;
	}
    }

  IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj), hard_regs_live);
  IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), hard_regs_live);

  /* If IGNORE_REG_FOR_CONFLICTS did not already conflict with OBJ, make
     sure it still doesn't.  */
  for (regno = ignore_regno; regno < end_regno; regno++)
    CLEAR_HARD_REG_BIT (OBJECT_CONFLICT_HARD_REGS (obj), regno);
  for (regno = ignore_total_regno; regno < end_regno; regno++)
    CLEAR_HARD_REG_BIT (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), regno);

  /* Close the object's most recent live range at the current point.  */
  lr = OBJECT_LIVE_RANGES (obj);
  ira_assert (lr != NULL);
  lr->finish = curr_point;
  update_allocno_pressure_excess_length (obj);
}
205 
/* The current register pressures for each pressure class for the current
   basic block.  Indexed by reg_class; meaningful only for pressure
   classes.  */
static int curr_reg_pressure[N_REG_CLASSES];
209 
210 /* Record that register pressure for PCLASS increased by N registers.
211    Update the current register pressure, maximal register pressure for
212    the current BB and the start point of the register pressure
213    excess.  */
214 static void
inc_register_pressure(enum reg_class pclass,int n)215 inc_register_pressure (enum reg_class pclass, int n)
216 {
217   int i;
218   enum reg_class cl;
219 
220   for (i = 0;
221        (cl = ira_reg_class_super_classes[pclass][i]) != LIM_REG_CLASSES;
222        i++)
223     {
224       if (! ira_reg_pressure_class_p[cl])
225 	continue;
226       curr_reg_pressure[cl] += n;
227       if (high_pressure_start_point[cl] < 0
228 	  && (curr_reg_pressure[cl] > ira_class_hard_regs_num[cl]))
229 	high_pressure_start_point[cl] = curr_point;
230       if (curr_bb_node->reg_pressure[cl] < curr_reg_pressure[cl])
231 	curr_bb_node->reg_pressure[cl] = curr_reg_pressure[cl];
232     }
233 }
234 
/* Record that register pressure for PCLASS has decreased by NREGS
   registers; update current register pressure, start point of the
   register pressure excess, and register pressure excess length for
   living allocnos.  */

static void
dec_register_pressure (enum reg_class pclass, int nregs)
{
  int i;
  unsigned int j;
  enum reg_class cl;
  bool set_p = false;

  /* First pass: lower the pressure and note whether any class just
     dropped out of the excess region.  */
  for (i = 0;
       (cl = ira_reg_class_super_classes[pclass][i]) != LIM_REG_CLASSES;
       i++)
    {
      if (! ira_reg_pressure_class_p[cl])
	continue;
      curr_reg_pressure[cl] -= nregs;
      ira_assert (curr_reg_pressure[cl] >= 0);
      if (high_pressure_start_point[cl] >= 0
	  && curr_reg_pressure[cl] <= ira_class_hard_regs_num[cl])
	set_p = true;
    }
  if (set_p)
    {
      /* An excess region just ended: charge its length to every live
	 allocno BEFORE resetting the start points, since
	 update_allocno_pressure_excess_length reads them.  */
      EXECUTE_IF_SET_IN_SPARSESET (objects_live, j)
	update_allocno_pressure_excess_length (ira_object_id_map[j]);
      for (i = 0;
	   (cl = ira_reg_class_super_classes[pclass][i]) != LIM_REG_CLASSES;
	   i++)
	{
	  if (! ira_reg_pressure_class_p[cl])
	    continue;
	  if (high_pressure_start_point[cl] >= 0
	      && curr_reg_pressure[cl] <= ira_class_hard_regs_num[cl])
	    high_pressure_start_point[cl] = -1;
	}
    }
}
276 
277 /* Determine from the objects_live bitmap whether REGNO is currently live,
278    and occupies only one object.  Return false if we have no information.  */
279 static bool
pseudo_regno_single_word_and_live_p(int regno)280 pseudo_regno_single_word_and_live_p (int regno)
281 {
282   ira_allocno_t a = ira_curr_regno_allocno_map[regno];
283   ira_object_t obj;
284 
285   if (a == NULL)
286     return false;
287   if (ALLOCNO_NUM_OBJECTS (a) > 1)
288     return false;
289 
290   obj = ALLOCNO_OBJECT (a, 0);
291 
292   return sparseset_bit_p (objects_live, OBJECT_CONFLICT_ID (obj));
293 }
294 
295 /* Mark the pseudo register REGNO as live.  Update all information about
296    live ranges and register pressure.  */
297 static void
mark_pseudo_regno_live(int regno)298 mark_pseudo_regno_live (int regno)
299 {
300   ira_allocno_t a = ira_curr_regno_allocno_map[regno];
301   enum reg_class pclass;
302   int i, n, nregs;
303 
304   if (a == NULL)
305     return;
306 
307   /* Invalidate because it is referenced.  */
308   allocno_saved_at_call[ALLOCNO_NUM (a)] = 0;
309 
310   n = ALLOCNO_NUM_OBJECTS (a);
311   pclass = ira_pressure_class_translate[ALLOCNO_CLASS (a)];
312   nregs = ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)];
313   if (n > 1)
314     {
315       /* We track every subobject separately.  */
316       gcc_assert (nregs == n);
317       nregs = 1;
318     }
319 
320   for (i = 0; i < n; i++)
321     {
322       ira_object_t obj = ALLOCNO_OBJECT (a, i);
323 
324       if (sparseset_bit_p (objects_live, OBJECT_CONFLICT_ID (obj)))
325 	continue;
326 
327       inc_register_pressure (pclass, nregs);
328       make_object_live (obj);
329     }
330 }
331 
332 /* Like mark_pseudo_regno_live, but try to only mark one subword of
333    the pseudo as live.  SUBWORD indicates which; a value of 0
334    indicates the low part.  */
335 static void
mark_pseudo_regno_subword_live(int regno,int subword)336 mark_pseudo_regno_subword_live (int regno, int subword)
337 {
338   ira_allocno_t a = ira_curr_regno_allocno_map[regno];
339   int n;
340   enum reg_class pclass;
341   ira_object_t obj;
342 
343   if (a == NULL)
344     return;
345 
346   /* Invalidate because it is referenced.  */
347   allocno_saved_at_call[ALLOCNO_NUM (a)] = 0;
348 
349   n = ALLOCNO_NUM_OBJECTS (a);
350   if (n == 1)
351     {
352       mark_pseudo_regno_live (regno);
353       return;
354     }
355 
356   pclass = ira_pressure_class_translate[ALLOCNO_CLASS (a)];
357   gcc_assert
358     (n == ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)]);
359   obj = ALLOCNO_OBJECT (a, subword);
360 
361   if (sparseset_bit_p (objects_live, OBJECT_CONFLICT_ID (obj)))
362     return;
363 
364   inc_register_pressure (pclass, 1);
365   make_object_live (obj);
366 }
367 
368 /* Mark the register REG as live.  Store a 1 in hard_regs_live for
369    this register, record how many consecutive hardware registers it
370    actually needs.  */
371 static void
mark_hard_reg_live(rtx reg)372 mark_hard_reg_live (rtx reg)
373 {
374   int regno = REGNO (reg);
375 
376   if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
377     {
378       int last = END_REGNO (reg);
379       enum reg_class aclass, pclass;
380 
381       while (regno < last)
382 	{
383 	  if (! TEST_HARD_REG_BIT (hard_regs_live, regno)
384 	      && ! TEST_HARD_REG_BIT (eliminable_regset, regno))
385 	    {
386 	      aclass = ira_hard_regno_allocno_class[regno];
387 	      pclass = ira_pressure_class_translate[aclass];
388 	      inc_register_pressure (pclass, 1);
389 	      make_hard_regno_live (regno);
390 	    }
391 	  regno++;
392 	}
393     }
394 }
395 
396 /* Mark a pseudo, or one of its subwords, as live.  REGNO is the pseudo's
397    register number; ORIG_REG is the access in the insn, which may be a
398    subreg.  */
399 static void
mark_pseudo_reg_live(rtx orig_reg,unsigned regno)400 mark_pseudo_reg_live (rtx orig_reg, unsigned regno)
401 {
402   if (read_modify_subreg_p (orig_reg))
403     {
404       mark_pseudo_regno_subword_live (regno,
405 				      subreg_lowpart_p (orig_reg) ? 0 : 1);
406     }
407   else
408     mark_pseudo_regno_live (regno);
409 }
410 
411 /* Mark the register referenced by use or def REF as live.  */
412 static void
mark_ref_live(df_ref ref)413 mark_ref_live (df_ref ref)
414 {
415   rtx reg = DF_REF_REG (ref);
416   rtx orig_reg = reg;
417 
418   if (GET_CODE (reg) == SUBREG)
419     reg = SUBREG_REG (reg);
420 
421   if (REGNO (reg) >= FIRST_PSEUDO_REGISTER)
422     mark_pseudo_reg_live (orig_reg, REGNO (reg));
423   else
424     mark_hard_reg_live (reg);
425 }
426 
427 /* Mark the pseudo register REGNO as dead.  Update all information about
428    live ranges and register pressure.  */
429 static void
mark_pseudo_regno_dead(int regno)430 mark_pseudo_regno_dead (int regno)
431 {
432   ira_allocno_t a = ira_curr_regno_allocno_map[regno];
433   int n, i, nregs;
434   enum reg_class cl;
435 
436   if (a == NULL)
437     return;
438 
439   /* Invalidate because it is referenced.  */
440   allocno_saved_at_call[ALLOCNO_NUM (a)] = 0;
441 
442   n = ALLOCNO_NUM_OBJECTS (a);
443   cl = ira_pressure_class_translate[ALLOCNO_CLASS (a)];
444   nregs = ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)];
445   if (n > 1)
446     {
447       /* We track every subobject separately.  */
448       gcc_assert (nregs == n);
449       nregs = 1;
450     }
451   for (i = 0; i < n; i++)
452     {
453       ira_object_t obj = ALLOCNO_OBJECT (a, i);
454       if (!sparseset_bit_p (objects_live, OBJECT_CONFLICT_ID (obj)))
455 	continue;
456 
457       dec_register_pressure (cl, nregs);
458       make_object_dead (obj);
459     }
460 }
461 
462 /* Like mark_pseudo_regno_dead, but called when we know that only part of the
463    register dies.  SUBWORD indicates which; a value of 0 indicates the low part.  */
464 static void
mark_pseudo_regno_subword_dead(int regno,int subword)465 mark_pseudo_regno_subword_dead (int regno, int subword)
466 {
467   ira_allocno_t a = ira_curr_regno_allocno_map[regno];
468   int n;
469   enum reg_class cl;
470   ira_object_t obj;
471 
472   if (a == NULL)
473     return;
474 
475   /* Invalidate because it is referenced.  */
476   allocno_saved_at_call[ALLOCNO_NUM (a)] = 0;
477 
478   n = ALLOCNO_NUM_OBJECTS (a);
479   if (n == 1)
480     /* The allocno as a whole doesn't die in this case.  */
481     return;
482 
483   cl = ira_pressure_class_translate[ALLOCNO_CLASS (a)];
484   gcc_assert
485     (n == ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)]);
486 
487   obj = ALLOCNO_OBJECT (a, subword);
488   if (!sparseset_bit_p (objects_live, OBJECT_CONFLICT_ID (obj)))
489     return;
490 
491   dec_register_pressure (cl, 1);
492   make_object_dead (obj);
493 }
494 
495 /* Process the definition of hard register REG.  This updates hard_regs_live
496    and hard reg conflict information for living allocnos.  */
497 static void
mark_hard_reg_dead(rtx reg)498 mark_hard_reg_dead (rtx reg)
499 {
500   int regno = REGNO (reg);
501 
502   if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
503     {
504       int last = END_REGNO (reg);
505       enum reg_class aclass, pclass;
506 
507       while (regno < last)
508 	{
509 	  if (TEST_HARD_REG_BIT (hard_regs_live, regno))
510 	    {
511 	      aclass = ira_hard_regno_allocno_class[regno];
512 	      pclass = ira_pressure_class_translate[aclass];
513 	      dec_register_pressure (pclass, 1);
514 	      make_hard_regno_dead (regno);
515 	    }
516 	  regno++;
517 	}
518     }
519 }
520 
521 /* Mark a pseudo, or one of its subwords, as dead.  REGNO is the pseudo's
522    register number; ORIG_REG is the access in the insn, which may be a
523    subreg.  */
524 static void
mark_pseudo_reg_dead(rtx orig_reg,unsigned regno)525 mark_pseudo_reg_dead (rtx orig_reg, unsigned regno)
526 {
527   if (read_modify_subreg_p (orig_reg))
528     {
529       mark_pseudo_regno_subword_dead (regno,
530 				      subreg_lowpart_p (orig_reg) ? 0 : 1);
531     }
532   else
533     mark_pseudo_regno_dead (regno);
534 }
535 
/* Mark the register referenced by definition DEF as dead, if the
   definition is a total one.  Conditional definitions and partial
   definitions that don't kill the rest of the register are ignored.  */
static void
mark_ref_dead (df_ref def)
{
  rtx reg = DF_REF_REG (def);
  rtx orig_reg = reg;

  /* A conditional def may leave the old value intact.  */
  if (DF_REF_FLAGS_IS_SET (def, DF_REF_CONDITIONAL))
    return;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  /* Skip partial defs, except for read-modify-write subregs of
     pseudos, for which mark_pseudo_reg_dead can kill just the
     affected subword.  */
  if (DF_REF_FLAGS_IS_SET (def, DF_REF_PARTIAL)
      && (GET_CODE (orig_reg) != SUBREG
	  || REGNO (reg) < FIRST_PSEUDO_REGISTER
	  || !read_modify_subreg_p (orig_reg)))
    return;

  if (REGNO (reg) >= FIRST_PSEUDO_REGISTER)
    mark_pseudo_reg_dead (orig_reg, REGNO (reg));
  else
    mark_hard_reg_dead (reg);
}
561 
562 /* If REG is a pseudo or a subreg of it, and the class of its allocno
563    intersects CL, make a conflict with pseudo DREG.  ORIG_DREG is the
564    rtx actually accessed, it may be identical to DREG or a subreg of it.
565    Advance the current program point before making the conflict if
566    ADVANCE_P.  Return TRUE if we will need to advance the current
567    program point.  */
568 static bool
make_pseudo_conflict(rtx reg,enum reg_class cl,rtx dreg,rtx orig_dreg,bool advance_p)569 make_pseudo_conflict (rtx reg, enum reg_class cl, rtx dreg, rtx orig_dreg,
570 		      bool advance_p)
571 {
572   rtx orig_reg = reg;
573   ira_allocno_t a;
574 
575   if (GET_CODE (reg) == SUBREG)
576     reg = SUBREG_REG (reg);
577 
578   if (! REG_P (reg) || REGNO (reg) < FIRST_PSEUDO_REGISTER)
579     return advance_p;
580 
581   a = ira_curr_regno_allocno_map[REGNO (reg)];
582   if (! reg_classes_intersect_p (cl, ALLOCNO_CLASS (a)))
583     return advance_p;
584 
585   if (advance_p)
586     curr_point++;
587 
588   mark_pseudo_reg_live (orig_reg, REGNO (reg));
589   mark_pseudo_reg_live (orig_dreg, REGNO (dreg));
590   mark_pseudo_reg_dead (orig_reg, REGNO (reg));
591   mark_pseudo_reg_dead (orig_dreg, REGNO (dreg));
592 
593   return false;
594 }
595 
/* Check and make if necessary conflicts for pseudo DREG of class
   DEF_CL of the current insn with input operand USE of class USE_CL.
   ORIG_DREG is the rtx actually accessed, it may be identical to
   DREG or a subreg of it.  Advance the current program point before
   making the conflict if ADVANCE_P.  Return TRUE if we will need to
   advance the current program point.  */
static bool
check_and_make_def_use_conflict (rtx dreg, rtx orig_dreg,
				 enum reg_class def_cl, int use,
				 enum reg_class use_cl, bool advance_p)
{
  /* Disjoint classes can never compete for the same hard reg.  */
  if (! reg_classes_intersect_p (def_cl, use_cl))
    return advance_p;

  advance_p = make_pseudo_conflict (recog_data.operand[use],
				    use_cl, dreg, orig_dreg, advance_p);

  /* Reload may end up swapping commutative operands, so you
     have to take both orderings into account.  The
     constraints for the two operands can be completely
     different.  (Indeed, if the constraints for the two
     operands are the same for all alternatives, there's no
     point marking them as commutative.)  */
  if (use < recog_data.n_operands - 1
      && recog_data.constraints[use][0] == '%')
    advance_p
      = make_pseudo_conflict (recog_data.operand[use + 1],
			      use_cl, dreg, orig_dreg, advance_p);
  if (use >= 1
      && recog_data.constraints[use - 1][0] == '%')
    advance_p
      = make_pseudo_conflict (recog_data.operand[use - 1],
			      use_cl, dreg, orig_dreg, advance_p);
  return advance_p;
}
631 
/* Check and make if necessary conflicts for definition DEF of class
   DEF_CL of the current insn with input operands.  Process only
   constraints of alternative ALT.  */
static void
check_and_make_def_conflict (int alt, int def, enum reg_class def_cl)
{
  int use, use_match;
  ira_allocno_t a;
  enum reg_class use_cl, acl;
  bool advance_p;
  rtx dreg = recog_data.operand[def];
  rtx orig_dreg = dreg;

  if (def_cl == NO_REGS)
    return;

  if (GET_CODE (dreg) == SUBREG)
    dreg = SUBREG_REG (dreg);

  /* Conflicts are only built between pseudos.  */
  if (! REG_P (dreg) || REGNO (dreg) < FIRST_PSEUDO_REGISTER)
    return;

  a = ira_curr_regno_allocno_map[REGNO (dreg)];
  acl = ALLOCNO_CLASS (a);
  if (! reg_classes_intersect_p (acl, def_cl))
    return;

  advance_p = true;

  int n_operands = recog_data.n_operands;
  const operand_alternative *op_alt = &recog_op_alt[alt * n_operands];
  for (use = 0; use < n_operands; use++)
    {
      int alt1;

      if (use == def || recog_data.operand_type[use] == OP_OUT)
	continue;

      /* The register class USE may occupy in alternative ALT.  */
      if (op_alt[use].anything_ok)
	use_cl = ALL_REGS;
      else
	use_cl = op_alt[use].cl;

      /* If there's any alternative that allows USE to match DEF, do not
	 record a conflict.  If that causes us to create an invalid
	 instruction due to the earlyclobber, reload must fix it up.
	 A '%' constraint marks the first of a commutative pair, so the
	 partner operand's match is considered too.  */
      for (alt1 = 0; alt1 < recog_data.n_alternatives; alt1++)
	{
	  if (!TEST_BIT (preferred_alternatives, alt1))
	    continue;
	  const operand_alternative *op_alt1
	    = &recog_op_alt[alt1 * n_operands];
	  if (op_alt1[use].matches == def
	      || (use < n_operands - 1
		  && recog_data.constraints[use][0] == '%'
		  && op_alt1[use + 1].matches == def)
	      || (use >= 1
		  && recog_data.constraints[use - 1][0] == '%'
		  && op_alt1[use - 1].matches == def))
	    break;
	}

      /* Loop exited early: some preferred alternative lets USE match
	 DEF, so no conflict.  */
      if (alt1 < recog_data.n_alternatives)
	continue;

      advance_p = check_and_make_def_use_conflict (dreg, orig_dreg, def_cl,
						   use, use_cl, advance_p);

      /* If USE is tied to another operand, that operand's class
	 constrains USE too; make conflicts against it as well.  */
      if ((use_match = op_alt[use].matches) >= 0)
	{
	  if (use_match == def)
	    continue;

	  if (op_alt[use_match].anything_ok)
	    use_cl = ALL_REGS;
	  else
	    use_cl = op_alt[use_match].cl;
	  advance_p = check_and_make_def_use_conflict (dreg, orig_dreg, def_cl,
						       use, use_cl, advance_p);
	}
    }
}
714 
/* Make conflicts of early clobber pseudo registers of the current
   insn with its inputs.  Avoid introducing unnecessary conflicts by
   checking classes of the constraints and pseudos because otherwise
   significant code degradation is possible for some targets.  */
static void
make_early_clobber_and_input_conflicts (void)
{
  int alt;
  int def, def_match;
  enum reg_class def_cl;

  int n_alternatives = recog_data.n_alternatives;
  int n_operands = recog_data.n_operands;
  /* OP_ALT is advanced by N_OPERANDS per alternative (recog_op_alt is
     a flat [alternative][operand] array).  */
  const operand_alternative *op_alt = recog_op_alt;
  for (alt = 0; alt < n_alternatives; alt++, op_alt += n_operands)
    if (TEST_BIT (preferred_alternatives, alt))
      for (def = 0; def < n_operands; def++)
	{
	  def_cl = NO_REGS;
	  if (op_alt[def].earlyclobber)
	    {
	      if (op_alt[def].anything_ok)
		def_cl = ALL_REGS;
	      else
		def_cl = op_alt[def].cl;
	      check_and_make_def_conflict (alt, def, def_cl);
	    }
	  /* An operand tied to an earlyclobber operand (or itself
	     earlyclobber) inherits the matched operand's class for the
	     conflict check.  */
	  if ((def_match = op_alt[def].matches) >= 0
	      && (op_alt[def_match].earlyclobber
		  || op_alt[def].earlyclobber))
	    {
	      if (op_alt[def_match].anything_ok)
		def_cl = ALL_REGS;
	      else
		def_cl = op_alt[def_match].cl;
	      check_and_make_def_conflict (alt, def, def_cl);
	    }
	}
}
754 
755 /* Mark early clobber hard registers of the current INSN as live (if
756    LIVE_P) or dead.  Return true if there are such registers.  */
757 static bool
mark_hard_reg_early_clobbers(rtx_insn * insn,bool live_p)758 mark_hard_reg_early_clobbers (rtx_insn *insn, bool live_p)
759 {
760   df_ref def;
761   bool set_p = false;
762 
763   FOR_EACH_INSN_DEF (def, insn)
764     if (DF_REF_FLAGS_IS_SET (def, DF_REF_MUST_CLOBBER))
765       {
766 	rtx dreg = DF_REF_REG (def);
767 
768 	if (GET_CODE (dreg) == SUBREG)
769 	  dreg = SUBREG_REG (dreg);
770 	if (! REG_P (dreg) || REGNO (dreg) >= FIRST_PSEUDO_REGISTER)
771 	  continue;
772 
773 	/* Hard register clobbers are believed to be early clobber
774 	   because there is no way to say that non-operand hard
775 	   register clobbers are not early ones.  */
776 	if (live_p)
777 	  mark_ref_live (def);
778 	else
779 	  mark_ref_dead (def);
780 	set_p = true;
781       }
782 
783   return set_p;
784 }
785 
/* Checks that CONSTRAINTS permits to use only one hard register.  If
   it is so, the function returns the class of the hard register.
   Otherwise it returns NO_REGS.  OP is the operand the constraints
   apply to and EQUIV_CONST, if non-null, is a constant equivalent of
   OP that may also satisfy a constraint.  */
static enum reg_class
single_reg_class (const char *constraints, rtx op, rtx equiv_const)
{
  int c;
  enum reg_class cl, next_cl;
  enum constraint_num cn;

  cl = NO_REGS;
  alternative_mask preferred = preferred_alternatives;
  /* Walk the constraint string; ',' moves to the next alternative,
     '#' disables the current one.  Only constraints of preferred
     alternatives (bit 0 of PREFERRED) are examined.  */
  for (; (c = *constraints); constraints += CONSTRAINT_LEN (c, constraints))
    if (c == '#')
      preferred &= ~ALTERNATIVE_BIT (0);
    else if (c == ',')
      preferred >>= 1;
    else if (preferred & 1)
      switch (c)
	{
	case 'g':
	  return NO_REGS;

	default:
	  /* ??? Is this the best way to handle memory constraints?  */
	  cn = lookup_constraint (constraints);
	  if (insn_extra_memory_constraint (cn)
	      || insn_extra_special_memory_constraint (cn)
	      || insn_extra_address_constraint (cn))
	    return NO_REGS;
	  if (constraint_satisfied_p (op, cn)
	      || (equiv_const != NULL_RTX
		  && CONSTANT_P (equiv_const)
		  && constraint_satisfied_p (equiv_const, cn)))
	    return NO_REGS;
	  next_cl = reg_class_for_constraint (cn);
	  if (next_cl == NO_REGS)
	    break;
	  /* All register constraints seen so far must pin down the
	     same single hard register, else give up.  */
	  if (cl == NO_REGS
	      ? ira_class_singleton[next_cl][GET_MODE (op)] < 0
	      : (ira_class_singleton[cl][GET_MODE (op)]
		 != ira_class_singleton[next_cl][GET_MODE (op)]))
	    return NO_REGS;
	  cl = next_cl;
	  break;

	case '0': case '1': case '2': case '3': case '4':
	case '5': case '6': case '7': case '8': case '9':
	  /* A matching constraint: recurse into the matched operand's
	     constraints.  */
	  next_cl
	    = single_reg_class (recog_data.constraints[c - '0'],
				recog_data.operand[c - '0'], NULL_RTX);
	  if (cl == NO_REGS
	      ? ira_class_singleton[next_cl][GET_MODE (op)] < 0
	      : (ira_class_singleton[cl][GET_MODE (op)]
		 != ira_class_singleton[next_cl][GET_MODE (op)]))
	    return NO_REGS;
	  cl = next_cl;
	  break;
	}
  return cl;
}
847 
848 /* The function checks that operand OP_NUM of the current insn can use
849    only one hard register.  If it is so, the function returns the
850    class of the hard register.  Otherwise it returns NO_REGS.  */
851 static enum reg_class
single_reg_operand_class(int op_num)852 single_reg_operand_class (int op_num)
853 {
854   if (op_num < 0 || recog_data.n_alternatives == 0)
855     return NO_REGS;
856   return single_reg_class (recog_data.constraints[op_num],
857 			   recog_data.operand[op_num], NULL_RTX);
858 }
859 
/* The function sets up hard register set *SET to hard registers which
   might be used by insn reloads because the constraints are too
   strict.  PREFERRED masks the alternatives to consider.  */
void
ira_implicitly_set_insn_hard_regs (HARD_REG_SET *set,
				   alternative_mask preferred)
{
  int i, c, regno = 0;
  enum reg_class cl;
  rtx op;
  machine_mode mode;

  CLEAR_HARD_REG_SET (*set);
  for (i = 0; i < recog_data.n_operands; i++)
    {
      op = recog_data.operand[i];

      if (GET_CODE (op) == SUBREG)
	op = SUBREG_REG (op);

      /* Only scratches and pseudos can be assigned an implicitly used
	 hard register by reload.  */
      if (GET_CODE (op) == SCRATCH
	  || (REG_P (op) && (regno = REGNO (op)) >= FIRST_PSEUDO_REGISTER))
	{
	  const char *p = recog_data.constraints[i];

	  mode = (GET_CODE (op) == SCRATCH
		  ? GET_MODE (op) : PSEUDO_REGNO_MODE (regno));
	  cl = NO_REGS;
	  /* Same constraint-walking scheme as single_reg_class: ','
	     advances the alternative, '#' disables the current one.  */
	  for (; (c = *p); p += CONSTRAINT_LEN (c, p))
	    if (c == '#')
	      preferred &= ~ALTERNATIVE_BIT (0);
	    else if (c == ',')
	      preferred >>= 1;
	    else if (preferred & 1)
	      {
		cl = reg_class_for_constraint (lookup_constraint (p));
		if (cl != NO_REGS)
		  {
		    /* There is no register pressure problem if all of the
		       regs in this class are fixed.  */
		    /* NOTE(review): this inner REGNO shadows the outer
		       one above; here it is the class's single hard
		       register, not the operand's regno.  */
		    int regno = ira_class_singleton[cl][mode];
		    if (regno >= 0)
		      add_to_hard_reg_set (set, mode, regno);
		  }
	      }
	}
    }
}
908 /* Processes input operands, if IN_P, or output operands otherwise of
909    the current insn with FREQ to find allocno which can use only one
910    hard register and makes other currently living allocnos conflicting
911    with the hard register.  */
912 static void
process_single_reg_class_operands(bool in_p,int freq)913 process_single_reg_class_operands (bool in_p, int freq)
914 {
915   int i, regno;
916   unsigned int px;
917   enum reg_class cl;
918   rtx operand;
919   ira_allocno_t operand_a, a;
920 
921   for (i = 0; i < recog_data.n_operands; i++)
922     {
923       operand = recog_data.operand[i];
924       if (in_p && recog_data.operand_type[i] != OP_IN
925 	  && recog_data.operand_type[i] != OP_INOUT)
926 	continue;
927       if (! in_p && recog_data.operand_type[i] != OP_OUT
928 	  && recog_data.operand_type[i] != OP_INOUT)
929 	continue;
930       cl = single_reg_operand_class (i);
931       if (cl == NO_REGS)
932 	continue;
933 
934       operand_a = NULL;
935 
936       if (GET_CODE (operand) == SUBREG)
937 	operand = SUBREG_REG (operand);
938 
939       if (REG_P (operand)
940 	  && (regno = REGNO (operand)) >= FIRST_PSEUDO_REGISTER)
941 	{
942 	  enum reg_class aclass;
943 
944 	  operand_a = ira_curr_regno_allocno_map[regno];
945 	  aclass = ALLOCNO_CLASS (operand_a);
946 	  if (ira_class_subset_p[cl][aclass])
947 	    {
948 	      /* View the desired allocation of OPERAND as:
949 
950 		    (REG:YMODE YREGNO),
951 
952 		 a simplification of:
953 
954 		    (subreg:YMODE (reg:XMODE XREGNO) OFFSET).  */
955 	      machine_mode ymode, xmode;
956 	      int xregno, yregno;
957 	      poly_int64 offset;
958 
959 	      xmode = recog_data.operand_mode[i];
960 	      xregno = ira_class_singleton[cl][xmode];
961 	      gcc_assert (xregno >= 0);
962 	      ymode = ALLOCNO_MODE (operand_a);
963 	      offset = subreg_lowpart_offset (ymode, xmode);
964 	      yregno = simplify_subreg_regno (xregno, xmode, offset, ymode);
965 	      if (yregno >= 0
966 		  && ira_class_hard_reg_index[aclass][yregno] >= 0)
967 		{
968 		  int cost;
969 
970 		  ira_allocate_and_set_costs
971 		    (&ALLOCNO_CONFLICT_HARD_REG_COSTS (operand_a),
972 		     aclass, 0);
973 		  ira_init_register_move_cost_if_necessary (xmode);
974 		  cost = freq * (in_p
975 				 ? ira_register_move_cost[xmode][aclass][cl]
976 				 : ira_register_move_cost[xmode][cl][aclass]);
977 		  ALLOCNO_CONFLICT_HARD_REG_COSTS (operand_a)
978 		    [ira_class_hard_reg_index[aclass][yregno]] -= cost;
979 		}
980 	    }
981 	}
982 
983       EXECUTE_IF_SET_IN_SPARSESET (objects_live, px)
984         {
985 	  ira_object_t obj = ira_object_id_map[px];
986 	  a = OBJECT_ALLOCNO (obj);
987 	  if (a != operand_a)
988 	    {
989 	      /* We could increase costs of A instead of making it
990 		 conflicting with the hard register.  But it works worse
991 		 because it will be spilled in reload in anyway.  */
992 	      IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
993 				reg_class_contents[cl]);
994 	      IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
995 				reg_class_contents[cl]);
996 	    }
997 	}
998     }
999 }
1000 
1001 /* Look through the CALL_INSN_FUNCTION_USAGE of a call insn INSN, and see if
1002    we find a SET rtx that we can use to deduce that a register can be cheaply
1003    caller-saved.  Return such a register, or NULL_RTX if none is found.  */
1004 static rtx
find_call_crossed_cheap_reg(rtx_insn * insn)1005 find_call_crossed_cheap_reg (rtx_insn *insn)
1006 {
1007   rtx cheap_reg = NULL_RTX;
1008   rtx exp = CALL_INSN_FUNCTION_USAGE (insn);
1009 
1010   while (exp != NULL)
1011     {
1012       rtx x = XEXP (exp, 0);
1013       if (GET_CODE (x) == SET)
1014 	{
1015 	  exp = x;
1016 	  break;
1017 	}
1018       exp = XEXP (exp, 1);
1019     }
1020   if (exp != NULL)
1021     {
1022       basic_block bb = BLOCK_FOR_INSN (insn);
1023       rtx reg = SET_SRC (exp);
1024       rtx_insn *prev = PREV_INSN (insn);
1025       while (prev && !(INSN_P (prev)
1026 		       && BLOCK_FOR_INSN (prev) != bb))
1027 	{
1028 	  if (NONDEBUG_INSN_P (prev))
1029 	    {
1030 	      rtx set = single_set (prev);
1031 
1032 	      if (set && rtx_equal_p (SET_DEST (set), reg))
1033 		{
1034 		  rtx src = SET_SRC (set);
1035 		  if (!REG_P (src) || HARD_REGISTER_P (src)
1036 		      || !pseudo_regno_single_word_and_live_p (REGNO (src)))
1037 		    break;
1038 		  if (!modified_between_p (src, prev, insn))
1039 		    cheap_reg = src;
1040 		  break;
1041 		}
1042 	      if (set && rtx_equal_p (SET_SRC (set), reg))
1043 		{
1044 		  rtx dest = SET_DEST (set);
1045 		  if (!REG_P (dest) || HARD_REGISTER_P (dest)
1046 		      || !pseudo_regno_single_word_and_live_p (REGNO (dest)))
1047 		    break;
1048 		  if (!modified_between_p (dest, prev, insn))
1049 		    cheap_reg = dest;
1050 		  break;
1051 		}
1052 
1053 	      if (reg_set_p (reg, prev))
1054 		break;
1055 	    }
1056 	  prev = PREV_INSN (prev);
1057 	}
1058     }
1059   return cheap_reg;
1060 }
1061 
1062 /* Determine whether INSN is a register to register copy of the type where
1063    we do not need to make the source and destiniation registers conflict.
1064    If this is a copy instruction, then return the source reg.  Otherwise,
1065    return NULL_RTX.  */
1066 rtx
non_conflicting_reg_copy_p(rtx_insn * insn)1067 non_conflicting_reg_copy_p (rtx_insn *insn)
1068 {
1069   /* Reload has issues with overlapping pseudos being assigned to the
1070      same hard register, so don't allow it.  See PR87600 for details.  */
1071   if (!targetm.lra_p ())
1072     return NULL_RTX;
1073 
1074   rtx set = single_set (insn);
1075 
1076   /* Disallow anything other than a simple register to register copy
1077      that has no side effects.  */
1078   if (set == NULL_RTX
1079       || !REG_P (SET_DEST (set))
1080       || !REG_P (SET_SRC (set))
1081       || side_effects_p (set))
1082     return NULL_RTX;
1083 
1084   int dst_regno = REGNO (SET_DEST (set));
1085   int src_regno = REGNO (SET_SRC (set));
1086   machine_mode mode = GET_MODE (SET_DEST (set));
1087 
1088   /* By definition, a register does not conflict with itself, therefore we
1089      do not have to handle it specially.  Returning NULL_RTX now, helps
1090      simplify the callers of this function.  */
1091   if (dst_regno == src_regno)
1092     return NULL_RTX;
1093 
1094   /* Computing conflicts for register pairs is difficult to get right, so
1095      for now, disallow it.  */
1096   if ((HARD_REGISTER_NUM_P (dst_regno)
1097        && hard_regno_nregs (dst_regno, mode) != 1)
1098       || (HARD_REGISTER_NUM_P (src_regno)
1099 	  && hard_regno_nregs (src_regno, mode) != 1))
1100     return NULL_RTX;
1101 
1102   return SET_SRC (set);
1103 }
1104 
1105 /* Process insns of the basic block given by its LOOP_TREE_NODE to
1106    update allocno live ranges, allocno hard register conflicts,
1107    intersected calls, and register pressure info for allocnos for the
1108    basic block for and regions containing the basic block.  */
1109 static void
process_bb_node_lives(ira_loop_tree_node_t loop_tree_node)1110 process_bb_node_lives (ira_loop_tree_node_t loop_tree_node)
1111 {
1112   int i, freq;
1113   unsigned int j;
1114   basic_block bb;
1115   rtx_insn *insn;
1116   bitmap_iterator bi;
1117   bitmap reg_live_out;
1118   unsigned int px;
1119   bool set_p;
1120 
1121   bb = loop_tree_node->bb;
1122   if (bb != NULL)
1123     {
1124       for (i = 0; i < ira_pressure_classes_num; i++)
1125 	{
1126 	  curr_reg_pressure[ira_pressure_classes[i]] = 0;
1127 	  high_pressure_start_point[ira_pressure_classes[i]] = -1;
1128 	}
1129       curr_bb_node = loop_tree_node;
1130       reg_live_out = df_get_live_out (bb);
1131       sparseset_clear (objects_live);
1132       REG_SET_TO_HARD_REG_SET (hard_regs_live, reg_live_out);
1133       AND_COMPL_HARD_REG_SET (hard_regs_live, eliminable_regset);
1134       AND_COMPL_HARD_REG_SET (hard_regs_live, ira_no_alloc_regs);
1135       for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
1136 	if (TEST_HARD_REG_BIT (hard_regs_live, i))
1137 	  {
1138 	    enum reg_class aclass, pclass, cl;
1139 
1140 	    aclass = ira_allocno_class_translate[REGNO_REG_CLASS (i)];
1141 	    pclass = ira_pressure_class_translate[aclass];
1142 	    for (j = 0;
1143 		 (cl = ira_reg_class_super_classes[pclass][j])
1144 		   != LIM_REG_CLASSES;
1145 		 j++)
1146 	      {
1147 		if (! ira_reg_pressure_class_p[cl])
1148 		  continue;
1149 		curr_reg_pressure[cl]++;
1150 		if (curr_bb_node->reg_pressure[cl] < curr_reg_pressure[cl])
1151 		  curr_bb_node->reg_pressure[cl] = curr_reg_pressure[cl];
1152 		ira_assert (curr_reg_pressure[cl]
1153 			    <= ira_class_hard_regs_num[cl]);
1154 	      }
1155 	  }
1156       EXECUTE_IF_SET_IN_BITMAP (reg_live_out, FIRST_PSEUDO_REGISTER, j, bi)
1157 	mark_pseudo_regno_live (j);
1158 
1159       freq = REG_FREQ_FROM_BB (bb);
1160       if (freq == 0)
1161 	freq = 1;
1162 
1163       /* Invalidate all allocno_saved_at_call entries.  */
1164       last_call_num++;
1165 
1166       /* Scan the code of this basic block, noting which allocnos and
1167 	 hard regs are born or die.
1168 
1169 	 Note that this loop treats uninitialized values as live until
1170 	 the beginning of the block.  For example, if an instruction
1171 	 uses (reg:DI foo), and only (subreg:SI (reg:DI foo) 0) is ever
1172 	 set, FOO will remain live until the beginning of the block.
1173 	 Likewise if FOO is not set at all.  This is unnecessarily
1174 	 pessimistic, but it probably doesn't matter much in practice.  */
1175       FOR_BB_INSNS_REVERSE (bb, insn)
1176 	{
1177 	  ira_allocno_t a;
1178 	  df_ref def, use;
1179 	  bool call_p;
1180 
1181 	  if (!NONDEBUG_INSN_P (insn))
1182 	    continue;
1183 
1184 	  if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
1185 	    fprintf (ira_dump_file, "   Insn %u(l%d): point = %d\n",
1186 		     INSN_UID (insn), loop_tree_node->parent->loop_num,
1187 		     curr_point);
1188 
1189 	  call_p = CALL_P (insn);
1190 	  ignore_reg_for_conflicts = non_conflicting_reg_copy_p (insn);
1191 
1192 	  /* Mark each defined value as live.  We need to do this for
1193 	     unused values because they still conflict with quantities
1194 	     that are live at the time of the definition.
1195 
1196 	     Ignore DF_REF_MAY_CLOBBERs on a call instruction.  Such
1197 	     references represent the effect of the called function
1198 	     on a call-clobbered register.  Marking the register as
1199 	     live would stop us from allocating it to a call-crossing
1200 	     allocno.  */
1201 	  FOR_EACH_INSN_DEF (def, insn)
1202 	    if (!call_p || !DF_REF_FLAGS_IS_SET (def, DF_REF_MAY_CLOBBER))
1203 	      mark_ref_live (def);
1204 
1205 	  /* If INSN has multiple outputs, then any value used in one
1206 	     of the outputs conflicts with the other outputs.  Model this
1207 	     by making the used value live during the output phase.
1208 
1209 	     It is unsafe to use !single_set here since it will ignore
1210 	     an unused output.  Just because an output is unused does
1211 	     not mean the compiler can assume the side effect will not
1212 	     occur.  Consider if ALLOCNO appears in the address of an
1213 	     output and we reload the output.  If we allocate ALLOCNO
1214 	     to the same hard register as an unused output we could
1215 	     set the hard register before the output reload insn.  */
1216 	  if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
1217 	    FOR_EACH_INSN_USE (use, insn)
1218 	      {
1219 		int i;
1220 		rtx reg;
1221 
1222 		reg = DF_REF_REG (use);
1223 		for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
1224 		  {
1225 		    rtx set;
1226 
1227 		    set = XVECEXP (PATTERN (insn), 0, i);
1228 		    if (GET_CODE (set) == SET
1229 			&& reg_overlap_mentioned_p (reg, SET_DEST (set)))
1230 		      {
1231 			/* After the previous loop, this is a no-op if
1232 			   REG is contained within SET_DEST (SET).  */
1233 			mark_ref_live (use);
1234 			break;
1235 		      }
1236 		  }
1237 	      }
1238 
1239 	  extract_insn (insn);
1240 	  preferred_alternatives = get_preferred_alternatives (insn);
1241 	  preprocess_constraints (insn);
1242 	  process_single_reg_class_operands (false, freq);
1243 
1244 	  /* See which defined values die here.  */
1245 	  FOR_EACH_INSN_DEF (def, insn)
1246 	    if (!call_p || !DF_REF_FLAGS_IS_SET (def, DF_REF_MAY_CLOBBER))
1247 	      mark_ref_dead (def);
1248 
1249 	  if (call_p)
1250 	    {
1251 	      /* Try to find a SET in the CALL_INSN_FUNCTION_USAGE, and from
1252 		 there, try to find a pseudo that is live across the call but
1253 		 can be cheaply reconstructed from the return value.  */
1254 	      rtx cheap_reg = find_call_crossed_cheap_reg (insn);
1255 	      if (cheap_reg != NULL_RTX)
1256 		add_reg_note (insn, REG_RETURNED, cheap_reg);
1257 
1258 	      last_call_num++;
1259 	      sparseset_clear (allocnos_processed);
1260 	      /* The current set of live allocnos are live across the call.  */
1261 	      EXECUTE_IF_SET_IN_SPARSESET (objects_live, i)
1262 	        {
1263 		  ira_object_t obj = ira_object_id_map[i];
1264 		  a = OBJECT_ALLOCNO (obj);
1265 		  int num = ALLOCNO_NUM (a);
1266 		  HARD_REG_SET this_call_used_reg_set;
1267 
1268 		  get_call_reg_set_usage (insn, &this_call_used_reg_set,
1269 					  call_used_reg_set);
1270 
1271 		  /* Don't allocate allocnos that cross setjmps or any
1272 		     call, if this function receives a nonlocal
1273 		     goto.  */
1274 		  if (cfun->has_nonlocal_label
1275 		      || (!targetm.setjmp_preserves_nonvolatile_regs_p ()
1276 			  && (find_reg_note (insn, REG_SETJMP, NULL_RTX)
1277 			      != NULL_RTX)))
1278 		    {
1279 		      SET_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj));
1280 		      SET_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj));
1281 		    }
1282 		  if (can_throw_internal (insn))
1283 		    {
1284 		      IOR_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj),
1285 					this_call_used_reg_set);
1286 		      IOR_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
1287 					this_call_used_reg_set);
1288 		    }
1289 
1290 		  if (sparseset_bit_p (allocnos_processed, num))
1291 		    continue;
1292 		  sparseset_set_bit (allocnos_processed, num);
1293 
1294 		  if (allocno_saved_at_call[num] != last_call_num)
1295 		    /* Here we are mimicking caller-save.c behavior
1296 		       which does not save hard register at a call if
1297 		       it was saved on previous call in the same basic
1298 		       block and the hard register was not mentioned
1299 		       between the two calls.  */
1300 		    ALLOCNO_CALL_FREQ (a) += freq;
1301 		  /* Mark it as saved at the next call.  */
1302 		  allocno_saved_at_call[num] = last_call_num + 1;
1303 		  ALLOCNO_CALLS_CROSSED_NUM (a)++;
1304 		  IOR_HARD_REG_SET (ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a),
1305 				    this_call_used_reg_set);
1306 		  if (cheap_reg != NULL_RTX
1307 		      && ALLOCNO_REGNO (a) == (int) REGNO (cheap_reg))
1308 		    ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a)++;
1309 		}
1310 	    }
1311 
1312 	  make_early_clobber_and_input_conflicts ();
1313 
1314 	  curr_point++;
1315 
1316 	  /* Mark each used value as live.  */
1317 	  FOR_EACH_INSN_USE (use, insn)
1318 	    mark_ref_live (use);
1319 
1320 	  process_single_reg_class_operands (true, freq);
1321 
1322 	  set_p = mark_hard_reg_early_clobbers (insn, true);
1323 
1324 	  if (set_p)
1325 	    {
1326 	      mark_hard_reg_early_clobbers (insn, false);
1327 
1328 	      /* Mark each hard reg as live again.  For example, a
1329 		 hard register can be in clobber and in an insn
1330 		 input.  */
1331 	      FOR_EACH_INSN_USE (use, insn)
1332 		{
1333 		  rtx ureg = DF_REF_REG (use);
1334 
1335 		  if (GET_CODE (ureg) == SUBREG)
1336 		    ureg = SUBREG_REG (ureg);
1337 		  if (! REG_P (ureg) || REGNO (ureg) >= FIRST_PSEUDO_REGISTER)
1338 		    continue;
1339 
1340 		  mark_ref_live (use);
1341 		}
1342 	    }
1343 
1344 	  curr_point++;
1345 	}
1346       ignore_reg_for_conflicts = NULL_RTX;
1347 
1348       if (bb_has_eh_pred (bb))
1349 	for (j = 0; ; ++j)
1350 	  {
1351 	    unsigned int regno = EH_RETURN_DATA_REGNO (j);
1352 	    if (regno == INVALID_REGNUM)
1353 	      break;
1354 	    make_hard_regno_live (regno);
1355 	  }
1356 
1357       /* Allocnos can't go in stack regs at the start of a basic block
1358 	 that is reached by an abnormal edge. Likewise for call
1359 	 clobbered regs, because caller-save, fixup_abnormal_edges and
1360 	 possibly the table driven EH machinery are not quite ready to
1361 	 handle such allocnos live across such edges.  */
1362       if (bb_has_abnormal_pred (bb))
1363 	{
1364 #ifdef STACK_REGS
1365 	  EXECUTE_IF_SET_IN_SPARSESET (objects_live, px)
1366 	    {
1367 	      ira_allocno_t a = OBJECT_ALLOCNO (ira_object_id_map[px]);
1368 
1369 	      ALLOCNO_NO_STACK_REG_P (a) = true;
1370 	      ALLOCNO_TOTAL_NO_STACK_REG_P (a) = true;
1371 	    }
1372 	  for (px = FIRST_STACK_REG; px <= LAST_STACK_REG; px++)
1373 	    make_hard_regno_live (px);
1374 #endif
1375 	  /* No need to record conflicts for call clobbered regs if we
1376 	     have nonlocal labels around, as we don't ever try to
1377 	     allocate such regs in this case.  */
1378 	  if (!cfun->has_nonlocal_label
1379 	      && has_abnormal_call_or_eh_pred_edge_p (bb))
1380 	    for (px = 0; px < FIRST_PSEUDO_REGISTER; px++)
1381 	      if (call_used_regs[px]
1382 #ifdef REAL_PIC_OFFSET_TABLE_REGNUM
1383 		  /* We should create a conflict of PIC pseudo with
1384 		     PIC hard reg as PIC hard reg can have a wrong
1385 		     value after jump described by the abnormal edge.
1386 		     In this case we cannot allocate PIC hard reg to
1387 		     PIC pseudo as PIC pseudo will also have a wrong
1388 		     value.  This code is not critical as LRA can fix
1389 		     it but it is better to have the right allocation
1390 		     earlier.  */
1391 		  || (px == REAL_PIC_OFFSET_TABLE_REGNUM
1392 		      && pic_offset_table_rtx != NULL_RTX
1393 		      && REGNO (pic_offset_table_rtx) >= FIRST_PSEUDO_REGISTER)
1394 #endif
1395 		  )
1396 		make_hard_regno_live (px);
1397 	}
1398 
1399       EXECUTE_IF_SET_IN_SPARSESET (objects_live, i)
1400 	make_object_dead (ira_object_id_map[i]);
1401 
1402       curr_point++;
1403 
1404     }
1405   /* Propagate register pressure to upper loop tree nodes.  */
1406   if (loop_tree_node != ira_loop_tree_root)
1407     for (i = 0; i < ira_pressure_classes_num; i++)
1408       {
1409 	enum reg_class pclass;
1410 
1411 	pclass = ira_pressure_classes[i];
1412 	if (loop_tree_node->reg_pressure[pclass]
1413 	    > loop_tree_node->parent->reg_pressure[pclass])
1414 	  loop_tree_node->parent->reg_pressure[pclass]
1415 	    = loop_tree_node->reg_pressure[pclass];
1416       }
1417 }
1418 
1419 /* Create and set up IRA_START_POINT_RANGES and
1420    IRA_FINISH_POINT_RANGES.  */
1421 static void
create_start_finish_chains(void)1422 create_start_finish_chains (void)
1423 {
1424   ira_object_t obj;
1425   ira_object_iterator oi;
1426   live_range_t r;
1427 
1428   ira_start_point_ranges
1429     = (live_range_t *) ira_allocate (ira_max_point * sizeof (live_range_t));
1430   memset (ira_start_point_ranges, 0, ira_max_point * sizeof (live_range_t));
1431   ira_finish_point_ranges
1432     = (live_range_t *) ira_allocate (ira_max_point * sizeof (live_range_t));
1433   memset (ira_finish_point_ranges, 0, ira_max_point * sizeof (live_range_t));
1434   FOR_EACH_OBJECT (obj, oi)
1435     for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
1436       {
1437 	r->start_next = ira_start_point_ranges[r->start];
1438 	ira_start_point_ranges[r->start] = r;
1439 	r->finish_next = ira_finish_point_ranges[r->finish];
1440  	  ira_finish_point_ranges[r->finish] = r;
1441       }
1442 }
1443 
1444 /* Rebuild IRA_START_POINT_RANGES and IRA_FINISH_POINT_RANGES after
1445    new live ranges and program points were added as a result if new
1446    insn generation.  */
1447 void
ira_rebuild_start_finish_chains(void)1448 ira_rebuild_start_finish_chains (void)
1449 {
1450   ira_free (ira_finish_point_ranges);
1451   ira_free (ira_start_point_ranges);
1452   create_start_finish_chains ();
1453 }
1454 
1455 /* Compress allocno live ranges by removing program points where
1456    nothing happens.  */
1457 static void
remove_some_program_points_and_update_live_ranges(void)1458 remove_some_program_points_and_update_live_ranges (void)
1459 {
1460   unsigned i;
1461   int n;
1462   int *map;
1463   ira_object_t obj;
1464   ira_object_iterator oi;
1465   live_range_t r, prev_r, next_r;
1466   sbitmap_iterator sbi;
1467   bool born_p, dead_p, prev_born_p, prev_dead_p;
1468 
1469   auto_sbitmap born (ira_max_point);
1470   auto_sbitmap dead (ira_max_point);
1471   bitmap_clear (born);
1472   bitmap_clear (dead);
1473   FOR_EACH_OBJECT (obj, oi)
1474     for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
1475       {
1476 	ira_assert (r->start <= r->finish);
1477 	bitmap_set_bit (born, r->start);
1478 	bitmap_set_bit (dead, r->finish);
1479       }
1480 
1481   auto_sbitmap born_or_dead (ira_max_point);
1482   bitmap_ior (born_or_dead, born, dead);
1483   map = (int *) ira_allocate (sizeof (int) * ira_max_point);
1484   n = -1;
1485   prev_born_p = prev_dead_p = false;
1486   EXECUTE_IF_SET_IN_BITMAP (born_or_dead, 0, i, sbi)
1487     {
1488       born_p = bitmap_bit_p (born, i);
1489       dead_p = bitmap_bit_p (dead, i);
1490       if ((prev_born_p && ! prev_dead_p && born_p && ! dead_p)
1491 	  || (prev_dead_p && ! prev_born_p && dead_p && ! born_p))
1492 	map[i] = n;
1493       else
1494 	map[i] = ++n;
1495       prev_born_p = born_p;
1496       prev_dead_p = dead_p;
1497     }
1498 
1499   n++;
1500   if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
1501     fprintf (ira_dump_file, "Compressing live ranges: from %d to %d - %d%%\n",
1502 	     ira_max_point, n, 100 * n / ira_max_point);
1503   ira_max_point = n;
1504 
1505   FOR_EACH_OBJECT (obj, oi)
1506     for (r = OBJECT_LIVE_RANGES (obj), prev_r = NULL; r != NULL; r = next_r)
1507       {
1508 	next_r = r->next;
1509 	r->start = map[r->start];
1510 	r->finish = map[r->finish];
1511 	if (prev_r == NULL || prev_r->start > r->finish + 1)
1512 	  {
1513 	    prev_r = r;
1514 	    continue;
1515 	  }
1516 	prev_r->start = r->start;
1517 	prev_r->next = next_r;
1518 	ira_finish_live_range (r);
1519       }
1520 
1521   ira_free (map);
1522 }
1523 
1524 /* Print live ranges R to file F.  */
1525 void
ira_print_live_range_list(FILE * f,live_range_t r)1526 ira_print_live_range_list (FILE *f, live_range_t r)
1527 {
1528   for (; r != NULL; r = r->next)
1529     fprintf (f, " [%d..%d]", r->start, r->finish);
1530   fprintf (f, "\n");
1531 }
1532 
1533 DEBUG_FUNCTION void
debug(live_range & ref)1534 debug (live_range &ref)
1535 {
1536   ira_print_live_range_list (stderr, &ref);
1537 }
1538 
1539 DEBUG_FUNCTION void
debug(live_range * ptr)1540 debug (live_range *ptr)
1541 {
1542   if (ptr)
1543     debug (*ptr);
1544   else
1545     fprintf (stderr, "<nil>\n");
1546 }
1547 
1548 /* Print live ranges R to stderr.  */
1549 void
ira_debug_live_range_list(live_range_t r)1550 ira_debug_live_range_list (live_range_t r)
1551 {
1552   ira_print_live_range_list (stderr, r);
1553 }
1554 
1555 /* Print live ranges of object OBJ to file F.  */
1556 static void
print_object_live_ranges(FILE * f,ira_object_t obj)1557 print_object_live_ranges (FILE *f, ira_object_t obj)
1558 {
1559   ira_print_live_range_list (f, OBJECT_LIVE_RANGES (obj));
1560 }
1561 
1562 /* Print live ranges of allocno A to file F.  */
1563 static void
print_allocno_live_ranges(FILE * f,ira_allocno_t a)1564 print_allocno_live_ranges (FILE *f, ira_allocno_t a)
1565 {
1566   int n = ALLOCNO_NUM_OBJECTS (a);
1567   int i;
1568 
1569   for (i = 0; i < n; i++)
1570     {
1571       fprintf (f, " a%d(r%d", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
1572       if (n > 1)
1573 	fprintf (f, " [%d]", i);
1574       fprintf (f, "):");
1575       print_object_live_ranges (f, ALLOCNO_OBJECT (a, i));
1576     }
1577 }
1578 
1579 /* Print live ranges of allocno A to stderr.  */
1580 void
ira_debug_allocno_live_ranges(ira_allocno_t a)1581 ira_debug_allocno_live_ranges (ira_allocno_t a)
1582 {
1583   print_allocno_live_ranges (stderr, a);
1584 }
1585 
1586 /* Print live ranges of all allocnos to file F.  */
1587 static void
print_live_ranges(FILE * f)1588 print_live_ranges (FILE *f)
1589 {
1590   ira_allocno_t a;
1591   ira_allocno_iterator ai;
1592 
1593   FOR_EACH_ALLOCNO (a, ai)
1594     print_allocno_live_ranges (f, a);
1595 }
1596 
1597 /* Print live ranges of all allocnos to stderr.  */
1598 void
ira_debug_live_ranges(void)1599 ira_debug_live_ranges (void)
1600 {
1601   print_live_ranges (stderr);
1602 }
1603 
1604 /* The main entry function creates live ranges, set up
1605    CONFLICT_HARD_REGS and TOTAL_CONFLICT_HARD_REGS for objects, and
1606    calculate register pressure info.  */
1607 void
ira_create_allocno_live_ranges(void)1608 ira_create_allocno_live_ranges (void)
1609 {
1610   objects_live = sparseset_alloc (ira_objects_num);
1611   allocnos_processed = sparseset_alloc (ira_allocnos_num);
1612   curr_point = 0;
1613   last_call_num = 0;
1614   allocno_saved_at_call
1615     = (int *) ira_allocate (ira_allocnos_num * sizeof (int));
1616   memset (allocno_saved_at_call, 0, ira_allocnos_num * sizeof (int));
1617   ira_traverse_loop_tree (true, ira_loop_tree_root, NULL,
1618 			  process_bb_node_lives);
1619   ira_max_point = curr_point;
1620   create_start_finish_chains ();
1621   if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
1622     print_live_ranges (ira_dump_file);
1623   /* Clean up.  */
1624   ira_free (allocno_saved_at_call);
1625   sparseset_free (objects_live);
1626   sparseset_free (allocnos_processed);
1627 }
1628 
1629 /* Compress allocno live ranges.  */
1630 void
ira_compress_allocno_live_ranges(void)1631 ira_compress_allocno_live_ranges (void)
1632 {
1633   remove_some_program_points_and_update_live_ranges ();
1634   ira_rebuild_start_finish_chains ();
1635   if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
1636     {
1637       fprintf (ira_dump_file, "Ranges after the compression:\n");
1638       print_live_ranges (ira_dump_file);
1639     }
1640 }
1641 
1642 /* Free arrays IRA_START_POINT_RANGES and IRA_FINISH_POINT_RANGES.  */
1643 void
ira_finish_allocno_live_ranges(void)1644 ira_finish_allocno_live_ranges (void)
1645 {
1646   ira_free (ira_finish_point_ranges);
1647   ira_free (ira_start_point_ranges);
1648 }
1649