1 /* Code for RTL transformations to satisfy insn constraints.
2    Copyright (C) 2010-2020 Free Software Foundation, Inc.
3    Contributed by Vladimir Makarov <vmakarov@redhat.com>.
4 
5    This file is part of GCC.
6 
7    GCC is free software; you can redistribute it and/or modify it under
8    the terms of the GNU General Public License as published by the Free
9    Software Foundation; either version 3, or (at your option) any later
10    version.
11 
12    GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13    WARRANTY; without even the implied warranty of MERCHANTABILITY or
14    FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
15    for more details.
16 
17    You should have received a copy of the GNU General Public License
18    along with GCC; see the file COPYING3.  If not see
19    <http://www.gnu.org/licenses/>.  */
20 
21 
22 /* This file contains code for 3 passes: constraint pass,
23    inheritance/split pass, and pass for undoing failed inheritance and
24    split.
25 
26    The major goal of constraint pass is to transform RTL to satisfy
27    insn and address constraints by:
28      o choosing insn alternatives;
29      o generating *reload insns* (or reloads in brief) and *reload
30        pseudos* which will get necessary hard registers later;
31      o substituting pseudos with equivalent values and removing the
32        instructions that initialized those pseudos.
33 
34    The constraint pass has biggest and most complicated code in LRA.
35    There are a lot of important details like:
36      o reuse of input reload pseudos to simplify reload pseudo
37        allocations;
38      o some heuristics to choose insn alternative to improve the
39        inheritance;
40      o early clobbers etc.
41 
42    The pass is mimicking former reload pass in alternative choosing
43    because the reload pass is oriented to current machine description
44    model.  It might be changed if the machine description model is
45    changed.
46 
47    There is special code for preventing all LRA and this pass cycling
48    in case of bugs.
49 
50    On the first iteration of the pass we process every instruction and
51    choose an alternative for each one.  On subsequent iterations we try
52    to avoid reprocessing instructions if we can be sure that the old
53    choice is still valid.
54 
55    The inheritance/spilt pass is to transform code to achieve
56    ineheritance and live range splitting.  It is done on backward
57    traversal of EBBs.
58 
59    The inheritance optimization goal is to reuse values in hard
60    registers. There is analogous optimization in old reload pass.  The
61    inheritance is achieved by following transformation:
62 
63        reload_p1 <- p	     reload_p1 <- p
64        ...		     new_p <- reload_p1
65        ...		=>   ...
66        reload_p2 <- p	     reload_p2 <- new_p
67 
68    where p is spilled and not changed between the insns.  Reload_p1 is
69    also called *original pseudo* and new_p is called *inheritance
70    pseudo*.
71 
72    The subsequent assignment pass will try to assign the same (or
73    another if it is not possible) hard register to new_p as to
74    reload_p1 or reload_p2.
75 
76    If the assignment pass fails to assign a hard register to new_p,
77    this file will undo the inheritance and restore the original code.
78    This is because implementing the above sequence with a spilled
79    new_p would make the code much worse.  The inheritance is done in
80    EBB scope.  The above is just a simplified example to get an idea
81    of the inheritance as the inheritance is also done for non-reload
82    insns.
83 
84    Splitting (transformation) is also done in EBB scope on the same
85    pass as the inheritance:
86 
87        r <- ... or ... <- r		 r <- ... or ... <- r
88        ...				 s <- r (new insn -- save)
89        ...			  =>
90        ...				 r <- s (new insn -- restore)
91        ... <- r				 ... <- r
92 
93     The *split pseudo* s is assigned to the hard register of the
94     original pseudo or hard register r.
95 
96     Splitting is done:
97       o In EBBs with high register pressure for global pseudos (living
98 	in at least 2 BBs) and assigned to hard registers when there
99 	are more one reloads needing the hard registers;
100       o for pseudos needing save/restore code around calls.
101 
102     If the split pseudo still has the same hard register as the
103     original pseudo after the subsequent assignment pass or the
104     original pseudo was split, the opposite transformation is done on
105     the same pass for undoing inheritance.  */
106 
107 #undef REG_OK_STRICT
108 
109 #include "config.h"
110 #include "system.h"
111 #include "coretypes.h"
112 #include "backend.h"
113 #include "target.h"
114 #include "rtl.h"
115 #include "tree.h"
116 #include "predict.h"
117 #include "df.h"
118 #include "memmodel.h"
119 #include "tm_p.h"
120 #include "expmed.h"
121 #include "optabs.h"
122 #include "regs.h"
123 #include "ira.h"
124 #include "recog.h"
125 #include "output.h"
126 #include "addresses.h"
127 #include "expr.h"
128 #include "cfgrtl.h"
129 #include "rtl-error.h"
130 #include "lra.h"
131 #include "lra-int.h"
132 #include "print-rtl.h"
133 #include "function-abi.h"
134 
135 /* Value of LRA_CURR_RELOAD_NUM at the beginning of BB of the current
136    insn.  Remember that LRA_CURR_RELOAD_NUM is the number of emitted
137    reload insns.  */
138 static int bb_reload_num;
139 
140 /* The current insn being processed and corresponding its single set
141    (NULL otherwise), its data (basic block, the insn data, the insn
142    static data, and the mode of each operand).  */
143 static rtx_insn *curr_insn;
144 static rtx curr_insn_set;
145 static basic_block curr_bb;
146 static lra_insn_recog_data_t curr_id;
147 static struct lra_static_insn_data *curr_static_id;
148 static machine_mode curr_operand_mode[MAX_RECOG_OPERANDS];
149 /* Mode of the register substituted by its equivalence with VOIDmode
150    (e.g. constant) and whose subreg is given operand of the current
151    insn.  VOIDmode in all other cases.  */
152 static machine_mode original_subreg_reg_mode[MAX_RECOG_OPERANDS];
153 
154 
155 
156 /* Start numbers for new registers and insns at the current constraints
157    pass start.	*/
158 static int new_regno_start;
159 static int new_insn_uid_start;
160 
161 /* If LOC is nonnull, strip any outer subreg from it.  */
162 static inline rtx *
strip_subreg(rtx * loc)163 strip_subreg (rtx *loc)
164 {
165   return loc && GET_CODE (*loc) == SUBREG ? &SUBREG_REG (*loc) : loc;
166 }
167 
168 /* Return hard regno of REGNO or if it is was not assigned to a hard
169    register, use a hard register from its allocno class.  */
170 static int
get_try_hard_regno(int regno)171 get_try_hard_regno (int regno)
172 {
173   int hard_regno;
174   enum reg_class rclass;
175 
176   if ((hard_regno = regno) >= FIRST_PSEUDO_REGISTER)
177     hard_regno = lra_get_regno_hard_regno (regno);
178   if (hard_regno >= 0)
179     return hard_regno;
180   rclass = lra_get_allocno_class (regno);
181   if (rclass == NO_REGS)
182     return -1;
183   return ira_class_hard_regs[rclass][0];
184 }
185 
186 /* Return the hard regno of X after removing its subreg.  If X is not
187    a register or a subreg of a register, return -1.  If X is a pseudo,
188    use its assignment.  If FINAL_P return the final hard regno which will
189    be after elimination.  */
190 static int
get_hard_regno(rtx x,bool final_p)191 get_hard_regno (rtx x, bool final_p)
192 {
193   rtx reg;
194   int hard_regno;
195 
196   reg = x;
197   if (SUBREG_P (x))
198     reg = SUBREG_REG (x);
199   if (! REG_P (reg))
200     return -1;
201   if (! HARD_REGISTER_NUM_P (hard_regno = REGNO (reg)))
202     hard_regno = lra_get_regno_hard_regno (hard_regno);
203   if (hard_regno < 0)
204     return -1;
205   if (final_p)
206     hard_regno = lra_get_elimination_hard_regno (hard_regno);
207   if (SUBREG_P (x))
208     hard_regno += subreg_regno_offset (hard_regno, GET_MODE (reg),
209 				       SUBREG_BYTE (x),  GET_MODE (x));
210   return hard_regno;
211 }
212 
213 /* If REGNO is a hard register or has been allocated a hard register,
214    return the class of that register.  If REGNO is a reload pseudo
215    created by the current constraints pass, return its allocno class.
216    Return NO_REGS otherwise.  */
217 static enum reg_class
get_reg_class(int regno)218 get_reg_class (int regno)
219 {
220   int hard_regno;
221 
222   if (! HARD_REGISTER_NUM_P (hard_regno = regno))
223     hard_regno = lra_get_regno_hard_regno (regno);
224   if (hard_regno >= 0)
225     {
226       hard_regno = lra_get_elimination_hard_regno (hard_regno);
227       return REGNO_REG_CLASS (hard_regno);
228     }
229   if (regno >= new_regno_start)
230     return lra_get_allocno_class (regno);
231   return NO_REGS;
232 }
233 
234 /* Return true if REG satisfies (or will satisfy) reg class constraint
235    CL.  Use elimination first if REG is a hard register.  If REG is a
236    reload pseudo created by this constraints pass, assume that it will
237    be allocated a hard register from its allocno class, but allow that
238    class to be narrowed to CL if it is currently a superset of CL and
239    if either:
240 
241    - ALLOW_ALL_RELOAD_CLASS_CHANGES_P is true or
242    - the instruction we're processing is not a reload move.
243 
244    If NEW_CLASS is nonnull, set *NEW_CLASS to the new allocno class of
245    REGNO (reg), or NO_REGS if no change in its class was needed.  */
246 static bool
247 in_class_p (rtx reg, enum reg_class cl, enum reg_class *new_class,
248 	    bool allow_all_reload_class_changes_p = false)
249 {
250   enum reg_class rclass, common_class;
251   machine_mode reg_mode;
252   rtx src;
253   int class_size, hard_regno, nregs, i, j;
254   int regno = REGNO (reg);
255 
256   if (new_class != NULL)
257     *new_class = NO_REGS;
258   if (regno < FIRST_PSEUDO_REGISTER)
259     {
260       rtx final_reg = reg;
261       rtx *final_loc = &final_reg;
262 
263       lra_eliminate_reg_if_possible (final_loc);
264       return TEST_HARD_REG_BIT (reg_class_contents[cl], REGNO (*final_loc));
265     }
266   reg_mode = GET_MODE (reg);
267   rclass = get_reg_class (regno);
268   src = curr_insn_set != NULL ? SET_SRC (curr_insn_set) : NULL;
269   if (regno < new_regno_start
270       /* Do not allow the constraints for reload instructions to
271 	 influence the classes of new pseudos.  These reloads are
272 	 typically moves that have many alternatives, and restricting
273 	 reload pseudos for one alternative may lead to situations
274 	 where other reload pseudos are no longer allocatable.  */
275       || (!allow_all_reload_class_changes_p
276 	  && INSN_UID (curr_insn) >= new_insn_uid_start
277 	  && src != NULL
278 	  && ((REG_P (src) || MEM_P (src))
279 	      || (GET_CODE (src) == SUBREG
280 		  && (REG_P (SUBREG_REG (src)) || MEM_P (SUBREG_REG (src)))))))
281     /* When we don't know what class will be used finally for reload
282        pseudos, we use ALL_REGS.  */
283     return ((regno >= new_regno_start && rclass == ALL_REGS)
284 	    || (rclass != NO_REGS && ira_class_subset_p[rclass][cl]
285 		&& ! hard_reg_set_subset_p (reg_class_contents[cl],
286 					    lra_no_alloc_regs)));
287   else
288     {
289       common_class = ira_reg_class_subset[rclass][cl];
290       if (new_class != NULL)
291 	*new_class = common_class;
292       if (hard_reg_set_subset_p (reg_class_contents[common_class],
293 				 lra_no_alloc_regs))
294 	return false;
295       /* Check that there are enough allocatable regs.  */
296       class_size = ira_class_hard_regs_num[common_class];
297       for (i = 0; i < class_size; i++)
298 	{
299 	  hard_regno = ira_class_hard_regs[common_class][i];
300 	  nregs = hard_regno_nregs (hard_regno, reg_mode);
301 	  if (nregs == 1)
302 	    return true;
303 	  for (j = 0; j < nregs; j++)
304 	    if (TEST_HARD_REG_BIT (lra_no_alloc_regs, hard_regno + j)
305 		|| ! TEST_HARD_REG_BIT (reg_class_contents[common_class],
306 					hard_regno + j))
307 	      break;
308 	  if (j >= nregs)
309 	    return true;
310 	}
311       return false;
312     }
313 }
314 
315 /* Return true if REGNO satisfies a memory constraint.	*/
316 static bool
in_mem_p(int regno)317 in_mem_p (int regno)
318 {
319   return get_reg_class (regno) == NO_REGS;
320 }
321 
322 /* Return 1 if ADDR is a valid memory address for mode MODE in address
323    space AS, and check that each pseudo has the proper kind of hard
324    reg.	 */
325 static int
valid_address_p(machine_mode mode ATTRIBUTE_UNUSED,rtx addr,addr_space_t as)326 valid_address_p (machine_mode mode ATTRIBUTE_UNUSED,
327 		 rtx addr, addr_space_t as)
328 {
329 #ifdef GO_IF_LEGITIMATE_ADDRESS
330   lra_assert (ADDR_SPACE_GENERIC_P (as));
331   GO_IF_LEGITIMATE_ADDRESS (mode, addr, win);
332   return 0;
333 
334  win:
335   return 1;
336 #else
337   return targetm.addr_space.legitimate_address_p (mode, addr, 0, as);
338 #endif
339 }
340 
341 namespace {
342   /* Temporarily eliminates registers in an address (for the lifetime of
343      the object).  */
344   class address_eliminator {
345   public:
346     address_eliminator (struct address_info *ad);
347     ~address_eliminator ();
348 
349   private:
350     struct address_info *m_ad;
351     rtx *m_base_loc;
352     rtx m_base_reg;
353     rtx *m_index_loc;
354     rtx m_index_reg;
355   };
356 }
357 
address_eliminator(struct address_info * ad)358 address_eliminator::address_eliminator (struct address_info *ad)
359   : m_ad (ad),
360     m_base_loc (strip_subreg (ad->base_term)),
361     m_base_reg (NULL_RTX),
362     m_index_loc (strip_subreg (ad->index_term)),
363     m_index_reg (NULL_RTX)
364 {
365   if (m_base_loc != NULL)
366     {
367       m_base_reg = *m_base_loc;
368       /* If we have non-legitimate address which is decomposed not in
369 	 the way we expected, don't do elimination here.  In such case
370 	 the address will be reloaded and elimination will be done in
371 	 reload insn finally.  */
372       if (REG_P (m_base_reg))
373 	lra_eliminate_reg_if_possible (m_base_loc);
374       if (m_ad->base_term2 != NULL)
375 	*m_ad->base_term2 = *m_ad->base_term;
376     }
377   if (m_index_loc != NULL)
378     {
379       m_index_reg = *m_index_loc;
380       if (REG_P (m_index_reg))
381 	lra_eliminate_reg_if_possible (m_index_loc);
382     }
383 }
384 
~address_eliminator()385 address_eliminator::~address_eliminator ()
386 {
387   if (m_base_loc && *m_base_loc != m_base_reg)
388     {
389       *m_base_loc = m_base_reg;
390       if (m_ad->base_term2 != NULL)
391 	*m_ad->base_term2 = *m_ad->base_term;
392     }
393   if (m_index_loc && *m_index_loc != m_index_reg)
394     *m_index_loc = m_index_reg;
395 }
396 
397 /* Return true if the eliminated form of AD is a legitimate target address.
398    If OP is a MEM, AD is the address within OP, otherwise OP should be
399    ignored.  CONSTRAINT is one constraint that the operand may need
400    to meet.  */
401 static bool
valid_address_p(rtx op,struct address_info * ad,enum constraint_num constraint)402 valid_address_p (rtx op, struct address_info *ad,
403 		 enum constraint_num constraint)
404 {
405   address_eliminator eliminator (ad);
406 
407   /* Allow a memory OP if it matches CONSTRAINT, even if CONSTRAINT is more
408      forgiving than "m".  */
409   if (MEM_P (op)
410       && (insn_extra_memory_constraint (constraint)
411 	  || insn_extra_special_memory_constraint (constraint))
412       && constraint_satisfied_p (op, constraint))
413     return true;
414 
415   return valid_address_p (ad->mode, *ad->outer, ad->as);
416 }
417 
418 /* Return true if the eliminated form of memory reference OP satisfies
419    extra (special) memory constraint CONSTRAINT.  */
420 static bool
satisfies_memory_constraint_p(rtx op,enum constraint_num constraint)421 satisfies_memory_constraint_p (rtx op, enum constraint_num constraint)
422 {
423   struct address_info ad;
424 
425   decompose_mem_address (&ad, op);
426   address_eliminator eliminator (&ad);
427   return constraint_satisfied_p (op, constraint);
428 }
429 
430 /* Return true if the eliminated form of address AD satisfies extra
431    address constraint CONSTRAINT.  */
432 static bool
satisfies_address_constraint_p(struct address_info * ad,enum constraint_num constraint)433 satisfies_address_constraint_p (struct address_info *ad,
434 				enum constraint_num constraint)
435 {
436   address_eliminator eliminator (ad);
437   return constraint_satisfied_p (*ad->outer, constraint);
438 }
439 
440 /* Return true if the eliminated form of address OP satisfies extra
441    address constraint CONSTRAINT.  */
442 static bool
satisfies_address_constraint_p(rtx op,enum constraint_num constraint)443 satisfies_address_constraint_p (rtx op, enum constraint_num constraint)
444 {
445   struct address_info ad;
446 
447   decompose_lea_address (&ad, &op);
448   return satisfies_address_constraint_p (&ad, constraint);
449 }
450 
451 /* Initiate equivalences for LRA.  As we keep original equivalences
452    before any elimination, we need to make copies otherwise any change
453    in insns might change the equivalences.  */
454 void
lra_init_equiv(void)455 lra_init_equiv (void)
456 {
457   ira_expand_reg_equiv ();
458   for (int i = FIRST_PSEUDO_REGISTER; i < max_reg_num (); i++)
459     {
460       rtx res;
461 
462       if ((res = ira_reg_equiv[i].memory) != NULL_RTX)
463 	ira_reg_equiv[i].memory = copy_rtx (res);
464       if ((res = ira_reg_equiv[i].invariant) != NULL_RTX)
465 	ira_reg_equiv[i].invariant = copy_rtx (res);
466     }
467 }
468 
469 static rtx loc_equivalence_callback (rtx, const_rtx, void *);
470 
471 /* Update equivalence for REGNO.  We need to this as the equivalence
472    might contain other pseudos which are changed by their
473    equivalences.  */
474 static void
update_equiv(int regno)475 update_equiv (int regno)
476 {
477   rtx x;
478 
479   if ((x = ira_reg_equiv[regno].memory) != NULL_RTX)
480     ira_reg_equiv[regno].memory
481       = simplify_replace_fn_rtx (x, NULL_RTX, loc_equivalence_callback,
482 				 NULL_RTX);
483   if ((x = ira_reg_equiv[regno].invariant) != NULL_RTX)
484     ira_reg_equiv[regno].invariant
485       = simplify_replace_fn_rtx (x, NULL_RTX, loc_equivalence_callback,
486 				 NULL_RTX);
487 }
488 
489 /* If we have decided to substitute X with another value, return that
490    value, otherwise return X.  */
491 static rtx
get_equiv(rtx x)492 get_equiv (rtx x)
493 {
494   int regno;
495   rtx res;
496 
497   if (! REG_P (x) || (regno = REGNO (x)) < FIRST_PSEUDO_REGISTER
498       || ! ira_reg_equiv[regno].defined_p
499       || ! ira_reg_equiv[regno].profitable_p
500       || lra_get_regno_hard_regno (regno) >= 0)
501     return x;
502   if ((res = ira_reg_equiv[regno].memory) != NULL_RTX)
503     {
504       if (targetm.cannot_substitute_mem_equiv_p (res))
505 	return x;
506       return res;
507     }
508   if ((res = ira_reg_equiv[regno].constant) != NULL_RTX)
509     return res;
510   if ((res = ira_reg_equiv[regno].invariant) != NULL_RTX)
511     return res;
512   gcc_unreachable ();
513 }
514 
515 /* If we have decided to substitute X with the equivalent value,
516    return that value after elimination for INSN, otherwise return
517    X.  */
518 static rtx
get_equiv_with_elimination(rtx x,rtx_insn * insn)519 get_equiv_with_elimination (rtx x, rtx_insn *insn)
520 {
521   rtx res = get_equiv (x);
522 
523   if (x == res || CONSTANT_P (res))
524     return res;
525   return lra_eliminate_regs_1 (insn, res, GET_MODE (res),
526 			       false, false, 0, true);
527 }
528 
529 /* Set up curr_operand_mode.  */
530 static void
init_curr_operand_mode(void)531 init_curr_operand_mode (void)
532 {
533   int nop = curr_static_id->n_operands;
534   for (int i = 0; i < nop; i++)
535     {
536       machine_mode mode = GET_MODE (*curr_id->operand_loc[i]);
537       if (mode == VOIDmode)
538 	{
539 	  /* The .md mode for address operands is the mode of the
540 	     addressed value rather than the mode of the address itself.  */
541 	  if (curr_id->icode >= 0 && curr_static_id->operand[i].is_address)
542 	    mode = Pmode;
543 	  else
544 	    mode = curr_static_id->operand[i].mode;
545 	}
546       curr_operand_mode[i] = mode;
547     }
548 }
549 
550 
551 
552 /* The page contains code to reuse input reloads.  */
553 
554 /* Structure describes input reload of the current insns.  */
555 struct input_reload
556 {
557   /* True for input reload of matched operands.  */
558   bool match_p;
559   /* Reloaded value.  */
560   rtx input;
561   /* Reload pseudo used.  */
562   rtx reg;
563 };
564 
565 /* The number of elements in the following array.  */
566 static int curr_insn_input_reloads_num;
567 /* Array containing info about input reloads.  It is used to find the
568    same input reload and reuse the reload pseudo in this case.	*/
569 static struct input_reload curr_insn_input_reloads[LRA_MAX_INSN_RELOADS];
570 
571 /* Initiate data concerning reuse of input reloads for the current
572    insn.  */
573 static void
init_curr_insn_input_reloads(void)574 init_curr_insn_input_reloads (void)
575 {
576   curr_insn_input_reloads_num = 0;
577 }
578 
579 /* Create a new pseudo using MODE, RCLASS, ORIGINAL or reuse an existing
580    reload pseudo.  Don't reuse an existing reload pseudo if IN_SUBREG_P
581    is true and the reused pseudo should be wrapped up in a SUBREG.
582    The result pseudo is returned through RESULT_REG.  Return TRUE if we
583    created a new pseudo, FALSE if we reused an existing reload pseudo.
584    Use TITLE to describe new registers for debug purposes.  */
585 static bool
get_reload_reg(enum op_type type,machine_mode mode,rtx original,enum reg_class rclass,bool in_subreg_p,const char * title,rtx * result_reg)586 get_reload_reg (enum op_type type, machine_mode mode, rtx original,
587 		enum reg_class rclass, bool in_subreg_p,
588 		const char *title, rtx *result_reg)
589 {
590   int i, regno;
591   enum reg_class new_class;
592   bool unique_p = false;
593 
594   if (type == OP_OUT)
595     {
596       /* Output reload registers tend to start out with a conservative
597 	 choice of register class.  Usually this is ALL_REGS, although
598 	 a target might narrow it (for performance reasons) through
599 	 targetm.preferred_reload_class.  It's therefore quite common
600 	 for a reload instruction to require a more restrictive class
601 	 than the class that was originally assigned to the reload register.
602 
603 	 In these situations, it's more efficient to refine the choice
604 	 of register class rather than create a second reload register.
605 	 This also helps to avoid cycling for registers that are only
606 	 used by reload instructions.  */
607       rtx src = curr_insn_set != NULL ? SET_SRC (curr_insn_set) : NULL;
608       if (REG_P (original)
609 	  && (int) REGNO (original) >= new_regno_start
610 	  && INSN_UID (curr_insn) >= new_insn_uid_start
611 	  && in_class_p (original, rclass, &new_class, true)
612 	  && src != NULL
613 	  && ((REG_P (src) || MEM_P (src))
614 	      || (GET_CODE (src) == SUBREG
615 		  && (REG_P (SUBREG_REG (src)) || MEM_P (SUBREG_REG (src))))))
616 	{
617 	  unsigned int regno = REGNO (original);
618 	  if (lra_dump_file != NULL)
619 	    {
620 	      fprintf (lra_dump_file, "	 Reuse r%d for output ", regno);
621 	      dump_value_slim (lra_dump_file, original, 1);
622 	    }
623 	  if (new_class != lra_get_allocno_class (regno))
624 	    lra_change_class (regno, new_class, ", change to", false);
625 	  if (lra_dump_file != NULL)
626 	    fprintf (lra_dump_file, "\n");
627 	  *result_reg = original;
628 	  return false;
629 	}
630       *result_reg
631 	= lra_create_new_reg_with_unique_value (mode, original, rclass, title);
632       return true;
633     }
634   /* Prevent reuse value of expression with side effects,
635      e.g. volatile memory.  */
636   if (! side_effects_p (original))
637     for (i = 0; i < curr_insn_input_reloads_num; i++)
638       {
639 	if (! curr_insn_input_reloads[i].match_p
640 	    && rtx_equal_p (curr_insn_input_reloads[i].input, original)
641 	    && in_class_p (curr_insn_input_reloads[i].reg, rclass, &new_class))
642 	  {
643 	    rtx reg = curr_insn_input_reloads[i].reg;
644 	    regno = REGNO (reg);
645 	    /* If input is equal to original and both are VOIDmode,
646 	       GET_MODE (reg) might be still different from mode.
647 	       Ensure we don't return *result_reg with wrong mode.  */
648 	    if (GET_MODE (reg) != mode)
649 	      {
650 		if (in_subreg_p)
651 		  continue;
652 		if (maybe_lt (GET_MODE_SIZE (GET_MODE (reg)),
653 			      GET_MODE_SIZE (mode)))
654 		  continue;
655 		reg = lowpart_subreg (mode, reg, GET_MODE (reg));
656 		if (reg == NULL_RTX || GET_CODE (reg) != SUBREG)
657 		  continue;
658 	      }
659 	    *result_reg = reg;
660 	    if (lra_dump_file != NULL)
661 	      {
662 		fprintf (lra_dump_file, "	 Reuse r%d for reload ", regno);
663 		dump_value_slim (lra_dump_file, original, 1);
664 	      }
665 	    if (new_class != lra_get_allocno_class (regno))
666 	      lra_change_class (regno, new_class, ", change to", false);
667 	    if (lra_dump_file != NULL)
668 	      fprintf (lra_dump_file, "\n");
669 	    return false;
670 	  }
671 	/* If we have an input reload with a different mode, make sure it
672 	   will get a different hard reg.  */
673 	else if (REG_P (original)
674 		 && REG_P (curr_insn_input_reloads[i].input)
675 		 && REGNO (original) == REGNO (curr_insn_input_reloads[i].input)
676 		 && (GET_MODE (original)
677 		     != GET_MODE (curr_insn_input_reloads[i].input)))
678 	  unique_p = true;
679       }
680   *result_reg = (unique_p
681 		 ? lra_create_new_reg_with_unique_value
682 		 : lra_create_new_reg) (mode, original, rclass, title);
683   lra_assert (curr_insn_input_reloads_num < LRA_MAX_INSN_RELOADS);
684   curr_insn_input_reloads[curr_insn_input_reloads_num].input = original;
685   curr_insn_input_reloads[curr_insn_input_reloads_num].match_p = false;
686   curr_insn_input_reloads[curr_insn_input_reloads_num++].reg = *result_reg;
687   return true;
688 }
689 
690 
691 /* The page contains major code to choose the current insn alternative
692    and generate reloads for it.	 */
693 
694 /* Return the offset from REGNO of the least significant register
695    in (reg:MODE REGNO).
696 
697    This function is used to tell whether two registers satisfy
698    a matching constraint.  (reg:MODE1 REGNO1) matches (reg:MODE2 REGNO2) if:
699 
700          REGNO1 + lra_constraint_offset (REGNO1, MODE1)
701 	 == REGNO2 + lra_constraint_offset (REGNO2, MODE2)  */
702 int
lra_constraint_offset(int regno,machine_mode mode)703 lra_constraint_offset (int regno, machine_mode mode)
704 {
705   lra_assert (regno < FIRST_PSEUDO_REGISTER);
706 
707   scalar_int_mode int_mode;
708   if (WORDS_BIG_ENDIAN
709       && is_a <scalar_int_mode> (mode, &int_mode)
710       && GET_MODE_SIZE (int_mode) > UNITS_PER_WORD)
711     return hard_regno_nregs (regno, mode) - 1;
712   return 0;
713 }
714 
715 /* Like rtx_equal_p except that it allows a REG and a SUBREG to match
716    if they are the same hard reg, and has special hacks for
717    auto-increment and auto-decrement.  This is specifically intended for
718    process_alt_operands to use in determining whether two operands
719    match.  X is the operand whose number is the lower of the two.
720 
721    It is supposed that X is the output operand and Y is the input
722    operand.  Y_HARD_REGNO is the final hard regno of register Y or
723    register in subreg Y as we know it now.  Otherwise, it is a
724    negative value.  */
725 static bool
operands_match_p(rtx x,rtx y,int y_hard_regno)726 operands_match_p (rtx x, rtx y, int y_hard_regno)
727 {
728   int i;
729   RTX_CODE code = GET_CODE (x);
730   const char *fmt;
731 
732   if (x == y)
733     return true;
734   if ((code == REG || (code == SUBREG && REG_P (SUBREG_REG (x))))
735       && (REG_P (y) || (GET_CODE (y) == SUBREG && REG_P (SUBREG_REG (y)))))
736     {
737       int j;
738 
739       i = get_hard_regno (x, false);
740       if (i < 0)
741 	goto slow;
742 
743       if ((j = y_hard_regno) < 0)
744 	goto slow;
745 
746       i += lra_constraint_offset (i, GET_MODE (x));
747       j += lra_constraint_offset (j, GET_MODE (y));
748 
749       return i == j;
750     }
751 
752   /* If two operands must match, because they are really a single
753      operand of an assembler insn, then two post-increments are invalid
754      because the assembler insn would increment only once.  On the
755      other hand, a post-increment matches ordinary indexing if the
756      post-increment is the output operand.  */
757   if (code == POST_DEC || code == POST_INC || code == POST_MODIFY)
758     return operands_match_p (XEXP (x, 0), y, y_hard_regno);
759 
760   /* Two pre-increments are invalid because the assembler insn would
761      increment only once.  On the other hand, a pre-increment matches
762      ordinary indexing if the pre-increment is the input operand.  */
763   if (GET_CODE (y) == PRE_DEC || GET_CODE (y) == PRE_INC
764       || GET_CODE (y) == PRE_MODIFY)
765     return operands_match_p (x, XEXP (y, 0), -1);
766 
767  slow:
768 
769   if (code == REG && REG_P (y))
770     return REGNO (x) == REGNO (y);
771 
772   if (code == REG && GET_CODE (y) == SUBREG && REG_P (SUBREG_REG (y))
773       && x == SUBREG_REG (y))
774     return true;
775   if (GET_CODE (y) == REG && code == SUBREG && REG_P (SUBREG_REG (x))
776       && SUBREG_REG (x) == y)
777     return true;
778 
779   /* Now we have disposed of all the cases in which different rtx
780      codes can match.  */
781   if (code != GET_CODE (y))
782     return false;
783 
784   /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.  */
785   if (GET_MODE (x) != GET_MODE (y))
786     return false;
787 
788   switch (code)
789     {
790     CASE_CONST_UNIQUE:
791       return false;
792 
793     case CONST_VECTOR:
794       if (!same_vector_encodings_p (x, y))
795 	return false;
796       break;
797 
798     case LABEL_REF:
799       return label_ref_label (x) == label_ref_label (y);
800     case SYMBOL_REF:
801       return XSTR (x, 0) == XSTR (y, 0);
802 
803     default:
804       break;
805     }
806 
807   /* Compare the elements.  If any pair of corresponding elements fail
808      to match, return false for the whole things.  */
809 
810   fmt = GET_RTX_FORMAT (code);
811   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
812     {
813       int val, j;
814       switch (fmt[i])
815 	{
816 	case 'w':
817 	  if (XWINT (x, i) != XWINT (y, i))
818 	    return false;
819 	  break;
820 
821 	case 'i':
822 	  if (XINT (x, i) != XINT (y, i))
823 	    return false;
824 	  break;
825 
826 	case 'p':
827 	  if (maybe_ne (SUBREG_BYTE (x), SUBREG_BYTE (y)))
828 	    return false;
829 	  break;
830 
831 	case 'e':
832 	  val = operands_match_p (XEXP (x, i), XEXP (y, i), -1);
833 	  if (val == 0)
834 	    return false;
835 	  break;
836 
837 	case '0':
838 	  break;
839 
840 	case 'E':
841 	  if (XVECLEN (x, i) != XVECLEN (y, i))
842 	    return false;
843 	  for (j = XVECLEN (x, i) - 1; j >= 0; --j)
844 	    {
845 	      val = operands_match_p (XVECEXP (x, i, j), XVECEXP (y, i, j), -1);
846 	      if (val == 0)
847 		return false;
848 	    }
849 	  break;
850 
851 	  /* It is believed that rtx's at this level will never
852 	     contain anything but integers and other rtx's, except for
853 	     within LABEL_REFs and SYMBOL_REFs.	 */
854 	default:
855 	  gcc_unreachable ();
856 	}
857     }
858   return true;
859 }
860 
861 /* True if X is a constant that can be forced into the constant pool.
862    MODE is the mode of the operand, or VOIDmode if not known.  */
863 #define CONST_POOL_OK_P(MODE, X)		\
864   ((MODE) != VOIDmode				\
865    && CONSTANT_P (X)				\
866    && GET_CODE (X) != HIGH			\
867    && GET_MODE_SIZE (MODE).is_constant ()	\
868    && !targetm.cannot_force_const_mem (MODE, X))
869 
870 /* True if C is a non-empty register class that has too few registers
871    to be safely used as a reload target class.	*/
872 #define SMALL_REGISTER_CLASS_P(C)		\
873   (ira_class_hard_regs_num [(C)] == 1		\
874    || (ira_class_hard_regs_num [(C)] >= 1	\
875        && targetm.class_likely_spilled_p (C)))
876 
877 /* If REG is a reload pseudo, try to make its class satisfying CL.  */
878 static void
narrow_reload_pseudo_class(rtx reg,enum reg_class cl)879 narrow_reload_pseudo_class (rtx reg, enum reg_class cl)
880 {
881   enum reg_class rclass;
882 
883   /* Do not make more accurate class from reloads generated.  They are
884      mostly moves with a lot of constraints.  Making more accurate
885      class may results in very narrow class and impossibility of find
886      registers for several reloads of one insn.	 */
887   if (INSN_UID (curr_insn) >= new_insn_uid_start)
888     return;
889   if (GET_CODE (reg) == SUBREG)
890     reg = SUBREG_REG (reg);
891   if (! REG_P (reg) || (int) REGNO (reg) < new_regno_start)
892     return;
893   if (in_class_p (reg, cl, &rclass) && rclass != cl)
894     lra_change_class (REGNO (reg), rclass, "      Change to", true);
895 }
896 
897 /* Searches X for any reference to a reg with the same value as REGNO,
898    returning the rtx of the reference found if any.  Otherwise,
899    returns NULL_RTX.  */
900 static rtx
regno_val_use_in(unsigned int regno,rtx x)901 regno_val_use_in (unsigned int regno, rtx x)
902 {
903   const char *fmt;
904   int i, j;
905   rtx tem;
906 
907   if (REG_P (x) && lra_reg_info[REGNO (x)].val == lra_reg_info[regno].val)
908     return x;
909 
910   fmt = GET_RTX_FORMAT (GET_CODE (x));
911   for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
912     {
913       if (fmt[i] == 'e')
914 	{
915 	  if ((tem = regno_val_use_in (regno, XEXP (x, i))))
916 	    return tem;
917 	}
918       else if (fmt[i] == 'E')
919 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
920 	  if ((tem = regno_val_use_in (regno , XVECEXP (x, i, j))))
921 	    return tem;
922     }
923 
924   return NULL_RTX;
925 }
926 
927 /* Return true if all current insn non-output operands except INS (it
928    has a negaitve end marker) do not use pseudos with the same value
929    as REGNO.  */
930 static bool
check_conflict_input_operands(int regno,signed char * ins)931 check_conflict_input_operands (int regno, signed char *ins)
932 {
933   int in;
934   int n_operands = curr_static_id->n_operands;
935 
936   for (int nop = 0; nop < n_operands; nop++)
937     if (! curr_static_id->operand[nop].is_operator
938 	&& curr_static_id->operand[nop].type != OP_OUT)
939       {
940 	for (int i = 0; (in = ins[i]) >= 0; i++)
941 	  if (in == nop)
942 	    break;
943 	if (in < 0
944 	    && regno_val_use_in (regno, *curr_id->operand_loc[nop]) != NULL_RTX)
945 	  return false;
946       }
947   return true;
948 }
949 
950 /* Generate reloads for matching OUT and INS (array of input operand
951    numbers with end marker -1) with reg class GOAL_CLASS, considering
952    output operands OUTS (similar array to INS) needing to be in different
953    registers.  Add input and output reloads correspondingly to the lists
954    *BEFORE and *AFTER.  OUT might be negative.  In this case we generate
955    input reloads for matched input operands INS.  EARLY_CLOBBER_P is a flag
956    that the output operand is early clobbered for chosen alternative.  */
957 static void
match_reload(signed char out,signed char * ins,signed char * outs,enum reg_class goal_class,rtx_insn ** before,rtx_insn ** after,bool early_clobber_p)958 match_reload (signed char out, signed char *ins, signed char *outs,
959 	      enum reg_class goal_class, rtx_insn **before,
960 	      rtx_insn **after, bool early_clobber_p)
961 {
962   bool out_conflict;
963   int i, in;
964   rtx new_in_reg, new_out_reg, reg;
965   machine_mode inmode, outmode;
966   rtx in_rtx = *curr_id->operand_loc[ins[0]];
967   rtx out_rtx = out < 0 ? in_rtx : *curr_id->operand_loc[out];
968 
969   inmode = curr_operand_mode[ins[0]];
970   outmode = out < 0 ? inmode : curr_operand_mode[out];
971   push_to_sequence (*before);
972   if (inmode != outmode)
973     {
974       /* process_alt_operands has already checked that the mode sizes
975 	 are ordered.  */
976       if (partial_subreg_p (outmode, inmode))
977 	{
978 	  reg = new_in_reg
979 	    = lra_create_new_reg_with_unique_value (inmode, in_rtx,
980 						    goal_class, "");
981 	  new_out_reg = gen_lowpart_SUBREG (outmode, reg);
982 	  LRA_SUBREG_P (new_out_reg) = 1;
983 	  /* If the input reg is dying here, we can use the same hard
984 	     register for REG and IN_RTX.  We do it only for original
985 	     pseudos as reload pseudos can die although original
986 	     pseudos still live where reload pseudos dies.  */
987 	  if (REG_P (in_rtx) && (int) REGNO (in_rtx) < lra_new_regno_start
988 	      && find_regno_note (curr_insn, REG_DEAD, REGNO (in_rtx))
989 	      && (!early_clobber_p
990 		  || check_conflict_input_operands(REGNO (in_rtx), ins)))
991 	    lra_assign_reg_val (REGNO (in_rtx), REGNO (reg));
992 	}
993       else
994 	{
995 	  reg = new_out_reg
996 	    = lra_create_new_reg_with_unique_value (outmode, out_rtx,
997 						    goal_class, "");
998 	  new_in_reg = gen_lowpart_SUBREG (inmode, reg);
999 	  /* NEW_IN_REG is non-paradoxical subreg.  We don't want
1000 	     NEW_OUT_REG living above.  We add clobber clause for
1001 	     this.  This is just a temporary clobber.  We can remove
1002 	     it at the end of LRA work.  */
1003 	  rtx_insn *clobber = emit_clobber (new_out_reg);
1004 	  LRA_TEMP_CLOBBER_P (PATTERN (clobber)) = 1;
1005 	  LRA_SUBREG_P (new_in_reg) = 1;
1006 	  if (GET_CODE (in_rtx) == SUBREG)
1007 	    {
1008 	      rtx subreg_reg = SUBREG_REG (in_rtx);
1009 
1010 	      /* If SUBREG_REG is dying here and sub-registers IN_RTX
1011 		 and NEW_IN_REG are similar, we can use the same hard
1012 		 register for REG and SUBREG_REG.  */
1013 	      if (REG_P (subreg_reg)
1014 		  && (int) REGNO (subreg_reg) < lra_new_regno_start
1015 		  && GET_MODE (subreg_reg) == outmode
1016 		  && known_eq (SUBREG_BYTE (in_rtx), SUBREG_BYTE (new_in_reg))
1017 		  && find_regno_note (curr_insn, REG_DEAD, REGNO (subreg_reg))
1018 		  && (! early_clobber_p
1019 		      || check_conflict_input_operands (REGNO (subreg_reg),
1020 							ins)))
1021 		lra_assign_reg_val (REGNO (subreg_reg), REGNO (reg));
1022 	    }
1023 	}
1024     }
1025   else
1026     {
1027       /* Pseudos have values -- see comments for lra_reg_info.
1028 	 Different pseudos with the same value do not conflict even if
1029 	 they live in the same place.  When we create a pseudo we
1030 	 assign value of original pseudo (if any) from which we
1031 	 created the new pseudo.  If we create the pseudo from the
1032 	 input pseudo, the new pseudo will have no conflict with the
1033 	 input pseudo which is wrong when the input pseudo lives after
1034 	 the insn and as the new pseudo value is changed by the insn
1035 	 output.  Therefore we create the new pseudo from the output
1036 	 except the case when we have single matched dying input
1037 	 pseudo.
1038 
1039 	 We cannot reuse the current output register because we might
1040 	 have a situation like "a <- a op b", where the constraints
1041 	 force the second input operand ("b") to match the output
1042 	 operand ("a").  "b" must then be copied into a new register
1043 	 so that it doesn't clobber the current value of "a".
1044 
1045 	 We cannot use the same value if the output pseudo is
1046 	 early clobbered or the input pseudo is mentioned in the
1047 	 output, e.g. as an address part in memory, because
1048 	 output reload will actually extend the pseudo liveness.
1049 	 We don't care about eliminable hard regs here as we are
1050 	 interesting only in pseudos.  */
1051 
1052       /* Matching input's register value is the same as one of the other
1053 	 output operand.  Output operands in a parallel insn must be in
1054 	 different registers.  */
1055       out_conflict = false;
1056       if (REG_P (in_rtx))
1057 	{
1058 	  for (i = 0; outs[i] >= 0; i++)
1059 	    {
1060 	      rtx other_out_rtx = *curr_id->operand_loc[outs[i]];
1061 	      if (REG_P (other_out_rtx)
1062 		  && (regno_val_use_in (REGNO (in_rtx), other_out_rtx)
1063 		      != NULL_RTX))
1064 		{
1065 		  out_conflict = true;
1066 		  break;
1067 		}
1068 	    }
1069 	}
1070 
1071       new_in_reg = new_out_reg
1072 	= (! early_clobber_p && ins[1] < 0 && REG_P (in_rtx)
1073 	   && (int) REGNO (in_rtx) < lra_new_regno_start
1074 	   && find_regno_note (curr_insn, REG_DEAD, REGNO (in_rtx))
1075 	   && (! early_clobber_p
1076 	       || check_conflict_input_operands (REGNO (in_rtx), ins))
1077 	   && (out < 0
1078 	       || regno_val_use_in (REGNO (in_rtx), out_rtx) == NULL_RTX)
1079 	   && !out_conflict
1080 	   ? lra_create_new_reg (inmode, in_rtx, goal_class, "")
1081 	   : lra_create_new_reg_with_unique_value (outmode, out_rtx,
1082 						   goal_class, ""));
1083     }
1084   /* In operand can be got from transformations before processing insn
1085      constraints.  One example of such transformations is subreg
1086      reloading (see function simplify_operand_subreg).  The new
1087      pseudos created by the transformations might have inaccurate
1088      class (ALL_REGS) and we should make their classes more
1089      accurate.  */
1090   narrow_reload_pseudo_class (in_rtx, goal_class);
1091   lra_emit_move (copy_rtx (new_in_reg), in_rtx);
1092   *before = get_insns ();
1093   end_sequence ();
1094   /* Add the new pseudo to consider values of subsequent input reload
1095      pseudos.  */
1096   lra_assert (curr_insn_input_reloads_num < LRA_MAX_INSN_RELOADS);
1097   curr_insn_input_reloads[curr_insn_input_reloads_num].input = in_rtx;
1098   curr_insn_input_reloads[curr_insn_input_reloads_num].match_p = true;
1099   curr_insn_input_reloads[curr_insn_input_reloads_num++].reg = new_in_reg;
1100   for (i = 0; (in = ins[i]) >= 0; i++)
1101     if (GET_MODE (*curr_id->operand_loc[in]) == VOIDmode
1102 	|| GET_MODE (new_in_reg) == GET_MODE (*curr_id->operand_loc[in]))
1103       *curr_id->operand_loc[in] = new_in_reg;
1104     else
1105       {
1106 	lra_assert
1107 	  (GET_MODE (new_out_reg) == GET_MODE (*curr_id->operand_loc[in]));
1108 	*curr_id->operand_loc[in] = new_out_reg;
1109       }
1110   lra_update_dups (curr_id, ins);
1111   if (out < 0)
1112     return;
1113   /* See a comment for the input operand above.  */
1114   narrow_reload_pseudo_class (out_rtx, goal_class);
1115   if (find_reg_note (curr_insn, REG_UNUSED, out_rtx) == NULL_RTX)
1116     {
1117       reg = SUBREG_P (out_rtx) ? SUBREG_REG (out_rtx) : out_rtx;
1118       start_sequence ();
1119       /* If we had strict_low_part, use it also in reload to keep other
1120 	 parts unchanged but do it only for regs as strict_low_part
1121 	 has no sense for memory and probably there is no insn pattern
1122 	 to match the reload insn in memory case.  */
1123       if (out >= 0 && curr_static_id->operand[out].strict_low && REG_P (reg))
1124 	out_rtx = gen_rtx_STRICT_LOW_PART (VOIDmode, out_rtx);
1125       lra_emit_move (out_rtx, copy_rtx (new_out_reg));
1126       emit_insn (*after);
1127       *after = get_insns ();
1128       end_sequence ();
1129     }
1130   *curr_id->operand_loc[out] = new_out_reg;
1131   lra_update_dup (curr_id, out);
1132 }
1133 
1134 /* Return register class which is union of all reg classes in insn
1135    constraint alternative string starting with P.  */
1136 static enum reg_class
reg_class_from_constraints(const char * p)1137 reg_class_from_constraints (const char *p)
1138 {
1139   int c, len;
1140   enum reg_class op_class = NO_REGS;
1141 
1142   do
1143     switch ((c = *p, len = CONSTRAINT_LEN (c, p)), c)
1144       {
1145       case '#':
1146       case ',':
1147 	return op_class;
1148 
1149       case 'g':
1150 	op_class = reg_class_subunion[op_class][GENERAL_REGS];
1151 	break;
1152 
1153       default:
1154 	enum constraint_num cn = lookup_constraint (p);
1155 	enum reg_class cl = reg_class_for_constraint (cn);
1156 	if (cl == NO_REGS)
1157 	  {
1158 	    if (insn_extra_address_constraint (cn))
1159 	      op_class
1160 		= (reg_class_subunion
1161 		   [op_class][base_reg_class (VOIDmode, ADDR_SPACE_GENERIC,
1162 					      ADDRESS, SCRATCH)]);
1163 	    break;
1164 	  }
1165 
1166 	op_class = reg_class_subunion[op_class][cl];
1167  	break;
1168       }
1169   while ((p += len), c);
1170   return op_class;
1171 }
1172 
1173 /* If OP is a register, return the class of the register as per
1174    get_reg_class, otherwise return NO_REGS.  */
1175 static inline enum reg_class
get_op_class(rtx op)1176 get_op_class (rtx op)
1177 {
1178   return REG_P (op) ? get_reg_class (REGNO (op)) : NO_REGS;
1179 }
1180 
1181 /* Return generated insn mem_pseudo:=val if TO_P or val:=mem_pseudo
1182    otherwise.  If modes of MEM_PSEUDO and VAL are different, use
1183    SUBREG for VAL to make them equal.  */
1184 static rtx_insn *
emit_spill_move(bool to_p,rtx mem_pseudo,rtx val)1185 emit_spill_move (bool to_p, rtx mem_pseudo, rtx val)
1186 {
1187   if (GET_MODE (mem_pseudo) != GET_MODE (val))
1188     {
1189       /* Usually size of mem_pseudo is greater than val size but in
1190 	 rare cases it can be less as it can be defined by target
1191 	 dependent macro HARD_REGNO_CALLER_SAVE_MODE.  */
1192       if (! MEM_P (val))
1193 	{
1194 	  val = gen_lowpart_SUBREG (GET_MODE (mem_pseudo),
1195 				    GET_CODE (val) == SUBREG
1196 				    ? SUBREG_REG (val) : val);
1197 	  LRA_SUBREG_P (val) = 1;
1198 	}
1199       else
1200 	{
1201 	  mem_pseudo = gen_lowpart_SUBREG (GET_MODE (val), mem_pseudo);
1202 	  LRA_SUBREG_P (mem_pseudo) = 1;
1203 	}
1204     }
1205   return to_p ? gen_move_insn (mem_pseudo, val)
1206 	      : gen_move_insn (val, mem_pseudo);
1207 }
1208 
1209 /* Process a special case insn (register move), return true if we
1210    don't need to process it anymore.  INSN should be a single set
1211    insn.  Set up that RTL was changed through CHANGE_P and that hook
1212    TARGET_SECONDARY_MEMORY_NEEDED says to use secondary memory through
1213    SEC_MEM_P.  */
1214 static bool
check_and_process_move(bool * change_p,bool * sec_mem_p ATTRIBUTE_UNUSED)1215 check_and_process_move (bool *change_p, bool *sec_mem_p ATTRIBUTE_UNUSED)
1216 {
1217   int sregno, dregno;
1218   rtx dest, src, dreg, sreg, new_reg, scratch_reg;
1219   rtx_insn *before;
1220   enum reg_class dclass, sclass, secondary_class;
1221   secondary_reload_info sri;
1222 
1223   lra_assert (curr_insn_set != NULL_RTX);
1224   dreg = dest = SET_DEST (curr_insn_set);
1225   sreg = src = SET_SRC (curr_insn_set);
1226   if (GET_CODE (dest) == SUBREG)
1227     dreg = SUBREG_REG (dest);
1228   if (GET_CODE (src) == SUBREG)
1229     sreg = SUBREG_REG (src);
1230   if (! (REG_P (dreg) || MEM_P (dreg)) || ! (REG_P (sreg) || MEM_P (sreg)))
1231     return false;
1232   sclass = dclass = NO_REGS;
1233   if (REG_P (dreg))
1234     dclass = get_reg_class (REGNO (dreg));
1235   gcc_assert (dclass < LIM_REG_CLASSES);
1236   if (dclass == ALL_REGS)
1237     /* ALL_REGS is used for new pseudos created by transformations
1238        like reload of SUBREG_REG (see function
1239        simplify_operand_subreg).  We don't know their class yet.  We
1240        should figure out the class from processing the insn
1241        constraints not in this fast path function.  Even if ALL_REGS
1242        were a right class for the pseudo, secondary_... hooks usually
1243        are not define for ALL_REGS.  */
1244     return false;
1245   if (REG_P (sreg))
1246     sclass = get_reg_class (REGNO (sreg));
1247   gcc_assert (sclass < LIM_REG_CLASSES);
1248   if (sclass == ALL_REGS)
1249     /* See comments above.  */
1250     return false;
1251   if (sclass == NO_REGS && dclass == NO_REGS)
1252     return false;
1253   if (targetm.secondary_memory_needed (GET_MODE (src), sclass, dclass)
1254       && ((sclass != NO_REGS && dclass != NO_REGS)
1255 	  || (GET_MODE (src)
1256 	      != targetm.secondary_memory_needed_mode (GET_MODE (src)))))
1257     {
1258       *sec_mem_p = true;
1259       return false;
1260     }
1261   if (! REG_P (dreg) || ! REG_P (sreg))
1262     return false;
1263   sri.prev_sri = NULL;
1264   sri.icode = CODE_FOR_nothing;
1265   sri.extra_cost = 0;
1266   secondary_class = NO_REGS;
1267   /* Set up hard register for a reload pseudo for hook
1268      secondary_reload because some targets just ignore unassigned
1269      pseudos in the hook.  */
1270   if (dclass != NO_REGS && lra_get_regno_hard_regno (REGNO (dreg)) < 0)
1271     {
1272       dregno = REGNO (dreg);
1273       reg_renumber[dregno] = ira_class_hard_regs[dclass][0];
1274     }
1275   else
1276     dregno = -1;
1277   if (sclass != NO_REGS && lra_get_regno_hard_regno (REGNO (sreg)) < 0)
1278     {
1279       sregno = REGNO (sreg);
1280       reg_renumber[sregno] = ira_class_hard_regs[sclass][0];
1281     }
1282   else
1283     sregno = -1;
1284   if (sclass != NO_REGS)
1285     secondary_class
1286       = (enum reg_class) targetm.secondary_reload (false, dest,
1287 						   (reg_class_t) sclass,
1288 						   GET_MODE (src), &sri);
1289   if (sclass == NO_REGS
1290       || ((secondary_class != NO_REGS || sri.icode != CODE_FOR_nothing)
1291 	  && dclass != NO_REGS))
1292     {
1293       enum reg_class old_sclass = secondary_class;
1294       secondary_reload_info old_sri = sri;
1295 
1296       sri.prev_sri = NULL;
1297       sri.icode = CODE_FOR_nothing;
1298       sri.extra_cost = 0;
1299       secondary_class
1300 	= (enum reg_class) targetm.secondary_reload (true, src,
1301 						     (reg_class_t) dclass,
1302 						     GET_MODE (src), &sri);
1303       /* Check the target hook consistency.  */
1304       lra_assert
1305 	((secondary_class == NO_REGS && sri.icode == CODE_FOR_nothing)
1306 	 || (old_sclass == NO_REGS && old_sri.icode == CODE_FOR_nothing)
1307 	 || (secondary_class == old_sclass && sri.icode == old_sri.icode));
1308     }
1309   if (sregno >= 0)
1310     reg_renumber [sregno] = -1;
1311   if (dregno >= 0)
1312     reg_renumber [dregno] = -1;
1313   if (secondary_class == NO_REGS && sri.icode == CODE_FOR_nothing)
1314     return false;
1315   *change_p = true;
1316   new_reg = NULL_RTX;
1317   if (secondary_class != NO_REGS)
1318     new_reg = lra_create_new_reg_with_unique_value (GET_MODE (src), NULL_RTX,
1319 						    secondary_class,
1320 						    "secondary");
1321   start_sequence ();
1322   if (sri.icode == CODE_FOR_nothing)
1323     lra_emit_move (new_reg, src);
1324   else
1325     {
1326       enum reg_class scratch_class;
1327 
1328       scratch_class = (reg_class_from_constraints
1329 		       (insn_data[sri.icode].operand[2].constraint));
1330       scratch_reg = (lra_create_new_reg_with_unique_value
1331 		     (insn_data[sri.icode].operand[2].mode, NULL_RTX,
1332 		      scratch_class, "scratch"));
1333       emit_insn (GEN_FCN (sri.icode) (new_reg != NULL_RTX ? new_reg : dest,
1334 				      src, scratch_reg));
1335     }
1336   before = get_insns ();
1337   end_sequence ();
1338   lra_process_new_insns (curr_insn, before, NULL, "Inserting the move");
1339   if (new_reg != NULL_RTX)
1340     SET_SRC (curr_insn_set) = new_reg;
1341   else
1342     {
1343       if (lra_dump_file != NULL)
1344 	{
1345 	  fprintf (lra_dump_file, "Deleting move %u\n", INSN_UID (curr_insn));
1346 	  dump_insn_slim (lra_dump_file, curr_insn);
1347 	}
1348       lra_set_insn_deleted (curr_insn);
1349       return true;
1350     }
1351   return false;
1352 }
1353 
1354 /* The following data describe the result of process_alt_operands.
1355    The data are used in curr_insn_transform to generate reloads.  */
1356 
1357 /* The chosen reg classes which should be used for the corresponding
1358    operands.  */
1359 static enum reg_class goal_alt[MAX_RECOG_OPERANDS];
1360 /* True if the operand should be the same as another operand and that
1361    other operand does not need a reload.  */
1362 static bool goal_alt_match_win[MAX_RECOG_OPERANDS];
1363 /* True if the operand does not need a reload.	*/
1364 static bool goal_alt_win[MAX_RECOG_OPERANDS];
1365 /* True if the operand can be offsetable memory.  */
1366 static bool goal_alt_offmemok[MAX_RECOG_OPERANDS];
1367 /* The number of an operand to which given operand can be matched to.  */
1368 static int goal_alt_matches[MAX_RECOG_OPERANDS];
1369 /* The number of elements in the following array.  */
1370 static int goal_alt_dont_inherit_ops_num;
1371 /* Numbers of operands whose reload pseudos should not be inherited.  */
1372 static int goal_alt_dont_inherit_ops[MAX_RECOG_OPERANDS];
1373 /* True if the insn commutative operands should be swapped.  */
1374 static bool goal_alt_swapped;
1375 /* The chosen insn alternative.	 */
1376 static int goal_alt_number;
1377 
1378 /* True if the corresponding operand is the result of an equivalence
1379    substitution.  */
1380 static bool equiv_substition_p[MAX_RECOG_OPERANDS];
1381 
1382 /* The following five variables are used to choose the best insn
1383    alternative.	 They reflect final characteristics of the best
1384    alternative.	 */
1385 
1386 /* Number of necessary reloads and overall cost reflecting the
1387    previous value and other unpleasantness of the best alternative.  */
1388 static int best_losers, best_overall;
1389 /* Overall number hard registers used for reloads.  For example, on
1390    some targets we need 2 general registers to reload DFmode and only
1391    one floating point register.	 */
1392 static int best_reload_nregs;
1393 /* Overall number reflecting distances of previous reloading the same
1394    value.  The distances are counted from the current BB start.  It is
1395    used to improve inheritance chances.  */
1396 static int best_reload_sum;
1397 
1398 /* True if the current insn should have no correspondingly input or
1399    output reloads.  */
1400 static bool no_input_reloads_p, no_output_reloads_p;
1401 
1402 /* True if we swapped the commutative operands in the current
1403    insn.  */
1404 static int curr_swapped;
1405 
1406 /* if CHECK_ONLY_P is false, arrange for address element *LOC to be a
1407    register of class CL.  Add any input reloads to list BEFORE.  AFTER
1408    is nonnull if *LOC is an automodified value; handle that case by
1409    adding the required output reloads to list AFTER.  Return true if
1410    the RTL was changed.
1411 
1412    if CHECK_ONLY_P is true, check that the *LOC is a correct address
1413    register.  Return false if the address register is correct.  */
1414 static bool
process_addr_reg(rtx * loc,bool check_only_p,rtx_insn ** before,rtx_insn ** after,enum reg_class cl)1415 process_addr_reg (rtx *loc, bool check_only_p, rtx_insn **before, rtx_insn **after,
1416 		  enum reg_class cl)
1417 {
1418   int regno;
1419   enum reg_class rclass, new_class;
1420   rtx reg;
1421   rtx new_reg;
1422   machine_mode mode;
1423   bool subreg_p, before_p = false;
1424 
1425   subreg_p = GET_CODE (*loc) == SUBREG;
1426   if (subreg_p)
1427     {
1428       reg = SUBREG_REG (*loc);
1429       mode = GET_MODE (reg);
1430 
1431       /* For mode with size bigger than ptr_mode, there unlikely to be "mov"
1432 	 between two registers with different classes, but there normally will
1433 	 be "mov" which transfers element of vector register into the general
1434 	 register, and this normally will be a subreg which should be reloaded
1435 	 as a whole.  This is particularly likely to be triggered when
1436 	 -fno-split-wide-types specified.  */
1437       if (!REG_P (reg)
1438 	  || in_class_p (reg, cl, &new_class)
1439 	  || known_le (GET_MODE_SIZE (mode), GET_MODE_SIZE (ptr_mode)))
1440        loc = &SUBREG_REG (*loc);
1441     }
1442 
1443   reg = *loc;
1444   mode = GET_MODE (reg);
1445   if (! REG_P (reg))
1446     {
1447       if (check_only_p)
1448 	return true;
1449       /* Always reload memory in an address even if the target supports
1450 	 such addresses.  */
1451       new_reg = lra_create_new_reg_with_unique_value (mode, reg, cl, "address");
1452       before_p = true;
1453     }
1454   else
1455     {
1456       regno = REGNO (reg);
1457       rclass = get_reg_class (regno);
1458       if (! check_only_p
1459 	  && (*loc = get_equiv_with_elimination (reg, curr_insn)) != reg)
1460 	{
1461 	  if (lra_dump_file != NULL)
1462 	    {
1463 	      fprintf (lra_dump_file,
1464 		       "Changing pseudo %d in address of insn %u on equiv ",
1465 		       REGNO (reg), INSN_UID (curr_insn));
1466 	      dump_value_slim (lra_dump_file, *loc, 1);
1467 	      fprintf (lra_dump_file, "\n");
1468 	    }
1469 	  *loc = copy_rtx (*loc);
1470 	}
1471       if (*loc != reg || ! in_class_p (reg, cl, &new_class))
1472 	{
1473 	  if (check_only_p)
1474 	    return true;
1475 	  reg = *loc;
1476 	  if (get_reload_reg (after == NULL ? OP_IN : OP_INOUT,
1477 			      mode, reg, cl, subreg_p, "address", &new_reg))
1478 	    before_p = true;
1479 	}
1480       else if (new_class != NO_REGS && rclass != new_class)
1481 	{
1482 	  if (check_only_p)
1483 	    return true;
1484 	  lra_change_class (regno, new_class, "	   Change to", true);
1485 	  return false;
1486 	}
1487       else
1488 	return false;
1489     }
1490   if (before_p)
1491     {
1492       push_to_sequence (*before);
1493       lra_emit_move (new_reg, reg);
1494       *before = get_insns ();
1495       end_sequence ();
1496     }
1497   *loc = new_reg;
1498   if (after != NULL)
1499     {
1500       start_sequence ();
1501       lra_emit_move (before_p ? copy_rtx (reg) : reg, new_reg);
1502       emit_insn (*after);
1503       *after = get_insns ();
1504       end_sequence ();
1505     }
1506   return true;
1507 }
1508 
1509 /* Insert move insn in simplify_operand_subreg. BEFORE returns
1510    the insn to be inserted before curr insn. AFTER returns the
1511    the insn to be inserted after curr insn.  ORIGREG and NEWREG
1512    are the original reg and new reg for reload.  */
1513 static void
insert_move_for_subreg(rtx_insn ** before,rtx_insn ** after,rtx origreg,rtx newreg)1514 insert_move_for_subreg (rtx_insn **before, rtx_insn **after, rtx origreg,
1515 			rtx newreg)
1516 {
1517   if (before)
1518     {
1519       push_to_sequence (*before);
1520       lra_emit_move (newreg, origreg);
1521       *before = get_insns ();
1522       end_sequence ();
1523     }
1524   if (after)
1525     {
1526       start_sequence ();
1527       lra_emit_move (origreg, newreg);
1528       emit_insn (*after);
1529       *after = get_insns ();
1530       end_sequence ();
1531     }
1532 }
1533 
1534 static int valid_address_p (machine_mode mode, rtx addr, addr_space_t as);
1535 static bool process_address (int, bool, rtx_insn **, rtx_insn **);
1536 
1537 /* Make reloads for subreg in operand NOP with internal subreg mode
1538    REG_MODE, add new reloads for further processing.  Return true if
1539    any change was done.  */
1540 static bool
simplify_operand_subreg(int nop,machine_mode reg_mode)1541 simplify_operand_subreg (int nop, machine_mode reg_mode)
1542 {
1543   int hard_regno, inner_hard_regno;
1544   rtx_insn *before, *after;
1545   machine_mode mode, innermode;
1546   rtx reg, new_reg;
1547   rtx operand = *curr_id->operand_loc[nop];
1548   enum reg_class regclass;
1549   enum op_type type;
1550 
1551   before = after = NULL;
1552 
1553   if (GET_CODE (operand) != SUBREG)
1554     return false;
1555 
1556   mode = GET_MODE (operand);
1557   reg = SUBREG_REG (operand);
1558   innermode = GET_MODE (reg);
1559   type = curr_static_id->operand[nop].type;
1560   if (MEM_P (reg))
1561     {
1562       const bool addr_was_valid
1563 	= valid_address_p (innermode, XEXP (reg, 0), MEM_ADDR_SPACE (reg));
1564       alter_subreg (curr_id->operand_loc[nop], false);
1565       rtx subst = *curr_id->operand_loc[nop];
1566       lra_assert (MEM_P (subst));
1567       const bool addr_is_valid = valid_address_p (GET_MODE (subst),
1568 						  XEXP (subst, 0),
1569 						  MEM_ADDR_SPACE (subst));
1570       if (!addr_was_valid
1571 	  || addr_is_valid
1572 	  || ((get_constraint_type (lookup_constraint
1573 				    (curr_static_id->operand[nop].constraint))
1574 	       != CT_SPECIAL_MEMORY)
1575 	      /* We still can reload address and if the address is
1576 		 valid, we can remove subreg without reloading its
1577 		 inner memory.  */
1578 	      && valid_address_p (GET_MODE (subst),
1579 				  regno_reg_rtx
1580 				  [ira_class_hard_regs
1581 				   [base_reg_class (GET_MODE (subst),
1582 						    MEM_ADDR_SPACE (subst),
1583 						    ADDRESS, SCRATCH)][0]],
1584 				  MEM_ADDR_SPACE (subst))))
1585 	{
1586 	  /* If we change the address for a paradoxical subreg of memory, the
1587 	     new address might violate the necessary alignment or the access
1588 	     might be slow; take this into consideration.  We need not worry
1589 	     about accesses beyond allocated memory for paradoxical memory
1590 	     subregs as we don't substitute such equiv memory (see processing
1591 	     equivalences in function lra_constraints) and because for spilled
1592 	     pseudos we allocate stack memory enough for the biggest
1593 	     corresponding paradoxical subreg.
1594 
1595 	     However, do not blindly simplify a (subreg (mem ...)) for
1596 	     WORD_REGISTER_OPERATIONS targets as this may lead to loading junk
1597 	     data into a register when the inner is narrower than outer or
1598 	     missing important data from memory when the inner is wider than
1599 	     outer.  This rule only applies to modes that are no wider than
1600 	     a word.
1601 
1602 	     If valid memory becomes invalid after subreg elimination
1603 	     and address might be different we still have to reload
1604 	     memory.
1605 	  */
1606 	  if ((! addr_was_valid
1607 	       || addr_is_valid
1608 	       || known_eq (GET_MODE_SIZE (mode), GET_MODE_SIZE (innermode)))
1609 	      && !(maybe_ne (GET_MODE_PRECISION (mode),
1610 			     GET_MODE_PRECISION (innermode))
1611 		   && known_le (GET_MODE_SIZE (mode), UNITS_PER_WORD)
1612 		   && known_le (GET_MODE_SIZE (innermode), UNITS_PER_WORD)
1613 		   && WORD_REGISTER_OPERATIONS)
1614 	      && (!(MEM_ALIGN (subst) < GET_MODE_ALIGNMENT (mode)
1615 		    && targetm.slow_unaligned_access (mode, MEM_ALIGN (subst)))
1616 		  || (MEM_ALIGN (reg) < GET_MODE_ALIGNMENT (innermode)
1617 		      && targetm.slow_unaligned_access (innermode,
1618 							MEM_ALIGN (reg)))))
1619 	    return true;
1620 
1621 	  *curr_id->operand_loc[nop] = operand;
1622 
1623 	  /* But if the address was not valid, we cannot reload the MEM without
1624 	     reloading the address first.  */
1625 	  if (!addr_was_valid)
1626 	    process_address (nop, false, &before, &after);
1627 
1628 	  /* INNERMODE is fast, MODE slow.  Reload the mem in INNERMODE.  */
1629 	  enum reg_class rclass
1630 	    = (enum reg_class) targetm.preferred_reload_class (reg, ALL_REGS);
1631 	  if (get_reload_reg (curr_static_id->operand[nop].type, innermode,
1632 			      reg, rclass, TRUE, "slow/invalid mem", &new_reg))
1633 	    {
1634 	      bool insert_before, insert_after;
1635 	      bitmap_set_bit (&lra_subreg_reload_pseudos, REGNO (new_reg));
1636 
1637 	      insert_before = (type != OP_OUT
1638 			       || partial_subreg_p (mode, innermode));
1639 	      insert_after = type != OP_IN;
1640 	      insert_move_for_subreg (insert_before ? &before : NULL,
1641 				      insert_after ? &after : NULL,
1642 				      reg, new_reg);
1643 	    }
1644 	  SUBREG_REG (operand) = new_reg;
1645 
1646 	  /* Convert to MODE.  */
1647 	  reg = operand;
1648 	  rclass
1649 	    = (enum reg_class) targetm.preferred_reload_class (reg, ALL_REGS);
1650 	  if (get_reload_reg (curr_static_id->operand[nop].type, mode, reg,
1651 			      rclass, TRUE, "slow/invalid mem", &new_reg))
1652 	    {
1653 	      bool insert_before, insert_after;
1654 	      bitmap_set_bit (&lra_subreg_reload_pseudos, REGNO (new_reg));
1655 
1656 	      insert_before = type != OP_OUT;
1657 	      insert_after = type != OP_IN;
1658 	      insert_move_for_subreg (insert_before ? &before : NULL,
1659 				      insert_after ? &after : NULL,
1660 				      reg, new_reg);
1661 	    }
1662 	  *curr_id->operand_loc[nop] = new_reg;
1663 	  lra_process_new_insns (curr_insn, before, after,
1664 				 "Inserting slow/invalid mem reload");
1665 	  return true;
1666 	}
1667 
1668       /* If the address was valid and became invalid, prefer to reload
1669 	 the memory.  Typical case is when the index scale should
1670 	 correspond the memory.  */
1671       *curr_id->operand_loc[nop] = operand;
1672       /* Do not return false here as the MEM_P (reg) will be processed
1673 	 later in this function.  */
1674     }
1675   else if (REG_P (reg) && REGNO (reg) < FIRST_PSEUDO_REGISTER)
1676     {
1677       alter_subreg (curr_id->operand_loc[nop], false);
1678       return true;
1679     }
1680   else if (CONSTANT_P (reg))
1681     {
1682       /* Try to simplify subreg of constant.  It is usually result of
1683 	 equivalence substitution.  */
1684       if (innermode == VOIDmode
1685 	  && (innermode = original_subreg_reg_mode[nop]) == VOIDmode)
1686 	innermode = curr_static_id->operand[nop].mode;
1687       if ((new_reg = simplify_subreg (mode, reg, innermode,
1688 				      SUBREG_BYTE (operand))) != NULL_RTX)
1689 	{
1690 	  *curr_id->operand_loc[nop] = new_reg;
1691 	  return true;
1692 	}
1693     }
1694   /* Put constant into memory when we have mixed modes.  It generates
1695      a better code in most cases as it does not need a secondary
1696      reload memory.  It also prevents LRA looping when LRA is using
1697      secondary reload memory again and again.  */
1698   if (CONSTANT_P (reg) && CONST_POOL_OK_P (reg_mode, reg)
1699       && SCALAR_INT_MODE_P (reg_mode) != SCALAR_INT_MODE_P (mode))
1700     {
1701       SUBREG_REG (operand) = force_const_mem (reg_mode, reg);
1702       alter_subreg (curr_id->operand_loc[nop], false);
1703       return true;
1704     }
1705   /* Force a reload of the SUBREG_REG if this is a constant or PLUS or
1706      if there may be a problem accessing OPERAND in the outer
1707      mode.  */
1708   if ((REG_P (reg)
1709        && REGNO (reg) >= FIRST_PSEUDO_REGISTER
1710        && (hard_regno = lra_get_regno_hard_regno (REGNO (reg))) >= 0
1711        /* Don't reload paradoxical subregs because we could be looping
1712 	  having repeatedly final regno out of hard regs range.  */
1713        && (hard_regno_nregs (hard_regno, innermode)
1714 	   >= hard_regno_nregs (hard_regno, mode))
1715        && simplify_subreg_regno (hard_regno, innermode,
1716 				 SUBREG_BYTE (operand), mode) < 0
1717        /* Don't reload subreg for matching reload.  It is actually
1718 	  valid subreg in LRA.  */
1719        && ! LRA_SUBREG_P (operand))
1720       || CONSTANT_P (reg) || GET_CODE (reg) == PLUS || MEM_P (reg))
1721     {
1722       enum reg_class rclass;
1723 
1724       if (REG_P (reg))
1725 	/* There is a big probability that we will get the same class
1726 	   for the new pseudo and we will get the same insn which
1727 	   means infinite looping.  So spill the new pseudo.  */
1728 	rclass = NO_REGS;
1729       else
1730 	/* The class will be defined later in curr_insn_transform.  */
1731 	rclass
1732 	  = (enum reg_class) targetm.preferred_reload_class (reg, ALL_REGS);
1733 
1734       if (get_reload_reg (curr_static_id->operand[nop].type, reg_mode, reg,
1735 			  rclass, TRUE, "subreg reg", &new_reg))
1736 	{
1737 	  bool insert_before, insert_after;
1738 	  bitmap_set_bit (&lra_subreg_reload_pseudos, REGNO (new_reg));
1739 
1740 	  insert_before = (type != OP_OUT
1741 			   || read_modify_subreg_p (operand));
1742 	  insert_after = (type != OP_IN);
1743 	  insert_move_for_subreg (insert_before ? &before : NULL,
1744 				  insert_after ? &after : NULL,
1745 				  reg, new_reg);
1746 	}
1747       SUBREG_REG (operand) = new_reg;
1748       lra_process_new_insns (curr_insn, before, after,
1749 			     "Inserting subreg reload");
1750       return true;
1751     }
1752   /* Force a reload for a paradoxical subreg. For paradoxical subreg,
1753      IRA allocates hardreg to the inner pseudo reg according to its mode
1754      instead of the outermode, so the size of the hardreg may not be enough
1755      to contain the outermode operand, in that case we may need to insert
1756      reload for the reg. For the following two types of paradoxical subreg,
1757      we need to insert reload:
1758      1. If the op_type is OP_IN, and the hardreg could not be paired with
1759         other hardreg to contain the outermode operand
1760         (checked by in_hard_reg_set_p), we need to insert the reload.
1761      2. If the op_type is OP_OUT or OP_INOUT.
1762 
1763      Here is a paradoxical subreg example showing how the reload is generated:
1764 
1765      (insn 5 4 7 2 (set (reg:TI 106 [ __comp ])
1766         (subreg:TI (reg:DI 107 [ __comp ]) 0)) {*movti_internal_rex64}
1767 
1768      In IRA, reg107 is allocated to a DImode hardreg. We use x86-64 as example
1769      here, if reg107 is assigned to hardreg R15, because R15 is the last
1770      hardreg, compiler cannot find another hardreg to pair with R15 to
1771      contain TImode data. So we insert a TImode reload reg180 for it.
1772      After reload is inserted:
1773 
1774      (insn 283 0 0 (set (subreg:DI (reg:TI 180 [orig:107 __comp ] [107]) 0)
1775         (reg:DI 107 [ __comp ])) -1
1776      (insn 5 4 7 2 (set (reg:TI 106 [ __comp ])
1777         (subreg:TI (reg:TI 180 [orig:107 __comp ] [107]) 0)) {*movti_internal_rex64}
1778 
1779      Two reload hard registers will be allocated to reg180 to save TImode data
1780      in LRA_assign.
1781 
1782      For LRA pseudos this should normally be handled by the biggest_mode
1783      mechanism.  However, it's possible for new uses of an LRA pseudo
1784      to be introduced after we've allocated it, such as when undoing
1785      inheritance, and the allocated register might not then be appropriate
1786      for the new uses.  */
1787   else if (REG_P (reg)
1788 	   && REGNO (reg) >= FIRST_PSEUDO_REGISTER
1789 	   && paradoxical_subreg_p (operand)
1790 	   && (inner_hard_regno = lra_get_regno_hard_regno (REGNO (reg))) >= 0
1791 	   && ((hard_regno
1792 		= simplify_subreg_regno (inner_hard_regno, innermode,
1793 					 SUBREG_BYTE (operand), mode)) < 0
1794 	       || ((hard_regno_nregs (inner_hard_regno, innermode)
1795 		    < hard_regno_nregs (hard_regno, mode))
1796 		   && (regclass = lra_get_allocno_class (REGNO (reg)))
1797 		   && (type != OP_IN
1798 		       || !in_hard_reg_set_p (reg_class_contents[regclass],
1799 					      mode, hard_regno)
1800 		       || overlaps_hard_reg_set_p (lra_no_alloc_regs,
1801 						   mode, hard_regno)))))
1802     {
1803       /* The class will be defined later in curr_insn_transform.  */
1804       enum reg_class rclass
1805 	= (enum reg_class) targetm.preferred_reload_class (reg, ALL_REGS);
1806 
1807       if (get_reload_reg (curr_static_id->operand[nop].type, mode, reg,
1808                           rclass, TRUE, "paradoxical subreg", &new_reg))
1809         {
1810 	  rtx subreg;
1811 	  bool insert_before, insert_after;
1812 
1813 	  PUT_MODE (new_reg, mode);
1814           subreg = gen_lowpart_SUBREG (innermode, new_reg);
1815 	  bitmap_set_bit (&lra_subreg_reload_pseudos, REGNO (new_reg));
1816 
1817 	  insert_before = (type != OP_OUT);
1818 	  insert_after = (type != OP_IN);
1819 	  insert_move_for_subreg (insert_before ? &before : NULL,
1820 				  insert_after ? &after : NULL,
1821 				  reg, subreg);
1822 	}
1823       SUBREG_REG (operand) = new_reg;
1824       lra_process_new_insns (curr_insn, before, after,
1825                              "Inserting paradoxical subreg reload");
1826       return true;
1827     }
1828   return false;
1829 }
1830 
1831 /* Return TRUE if X refers for a hard register from SET.  */
1832 static bool
uses_hard_regs_p(rtx x,HARD_REG_SET set)1833 uses_hard_regs_p (rtx x, HARD_REG_SET set)
1834 {
1835   int i, j, x_hard_regno;
1836   machine_mode mode;
1837   const char *fmt;
1838   enum rtx_code code;
1839 
1840   if (x == NULL_RTX)
1841     return false;
1842   code = GET_CODE (x);
1843   mode = GET_MODE (x);
1844 
1845   if (code == SUBREG)
1846     {
1847       /* For all SUBREGs we want to check whether the full multi-register
1848 	 overlaps the set.  For normal SUBREGs this means 'get_hard_regno' of
1849 	 the inner register, for paradoxical SUBREGs this means the
1850 	 'get_hard_regno' of the full SUBREG and for complete SUBREGs either is
1851 	 fine.  Use the wider mode for all cases.  */
1852       rtx subreg = SUBREG_REG (x);
1853       mode = wider_subreg_mode (x);
1854       if (mode == GET_MODE (subreg))
1855 	{
1856 	  x = subreg;
1857 	  code = GET_CODE (x);
1858 	}
1859     }
1860 
1861   if (REG_P (x) || SUBREG_P (x))
1862     {
1863       x_hard_regno = get_hard_regno (x, true);
1864       return (x_hard_regno >= 0
1865 	      && overlaps_hard_reg_set_p (set, mode, x_hard_regno));
1866     }
1867   if (MEM_P (x))
1868     {
1869       struct address_info ad;
1870 
1871       decompose_mem_address (&ad, x);
1872       if (ad.base_term != NULL && uses_hard_regs_p (*ad.base_term, set))
1873 	return true;
1874       if (ad.index_term != NULL && uses_hard_regs_p (*ad.index_term, set))
1875 	return true;
1876     }
1877   fmt = GET_RTX_FORMAT (code);
1878   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1879     {
1880       if (fmt[i] == 'e')
1881 	{
1882 	  if (uses_hard_regs_p (XEXP (x, i), set))
1883 	    return true;
1884 	}
1885       else if (fmt[i] == 'E')
1886 	{
1887 	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1888 	    if (uses_hard_regs_p (XVECEXP (x, i, j), set))
1889 	      return true;
1890 	}
1891     }
1892   return false;
1893 }
1894 
1895 /* Return true if OP is a spilled pseudo. */
1896 static inline bool
spilled_pseudo_p(rtx op)1897 spilled_pseudo_p (rtx op)
1898 {
1899   return (REG_P (op)
1900 	  && REGNO (op) >= FIRST_PSEUDO_REGISTER && in_mem_p (REGNO (op)));
1901 }
1902 
1903 /* Return true if X is a general constant.  */
1904 static inline bool
general_constant_p(rtx x)1905 general_constant_p (rtx x)
1906 {
1907   return CONSTANT_P (x) && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (x));
1908 }
1909 
1910 static bool
reg_in_class_p(rtx reg,enum reg_class cl)1911 reg_in_class_p (rtx reg, enum reg_class cl)
1912 {
1913   if (cl == NO_REGS)
1914     return get_reg_class (REGNO (reg)) == NO_REGS;
1915   return in_class_p (reg, cl, NULL);
1916 }
1917 
1918 /* Return true if SET of RCLASS contains no hard regs which can be
1919    used in MODE.  */
1920 static bool
prohibited_class_reg_set_mode_p(enum reg_class rclass,HARD_REG_SET & set,machine_mode mode)1921 prohibited_class_reg_set_mode_p (enum reg_class rclass,
1922 				 HARD_REG_SET &set,
1923 				 machine_mode mode)
1924 {
1925   HARD_REG_SET temp;
1926 
1927   lra_assert (hard_reg_set_subset_p (reg_class_contents[rclass], set));
1928   temp = set & ~lra_no_alloc_regs;
1929   return (hard_reg_set_subset_p
1930 	  (temp, ira_prohibited_class_mode_regs[rclass][mode]));
1931 }
1932 
1933 
1934 /* Used to check validity info about small class input operands.  It
1935    should be incremented at start of processing an insn
1936    alternative.  */
1937 static unsigned int curr_small_class_check = 0;
1938 
1939 /* Update number of used inputs of class OP_CLASS for operand NOP
1940    of alternative NALT.  Return true if we have more such class operands
1941    than the number of available regs.  */
1942 static bool
update_and_check_small_class_inputs(int nop,int nalt,enum reg_class op_class)1943 update_and_check_small_class_inputs (int nop, int nalt,
1944 				     enum reg_class op_class)
1945 {
1946   static unsigned int small_class_check[LIM_REG_CLASSES];
1947   static int small_class_input_nums[LIM_REG_CLASSES];
1948 
1949   if (SMALL_REGISTER_CLASS_P (op_class)
1950       /* We are interesting in classes became small because of fixing
1951 	 some hard regs, e.g. by an user through GCC options.  */
1952       && hard_reg_set_intersect_p (reg_class_contents[op_class],
1953 				   ira_no_alloc_regs)
1954       && (curr_static_id->operand[nop].type != OP_OUT
1955 	  || TEST_BIT (curr_static_id->operand[nop].early_clobber_alts, nalt)))
1956     {
1957       if (small_class_check[op_class] == curr_small_class_check)
1958 	small_class_input_nums[op_class]++;
1959       else
1960 	{
1961 	  small_class_check[op_class] = curr_small_class_check;
1962 	  small_class_input_nums[op_class] = 1;
1963 	}
1964       if (small_class_input_nums[op_class] > ira_class_hard_regs_num[op_class])
1965 	return true;
1966     }
1967   return false;
1968 }
1969 
1970 /* Major function to choose the current insn alternative and what
1971    operands should be reloaded and how.	 If ONLY_ALTERNATIVE is not
1972    negative we should consider only this alternative.  Return false if
1973    we cannot choose the alternative or find how to reload the
1974    operands.  */
1975 static bool
process_alt_operands(int only_alternative)1976 process_alt_operands (int only_alternative)
1977 {
1978   bool ok_p = false;
1979   int nop, overall, nalt;
1980   int n_alternatives = curr_static_id->n_alternatives;
1981   int n_operands = curr_static_id->n_operands;
1982   /* LOSERS counts the operands that don't fit this alternative and
1983      would require loading.  */
1984   int losers;
1985   int addr_losers;
1986   /* REJECT is a count of how undesirable this alternative says it is
1987      if any reloading is required.  If the alternative matches exactly
1988      then REJECT is ignored, but otherwise it gets this much counted
1989      against it in addition to the reloading needed.  */
1990   int reject;
1991   /* This is defined by '!' or '?' alternative constraint and added to
1992      reject.  But in some cases it can be ignored.  */
1993   int static_reject;
1994   int op_reject;
1995   /* The number of elements in the following array.  */
1996   int early_clobbered_regs_num;
1997   /* Numbers of operands which are early clobber registers.  */
1998   int early_clobbered_nops[MAX_RECOG_OPERANDS];
1999   enum reg_class curr_alt[MAX_RECOG_OPERANDS];
2000   HARD_REG_SET curr_alt_set[MAX_RECOG_OPERANDS];
2001   bool curr_alt_match_win[MAX_RECOG_OPERANDS];
2002   bool curr_alt_win[MAX_RECOG_OPERANDS];
2003   bool curr_alt_offmemok[MAX_RECOG_OPERANDS];
2004   int curr_alt_matches[MAX_RECOG_OPERANDS];
2005   /* The number of elements in the following array.  */
2006   int curr_alt_dont_inherit_ops_num;
2007   /* Numbers of operands whose reload pseudos should not be inherited.	*/
2008   int curr_alt_dont_inherit_ops[MAX_RECOG_OPERANDS];
2009   rtx op;
2010   /* The register when the operand is a subreg of register, otherwise the
2011      operand itself.  */
2012   rtx no_subreg_reg_operand[MAX_RECOG_OPERANDS];
2013   /* The register if the operand is a register or subreg of register,
2014      otherwise NULL.  */
2015   rtx operand_reg[MAX_RECOG_OPERANDS];
2016   int hard_regno[MAX_RECOG_OPERANDS];
2017   machine_mode biggest_mode[MAX_RECOG_OPERANDS];
2018   int reload_nregs, reload_sum;
2019   bool costly_p;
2020   enum reg_class cl;
2021 
2022   /* Calculate some data common for all alternatives to speed up the
2023      function.	*/
2024   for (nop = 0; nop < n_operands; nop++)
2025     {
2026       rtx reg;
2027 
2028       op = no_subreg_reg_operand[nop] = *curr_id->operand_loc[nop];
2029       /* The real hard regno of the operand after the allocation.  */
2030       hard_regno[nop] = get_hard_regno (op, true);
2031 
2032       operand_reg[nop] = reg = op;
2033       biggest_mode[nop] = GET_MODE (op);
2034       if (GET_CODE (op) == SUBREG)
2035 	{
2036 	  biggest_mode[nop] = wider_subreg_mode (op);
2037 	  operand_reg[nop] = reg = SUBREG_REG (op);
2038 	}
2039       if (! REG_P (reg))
2040 	operand_reg[nop] = NULL_RTX;
2041       else if (REGNO (reg) >= FIRST_PSEUDO_REGISTER
2042 	       || ((int) REGNO (reg)
2043 		   == lra_get_elimination_hard_regno (REGNO (reg))))
2044 	no_subreg_reg_operand[nop] = reg;
2045       else
2046 	operand_reg[nop] = no_subreg_reg_operand[nop]
2047 	  /* Just use natural mode for elimination result.  It should
2048 	     be enough for extra constraints hooks.  */
2049 	  = regno_reg_rtx[hard_regno[nop]];
2050     }
2051 
2052   /* The constraints are made of several alternatives.	Each operand's
2053      constraint looks like foo,bar,... with commas separating the
2054      alternatives.  The first alternatives for all operands go
2055      together, the second alternatives go together, etc.
2056 
2057      First loop over alternatives.  */
2058   alternative_mask preferred = curr_id->preferred_alternatives;
2059   if (only_alternative >= 0)
2060     preferred &= ALTERNATIVE_BIT (only_alternative);
2061 
2062   for (nalt = 0; nalt < n_alternatives; nalt++)
2063     {
2064       /* Loop over operands for one constraint alternative.  */
2065       if (!TEST_BIT (preferred, nalt))
2066 	continue;
2067 
2068       bool matching_early_clobber[MAX_RECOG_OPERANDS];
2069       curr_small_class_check++;
2070       overall = losers = addr_losers = 0;
2071       static_reject = reject = reload_nregs = reload_sum = 0;
2072       for (nop = 0; nop < n_operands; nop++)
2073 	{
2074 	  int inc = (curr_static_id
2075 		     ->operand_alternative[nalt * n_operands + nop].reject);
2076 	  if (lra_dump_file != NULL && inc != 0)
2077 	    fprintf (lra_dump_file,
2078 		     "            Staticly defined alt reject+=%d\n", inc);
2079 	  static_reject += inc;
2080 	  matching_early_clobber[nop] = 0;
2081 	}
2082       reject += static_reject;
2083       early_clobbered_regs_num = 0;
2084 
2085       for (nop = 0; nop < n_operands; nop++)
2086 	{
2087 	  const char *p;
2088 	  char *end;
2089 	  int len, c, m, i, opalt_num, this_alternative_matches;
2090 	  bool win, did_match, offmemok, early_clobber_p;
2091 	  /* false => this operand can be reloaded somehow for this
2092 	     alternative.  */
2093 	  bool badop;
2094 	  /* true => this operand can be reloaded if the alternative
2095 	     allows regs.  */
2096 	  bool winreg;
2097 	  /* True if a constant forced into memory would be OK for
2098 	     this operand.  */
2099 	  bool constmemok;
2100 	  enum reg_class this_alternative, this_costly_alternative;
2101 	  HARD_REG_SET this_alternative_set, this_costly_alternative_set;
2102 	  bool this_alternative_match_win, this_alternative_win;
2103 	  bool this_alternative_offmemok;
2104 	  bool scratch_p;
2105 	  machine_mode mode;
2106 	  enum constraint_num cn;
2107 
2108 	  opalt_num = nalt * n_operands + nop;
2109 	  if (curr_static_id->operand_alternative[opalt_num].anything_ok)
2110 	    {
2111 	      /* Fast track for no constraints at all.	*/
2112 	      curr_alt[nop] = NO_REGS;
2113 	      CLEAR_HARD_REG_SET (curr_alt_set[nop]);
2114 	      curr_alt_win[nop] = true;
2115 	      curr_alt_match_win[nop] = false;
2116 	      curr_alt_offmemok[nop] = false;
2117 	      curr_alt_matches[nop] = -1;
2118 	      continue;
2119 	    }
2120 
2121 	  op = no_subreg_reg_operand[nop];
2122 	  mode = curr_operand_mode[nop];
2123 
2124 	  win = did_match = winreg = offmemok = constmemok = false;
2125 	  badop = true;
2126 
2127 	  early_clobber_p = false;
2128 	  p = curr_static_id->operand_alternative[opalt_num].constraint;
2129 
2130 	  this_costly_alternative = this_alternative = NO_REGS;
2131 	  /* We update set of possible hard regs besides its class
2132 	     because reg class might be inaccurate.  For example,
2133 	     union of LO_REGS (l), HI_REGS(h), and STACK_REG(k) in ARM
2134 	     is translated in HI_REGS because classes are merged by
2135 	     pairs and there is no accurate intermediate class.	 */
2136 	  CLEAR_HARD_REG_SET (this_alternative_set);
2137 	  CLEAR_HARD_REG_SET (this_costly_alternative_set);
2138 	  this_alternative_win = false;
2139 	  this_alternative_match_win = false;
2140 	  this_alternative_offmemok = false;
2141 	  this_alternative_matches = -1;
2142 
2143 	  /* An empty constraint should be excluded by the fast
2144 	     track.  */
2145 	  lra_assert (*p != 0 && *p != ',');
2146 
2147 	  op_reject = 0;
2148 	  /* Scan this alternative's specs for this operand; set WIN
2149 	     if the operand fits any letter in this alternative.
2150 	     Otherwise, clear BADOP if this operand could fit some
2151 	     letter after reloads, or set WINREG if this operand could
2152 	     fit after reloads provided the constraint allows some
2153 	     registers.	 */
2154 	  costly_p = false;
2155 	  do
2156 	    {
2157 	      switch ((c = *p, len = CONSTRAINT_LEN (c, p)), c)
2158 		{
2159 		case '\0':
2160 		  len = 0;
2161 		  break;
2162 		case ',':
2163 		  c = '\0';
2164 		  break;
2165 
2166 		case '&':
2167 		  early_clobber_p = true;
2168 		  break;
2169 
2170 		case '$':
2171 		  op_reject += LRA_MAX_REJECT;
2172 		  break;
2173 		case '^':
2174 		  op_reject += LRA_LOSER_COST_FACTOR;
2175 		  break;
2176 
2177 		case '#':
2178 		  /* Ignore rest of this alternative.  */
2179 		  c = '\0';
2180 		  break;
2181 
2182 		case '0':  case '1':  case '2':	 case '3':  case '4':
2183 		case '5':  case '6':  case '7':	 case '8':  case '9':
2184 		  {
2185 		    int m_hregno;
2186 		    bool match_p;
2187 
2188 		    m = strtoul (p, &end, 10);
2189 		    p = end;
2190 		    len = 0;
2191 		    lra_assert (nop > m);
2192 
2193 		    /* Reject matches if we don't know which operand is
2194 		       bigger.  This situation would arguably be a bug in
2195 		       an .md pattern, but could also occur in a user asm.  */
2196 		    if (!ordered_p (GET_MODE_SIZE (biggest_mode[m]),
2197 				    GET_MODE_SIZE (biggest_mode[nop])))
2198 		      break;
2199 
2200 		    /* Don't match wrong asm insn operands for proper
2201 		       diagnostic later.  */
2202 		    if (INSN_CODE (curr_insn) < 0
2203 			&& (curr_operand_mode[m] == BLKmode
2204 			    || curr_operand_mode[nop] == BLKmode)
2205 			&& curr_operand_mode[m] != curr_operand_mode[nop])
2206 		      break;
2207 
2208 		    m_hregno = get_hard_regno (*curr_id->operand_loc[m], false);
2209 		    /* We are supposed to match a previous operand.
2210 		       If we do, we win if that one did.  If we do
2211 		       not, count both of the operands as losers.
2212 		       (This is too conservative, since most of the
2213 		       time only a single reload insn will be needed
2214 		       to make the two operands win.  As a result,
2215 		       this alternative may be rejected when it is
2216 		       actually desirable.)  */
2217 		    match_p = false;
2218 		    if (operands_match_p (*curr_id->operand_loc[nop],
2219 					  *curr_id->operand_loc[m], m_hregno))
2220 		      {
2221 			/* We should reject matching of an early
2222 			   clobber operand if the matching operand is
2223 			   not dying in the insn.  */
2224 			if (!TEST_BIT (curr_static_id->operand[m]
2225 				       .early_clobber_alts, nalt)
2226 			    || operand_reg[nop] == NULL_RTX
2227 			    || (find_regno_note (curr_insn, REG_DEAD,
2228 						 REGNO (op))
2229 				|| REGNO (op) == REGNO (operand_reg[m])))
2230 			  match_p = true;
2231 		      }
2232 		    if (match_p)
2233 		      {
2234 			/* If we are matching a non-offsettable
2235 			   address where an offsettable address was
2236 			   expected, then we must reject this
2237 			   combination, because we can't reload
2238 			   it.	*/
2239 			if (curr_alt_offmemok[m]
2240 			    && MEM_P (*curr_id->operand_loc[m])
2241 			    && curr_alt[m] == NO_REGS && ! curr_alt_win[m])
2242 			  continue;
2243 		      }
2244 		    else
2245 		      {
2246 			/* If the operands do not match and one
2247 			   operand is INOUT, we can not match them.
2248 			   Try other possibilities, e.g. other
2249 			   alternatives or commutative operand
2250 			   exchange.  */
2251 			if (curr_static_id->operand[nop].type == OP_INOUT
2252 			    || curr_static_id->operand[m].type == OP_INOUT)
2253 			  break;
2254 			/* Operands don't match.  If the operands are
2255 			   different user defined explicit hard
2256 			   registers, then we cannot make them match
2257 			   when one is early clobber operand.  */
2258 			if ((REG_P (*curr_id->operand_loc[nop])
2259 			     || SUBREG_P (*curr_id->operand_loc[nop]))
2260 			    && (REG_P (*curr_id->operand_loc[m])
2261 				|| SUBREG_P (*curr_id->operand_loc[m])))
2262 			  {
2263 			    rtx nop_reg = *curr_id->operand_loc[nop];
2264 			    if (SUBREG_P (nop_reg))
2265 			      nop_reg = SUBREG_REG (nop_reg);
2266 			    rtx m_reg = *curr_id->operand_loc[m];
2267 			    if (SUBREG_P (m_reg))
2268 			      m_reg = SUBREG_REG (m_reg);
2269 
2270 			    if (REG_P (nop_reg)
2271 				&& HARD_REGISTER_P (nop_reg)
2272 				&& REG_USERVAR_P (nop_reg)
2273 				&& REG_P (m_reg)
2274 				&& HARD_REGISTER_P (m_reg)
2275 				&& REG_USERVAR_P (m_reg))
2276 			      {
2277 				int i;
2278 
2279 				for (i = 0; i < early_clobbered_regs_num; i++)
2280 				  if (m == early_clobbered_nops[i])
2281 				    break;
2282 				if (i < early_clobbered_regs_num
2283 				    || early_clobber_p)
2284 				  break;
2285 			      }
2286 			  }
2287 			/* Both operands must allow a reload register,
2288 			   otherwise we cannot make them match.  */
2289 			if (curr_alt[m] == NO_REGS)
2290 			  break;
2291 			/* Retroactively mark the operand we had to
2292 			   match as a loser, if it wasn't already and
2293 			   it wasn't matched to a register constraint
2294 			   (e.g it might be matched by memory). */
2295 			if (curr_alt_win[m]
2296 			    && (operand_reg[m] == NULL_RTX
2297 				|| hard_regno[m] < 0))
2298 			  {
2299 			    losers++;
2300 			    reload_nregs
2301 			      += (ira_reg_class_max_nregs[curr_alt[m]]
2302 				  [GET_MODE (*curr_id->operand_loc[m])]);
2303 			  }
2304 
2305 			/* Prefer matching earlyclobber alternative as
2306 			   it results in less hard regs required for
2307 			   the insn than a non-matching earlyclobber
2308 			   alternative.  */
2309 			if (TEST_BIT (curr_static_id->operand[m]
2310 				      .early_clobber_alts, nalt))
2311 			  {
2312 			    if (lra_dump_file != NULL)
2313 			      fprintf
2314 				(lra_dump_file,
2315 				 "            %d Matching earlyclobber alt:"
2316 				 " reject--\n",
2317 				 nop);
2318 			    if (!matching_early_clobber[m])
2319 			      {
2320 				reject--;
2321 				matching_early_clobber[m] = 1;
2322 			      }
2323 			  }
2324 			/* Otherwise we prefer no matching
2325 			   alternatives because it gives more freedom
2326 			   in RA.  */
2327 			else if (operand_reg[nop] == NULL_RTX
2328 				 || (find_regno_note (curr_insn, REG_DEAD,
2329 						      REGNO (operand_reg[nop]))
2330 				     == NULL_RTX))
2331 			  {
2332 			    if (lra_dump_file != NULL)
2333 			      fprintf
2334 				(lra_dump_file,
2335 				 "            %d Matching alt: reject+=2\n",
2336 				 nop);
2337 			    reject += 2;
2338 			  }
2339 		      }
2340 		    /* If we have to reload this operand and some
2341 		       previous operand also had to match the same
2342 		       thing as this operand, we don't know how to do
2343 		       that.  */
2344 		    if (!match_p || !curr_alt_win[m])
2345 		      {
2346 			for (i = 0; i < nop; i++)
2347 			  if (curr_alt_matches[i] == m)
2348 			    break;
2349 			if (i < nop)
2350 			  break;
2351 		      }
2352 		    else
2353 		      did_match = true;
2354 
2355 		    this_alternative_matches = m;
2356 		    /* This can be fixed with reloads if the operand
2357 		       we are supposed to match can be fixed with
2358 		       reloads. */
2359 		    badop = false;
2360 		    this_alternative = curr_alt[m];
2361 		    this_alternative_set = curr_alt_set[m];
2362 		    winreg = this_alternative != NO_REGS;
2363 		    break;
2364 		  }
2365 
2366 		case 'g':
2367 		  if (MEM_P (op)
2368 		      || general_constant_p (op)
2369 		      || spilled_pseudo_p (op))
2370 		    win = true;
2371 		  cl = GENERAL_REGS;
2372 		  goto reg;
2373 
2374 		default:
2375 		  cn = lookup_constraint (p);
2376 		  switch (get_constraint_type (cn))
2377 		    {
2378 		    case CT_REGISTER:
2379 		      cl = reg_class_for_constraint (cn);
2380 		      if (cl != NO_REGS)
2381 			goto reg;
2382 		      break;
2383 
2384 		    case CT_CONST_INT:
2385 		      if (CONST_INT_P (op)
2386 			  && insn_const_int_ok_for_constraint (INTVAL (op), cn))
2387 			win = true;
2388 		      break;
2389 
2390 		    case CT_MEMORY:
2391 		      if (MEM_P (op)
2392 			  && satisfies_memory_constraint_p (op, cn))
2393 			win = true;
2394 		      else if (spilled_pseudo_p (op))
2395 			win = true;
2396 
2397 		      /* If we didn't already win, we can reload constants
2398 			 via force_const_mem or put the pseudo value into
2399 			 memory, or make other memory by reloading the
2400 			 address like for 'o'.  */
2401 		      if (CONST_POOL_OK_P (mode, op)
2402 			  || MEM_P (op) || REG_P (op)
2403 			  /* We can restore the equiv insn by a
2404 			     reload.  */
2405 			  || equiv_substition_p[nop])
2406 			badop = false;
2407 		      constmemok = true;
2408 		      offmemok = true;
2409 		      break;
2410 
2411 		    case CT_ADDRESS:
2412 		      /* An asm operand with an address constraint
2413 			 that doesn't satisfy address_operand has
2414 			 is_address cleared, so that we don't try to
2415 			 make a non-address fit.  */
2416 		      if (!curr_static_id->operand[nop].is_address)
2417 			break;
2418 		      /* If we didn't already win, we can reload the address
2419 			 into a base register.  */
2420 		      if (satisfies_address_constraint_p (op, cn))
2421 			win = true;
2422 		      cl = base_reg_class (VOIDmode, ADDR_SPACE_GENERIC,
2423 					   ADDRESS, SCRATCH);
2424 		      badop = false;
2425 		      goto reg;
2426 
2427 		    case CT_FIXED_FORM:
2428 		      if (constraint_satisfied_p (op, cn))
2429 			win = true;
2430 		      break;
2431 
2432 		    case CT_SPECIAL_MEMORY:
2433 		      if (MEM_P (op)
2434 			  && satisfies_memory_constraint_p (op, cn))
2435 			win = true;
2436 		      else if (spilled_pseudo_p (op))
2437 			win = true;
2438 		      break;
2439 		    }
2440 		  break;
2441 
2442 		reg:
2443 		  if (mode == BLKmode)
2444 		    break;
2445 		  this_alternative = reg_class_subunion[this_alternative][cl];
2446 		  this_alternative_set |= reg_class_contents[cl];
2447 		  if (costly_p)
2448 		    {
2449 		      this_costly_alternative
2450 			= reg_class_subunion[this_costly_alternative][cl];
2451 		      this_costly_alternative_set |= reg_class_contents[cl];
2452 		    }
2453 		  winreg = true;
2454 		  if (REG_P (op))
2455 		    {
2456 		      if (hard_regno[nop] >= 0
2457 			  && in_hard_reg_set_p (this_alternative_set,
2458 						mode, hard_regno[nop]))
2459 			win = true;
2460 		      else if (hard_regno[nop] < 0
2461 			       && in_class_p (op, this_alternative, NULL))
2462 			win = true;
2463 		    }
2464 		  break;
2465 		}
2466 	      if (c != ' ' && c != '\t')
2467 		costly_p = c == '*';
2468 	    }
2469 	  while ((p += len), c);
2470 
2471 	  scratch_p = (operand_reg[nop] != NULL_RTX
2472 		       && lra_former_scratch_p (REGNO (operand_reg[nop])));
2473 	  /* Record which operands fit this alternative.  */
2474 	  if (win)
2475 	    {
2476 	      this_alternative_win = true;
2477 	      if (operand_reg[nop] != NULL_RTX)
2478 		{
2479 		  if (hard_regno[nop] >= 0)
2480 		    {
2481 		      if (in_hard_reg_set_p (this_costly_alternative_set,
2482 					     mode, hard_regno[nop]))
2483 			{
2484 			  if (lra_dump_file != NULL)
2485 			    fprintf (lra_dump_file,
2486 				     "            %d Costly set: reject++\n",
2487 				     nop);
2488 			  reject++;
2489 			}
2490 		    }
2491 		  else
2492 		    {
2493 		      /* Prefer won reg to spilled pseudo under other
2494 			 equal conditions for possibe inheritance.  */
2495 		      if (! scratch_p)
2496 			{
2497 			  if (lra_dump_file != NULL)
2498 			    fprintf
2499 			      (lra_dump_file,
2500 			       "            %d Non pseudo reload: reject++\n",
2501 			       nop);
2502 			  reject++;
2503 			}
2504 		      if (in_class_p (operand_reg[nop],
2505 				      this_costly_alternative, NULL))
2506 			{
2507 			  if (lra_dump_file != NULL)
2508 			    fprintf
2509 			      (lra_dump_file,
2510 			       "            %d Non pseudo costly reload:"
2511 			       " reject++\n",
2512 			       nop);
2513 			  reject++;
2514 			}
2515 		    }
2516 		  /* We simulate the behavior of old reload here.
2517 		     Although scratches need hard registers and it
2518 		     might result in spilling other pseudos, no reload
2519 		     insns are generated for the scratches.  So it
2520 		     might cost something but probably less than old
2521 		     reload pass believes.  */
2522 		  if (scratch_p)
2523 		    {
2524 		      if (lra_dump_file != NULL)
2525 			fprintf (lra_dump_file,
2526 				 "            %d Scratch win: reject+=2\n",
2527 				 nop);
2528 		      reject += 2;
2529 		    }
2530 		}
2531 	    }
2532 	  else if (did_match)
2533 	    this_alternative_match_win = true;
2534 	  else
2535 	    {
2536 	      int const_to_mem = 0;
2537 	      bool no_regs_p;
2538 
2539 	      reject += op_reject;
2540 	      /* Never do output reload of stack pointer.  It makes
2541 		 impossible to do elimination when SP is changed in
2542 		 RTL.  */
2543 	      if (op == stack_pointer_rtx && ! frame_pointer_needed
2544 		  && curr_static_id->operand[nop].type != OP_IN)
2545 		goto fail;
2546 
2547 	      /* If this alternative asks for a specific reg class, see if there
2548 		 is at least one allocatable register in that class.  */
2549 	      no_regs_p
2550 		= (this_alternative == NO_REGS
2551 		   || (hard_reg_set_subset_p
2552 		       (reg_class_contents[this_alternative],
2553 			lra_no_alloc_regs)));
2554 
2555 	      /* For asms, verify that the class for this alternative is possible
2556 		 for the mode that is specified.  */
2557 	      if (!no_regs_p && INSN_CODE (curr_insn) < 0)
2558 		{
2559 		  int i;
2560 		  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2561 		    if (targetm.hard_regno_mode_ok (i, mode)
2562 			&& in_hard_reg_set_p (reg_class_contents[this_alternative],
2563 					      mode, i))
2564 		      break;
2565 		  if (i == FIRST_PSEUDO_REGISTER)
2566 		    winreg = false;
2567 		}
2568 
2569 	      /* If this operand accepts a register, and if the
2570 		 register class has at least one allocatable register,
2571 		 then this operand can be reloaded.  */
2572 	      if (winreg && !no_regs_p)
2573 		badop = false;
2574 
2575 	      if (badop)
2576 		{
2577 		  if (lra_dump_file != NULL)
2578 		    fprintf (lra_dump_file,
2579 			     "            alt=%d: Bad operand -- refuse\n",
2580 			     nalt);
2581 		  goto fail;
2582 		}
2583 
2584 	      if (this_alternative != NO_REGS)
2585 		{
2586 		  HARD_REG_SET available_regs
2587 		    = (reg_class_contents[this_alternative]
2588 		       & ~((ira_prohibited_class_mode_regs
2589 			    [this_alternative][mode])
2590 			   | lra_no_alloc_regs));
2591 		  if (hard_reg_set_empty_p (available_regs))
2592 		    {
2593 		      /* There are no hard regs holding a value of given
2594 			 mode.  */
2595 		      if (offmemok)
2596 			{
2597 			  this_alternative = NO_REGS;
2598 			  if (lra_dump_file != NULL)
2599 			    fprintf (lra_dump_file,
2600 				     "            %d Using memory because of"
2601 				     " a bad mode: reject+=2\n",
2602 				     nop);
2603 			  reject += 2;
2604 			}
2605 		      else
2606 			{
2607 			  if (lra_dump_file != NULL)
2608 			    fprintf (lra_dump_file,
2609 				     "            alt=%d: Wrong mode -- refuse\n",
2610 				     nalt);
2611 			  goto fail;
2612 			}
2613 		    }
2614 		}
2615 
2616 	      /* If not assigned pseudo has a class which a subset of
2617 		 required reg class, it is a less costly alternative
2618 		 as the pseudo still can get a hard reg of necessary
2619 		 class.  */
2620 	      if (! no_regs_p && REG_P (op) && hard_regno[nop] < 0
2621 		  && (cl = get_reg_class (REGNO (op))) != NO_REGS
2622 		  && ira_class_subset_p[this_alternative][cl])
2623 		{
2624 		  if (lra_dump_file != NULL)
2625 		    fprintf
2626 		      (lra_dump_file,
2627 		       "            %d Super set class reg: reject-=3\n", nop);
2628 		  reject -= 3;
2629 		}
2630 
2631 	      this_alternative_offmemok = offmemok;
2632 	      if (this_costly_alternative != NO_REGS)
2633 		{
2634 		  if (lra_dump_file != NULL)
2635 		    fprintf (lra_dump_file,
2636 			     "            %d Costly loser: reject++\n", nop);
2637 		  reject++;
2638 		}
2639 	      /* If the operand is dying, has a matching constraint,
2640 		 and satisfies constraints of the matched operand
2641 		 which failed to satisfy the own constraints, most probably
2642 		 the reload for this operand will be gone.  */
2643 	      if (this_alternative_matches >= 0
2644 		  && !curr_alt_win[this_alternative_matches]
2645 		  && REG_P (op)
2646 		  && find_regno_note (curr_insn, REG_DEAD, REGNO (op))
2647 		  && (hard_regno[nop] >= 0
2648 		      ? in_hard_reg_set_p (this_alternative_set,
2649 					   mode, hard_regno[nop])
2650 		      : in_class_p (op, this_alternative, NULL)))
2651 		{
2652 		  if (lra_dump_file != NULL)
2653 		    fprintf
2654 		      (lra_dump_file,
2655 		       "            %d Dying matched operand reload: reject++\n",
2656 		       nop);
2657 		  reject++;
2658 		}
2659 	      else
2660 		{
2661 		  /* Strict_low_part requires to reload the register
2662 		     not the sub-register.  In this case we should
2663 		     check that a final reload hard reg can hold the
2664 		     value mode.  */
2665 		  if (curr_static_id->operand[nop].strict_low
2666 		      && REG_P (op)
2667 		      && hard_regno[nop] < 0
2668 		      && GET_CODE (*curr_id->operand_loc[nop]) == SUBREG
2669 		      && ira_class_hard_regs_num[this_alternative] > 0
2670 		      && (!targetm.hard_regno_mode_ok
2671 			  (ira_class_hard_regs[this_alternative][0],
2672 			   GET_MODE (*curr_id->operand_loc[nop]))))
2673 		    {
2674 		      if (lra_dump_file != NULL)
2675 			fprintf
2676 			  (lra_dump_file,
2677 			   "            alt=%d: Strict low subreg reload -- refuse\n",
2678 			   nalt);
2679 		      goto fail;
2680 		    }
2681 		  losers++;
2682 		}
2683 	      if (operand_reg[nop] != NULL_RTX
2684 		  /* Output operands and matched input operands are
2685 		     not inherited.  The following conditions do not
2686 		     exactly describe the previous statement but they
2687 		     are pretty close.  */
2688 		  && curr_static_id->operand[nop].type != OP_OUT
2689 		  && (this_alternative_matches < 0
2690 		      || curr_static_id->operand[nop].type != OP_IN))
2691 		{
2692 		  int last_reload = (lra_reg_info[ORIGINAL_REGNO
2693 						  (operand_reg[nop])]
2694 				     .last_reload);
2695 
2696 		  /* The value of reload_sum has sense only if we
2697 		     process insns in their order.  It happens only on
2698 		     the first constraints sub-pass when we do most of
2699 		     reload work.  */
2700 		  if (lra_constraint_iter == 1 && last_reload > bb_reload_num)
2701 		    reload_sum += last_reload - bb_reload_num;
2702 		}
2703 	      /* If this is a constant that is reloaded into the
2704 		 desired class by copying it to memory first, count
2705 		 that as another reload.  This is consistent with
2706 		 other code and is required to avoid choosing another
2707 		 alternative when the constant is moved into memory.
2708 		 Note that the test here is precisely the same as in
2709 		 the code below that calls force_const_mem.  */
2710 	      if (CONST_POOL_OK_P (mode, op)
2711 		  && ((targetm.preferred_reload_class
2712 		       (op, this_alternative) == NO_REGS)
2713 		      || no_input_reloads_p))
2714 		{
2715 		  const_to_mem = 1;
2716 		  if (! no_regs_p)
2717 		    losers++;
2718 		}
2719 
2720 	      /* Alternative loses if it requires a type of reload not
2721 		 permitted for this insn.  We can always reload
2722 		 objects with a REG_UNUSED note.  */
2723 	      if ((curr_static_id->operand[nop].type != OP_IN
2724 		   && no_output_reloads_p
2725 		   && ! find_reg_note (curr_insn, REG_UNUSED, op))
2726 		  || (curr_static_id->operand[nop].type != OP_OUT
2727 		      && no_input_reloads_p && ! const_to_mem)
2728 		  || (this_alternative_matches >= 0
2729 		      && (no_input_reloads_p
2730 			  || (no_output_reloads_p
2731 			      && (curr_static_id->operand
2732 				  [this_alternative_matches].type != OP_IN)
2733 			      && ! find_reg_note (curr_insn, REG_UNUSED,
2734 						  no_subreg_reg_operand
2735 						  [this_alternative_matches])))))
2736 		{
2737 		  if (lra_dump_file != NULL)
2738 		    fprintf
2739 		      (lra_dump_file,
2740 		       "            alt=%d: No input/otput reload -- refuse\n",
2741 		       nalt);
2742 		  goto fail;
2743 		}
2744 
2745 	      /* Alternative loses if it required class pseudo cannot
2746 		 hold value of required mode.  Such insns can be
2747 		 described by insn definitions with mode iterators.  */
2748 	      if (GET_MODE (*curr_id->operand_loc[nop]) != VOIDmode
2749 		  && ! hard_reg_set_empty_p (this_alternative_set)
2750 		  /* It is common practice for constraints to use a
2751 		     class which does not have actually enough regs to
2752 		     hold the value (e.g. x86 AREG for mode requiring
2753 		     more one general reg).  Therefore we have 2
2754 		     conditions to check that the reload pseudo cannot
2755 		     hold the mode value.  */
2756 		  && (!targetm.hard_regno_mode_ok
2757 		      (ira_class_hard_regs[this_alternative][0],
2758 		       GET_MODE (*curr_id->operand_loc[nop])))
2759 		  /* The above condition is not enough as the first
2760 		     reg in ira_class_hard_regs can be not aligned for
2761 		     multi-words mode values.  */
2762 		  && (prohibited_class_reg_set_mode_p
2763 		      (this_alternative, this_alternative_set,
2764 		       GET_MODE (*curr_id->operand_loc[nop]))))
2765 		{
2766 		  if (lra_dump_file != NULL)
2767 		    fprintf (lra_dump_file,
2768 			     "            alt=%d: reload pseudo for op %d "
2769 			     "cannot hold the mode value -- refuse\n",
2770 			     nalt, nop);
2771 		  goto fail;
2772 		}
2773 
2774 	      /* Check strong discouragement of reload of non-constant
2775 		 into class THIS_ALTERNATIVE.  */
2776 	      if (! CONSTANT_P (op) && ! no_regs_p
2777 		  && (targetm.preferred_reload_class
2778 		      (op, this_alternative) == NO_REGS
2779 		      || (curr_static_id->operand[nop].type == OP_OUT
2780 			  && (targetm.preferred_output_reload_class
2781 			      (op, this_alternative) == NO_REGS))))
2782 		{
2783 		  if (offmemok && REG_P (op))
2784 		    {
2785 		      if (lra_dump_file != NULL)
2786 			fprintf
2787 			  (lra_dump_file,
2788 			   "            %d Spill pseudo into memory: reject+=3\n",
2789 			   nop);
2790 		      reject += 3;
2791 		    }
2792 		  else
2793 		    {
2794 		      if (lra_dump_file != NULL)
2795 			fprintf
2796 			  (lra_dump_file,
2797 			   "            %d Non-prefered reload: reject+=%d\n",
2798 			   nop, LRA_MAX_REJECT);
2799 		      reject += LRA_MAX_REJECT;
2800 		    }
2801 		}
2802 
2803 	      if (! (MEM_P (op) && offmemok)
2804 		  && ! (const_to_mem && constmemok))
2805 		{
2806 		  /* We prefer to reload pseudos over reloading other
2807 		     things, since such reloads may be able to be
2808 		     eliminated later.  So bump REJECT in other cases.
2809 		     Don't do this in the case where we are forcing a
2810 		     constant into memory and it will then win since
2811 		     we don't want to have a different alternative
2812 		     match then.  */
2813 		  if (! (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER))
2814 		    {
2815 		      if (lra_dump_file != NULL)
2816 			fprintf
2817 			  (lra_dump_file,
2818 			   "            %d Non-pseudo reload: reject+=2\n",
2819 			   nop);
2820 		      reject += 2;
2821 		    }
2822 
2823 		  if (! no_regs_p)
2824 		    reload_nregs
2825 		      += ira_reg_class_max_nregs[this_alternative][mode];
2826 
2827 		  if (SMALL_REGISTER_CLASS_P (this_alternative))
2828 		    {
2829 		      if (lra_dump_file != NULL)
2830 			fprintf
2831 			  (lra_dump_file,
2832 			   "            %d Small class reload: reject+=%d\n",
2833 			   nop, LRA_LOSER_COST_FACTOR / 2);
2834 		      reject += LRA_LOSER_COST_FACTOR / 2;
2835 		    }
2836 		}
2837 
2838 	      /* We are trying to spill pseudo into memory.  It is
2839 		 usually more costly than moving to a hard register
2840 		 although it might takes the same number of
2841 		 reloads.
2842 
2843 		 Non-pseudo spill may happen also.  Suppose a target allows both
2844 		 register and memory in the operand constraint alternatives,
2845 		 then it's typical that an eliminable register has a substition
2846 		 of "base + offset" which can either be reloaded by a simple
2847 		 "new_reg <= base + offset" which will match the register
2848 		 constraint, or a similar reg addition followed by further spill
2849 		 to and reload from memory which will match the memory
2850 		 constraint, but this memory spill will be much more costly
2851 		 usually.
2852 
2853 		 Code below increases the reject for both pseudo and non-pseudo
2854 		 spill.  */
2855 	      if (no_regs_p
2856 		  && !(MEM_P (op) && offmemok)
2857 		  && !(REG_P (op) && hard_regno[nop] < 0))
2858 		{
2859 		  if (lra_dump_file != NULL)
2860 		    fprintf
2861 		      (lra_dump_file,
2862 		       "            %d Spill %spseudo into memory: reject+=3\n",
2863 		       nop, REG_P (op) ? "" : "Non-");
2864 		  reject += 3;
2865 		  if (VECTOR_MODE_P (mode))
2866 		    {
2867 		      /* Spilling vectors into memory is usually more
2868 			 costly as they contain big values.  */
2869 		      if (lra_dump_file != NULL)
2870 			fprintf
2871 			  (lra_dump_file,
2872 			   "            %d Spill vector pseudo: reject+=2\n",
2873 			   nop);
2874 		      reject += 2;
2875 		    }
2876 		}
2877 
2878 	      /* When we use an operand requiring memory in given
2879 		 alternative, the insn should write *and* read the
2880 		 value to/from memory it is costly in comparison with
2881 		 an insn alternative which does not use memory
2882 		 (e.g. register or immediate operand).  We exclude
2883 		 memory operand for such case as we can satisfy the
2884 		 memory constraints by reloading address.  */
2885 	      if (no_regs_p && offmemok && !MEM_P (op))
2886 		{
2887 		  if (lra_dump_file != NULL)
2888 		    fprintf
2889 		      (lra_dump_file,
2890 		       "            Using memory insn operand %d: reject+=3\n",
2891 		       nop);
2892 		  reject += 3;
2893 		}
2894 
2895 	      /* If reload requires moving value through secondary
2896 		 memory, it will need one more insn at least.  */
2897 	      if (this_alternative != NO_REGS
2898 		  && REG_P (op) && (cl = get_reg_class (REGNO (op))) != NO_REGS
2899 		  && ((curr_static_id->operand[nop].type != OP_OUT
2900 		       && targetm.secondary_memory_needed (GET_MODE (op), cl,
2901 							   this_alternative))
2902 		      || (curr_static_id->operand[nop].type != OP_IN
2903 			  && (targetm.secondary_memory_needed
2904 			      (GET_MODE (op), this_alternative, cl)))))
2905 		losers++;
2906 
2907 	      if (MEM_P (op) && offmemok)
2908 		addr_losers++;
2909 	      else
2910 		{
2911 		  /* Input reloads can be inherited more often than
2912 		     output reloads can be removed, so penalize output
2913 		     reloads.  */
2914 		  if (!REG_P (op) || curr_static_id->operand[nop].type != OP_IN)
2915 		    {
2916 		      if (lra_dump_file != NULL)
2917 			fprintf
2918 			  (lra_dump_file,
2919 			   "            %d Non input pseudo reload: reject++\n",
2920 			   nop);
2921 		      reject++;
2922 		    }
2923 
2924 		  if (curr_static_id->operand[nop].type == OP_INOUT)
2925 		    {
2926 		      if (lra_dump_file != NULL)
2927 			fprintf
2928 			  (lra_dump_file,
2929 			   "            %d Input/Output reload: reject+=%d\n",
2930 			   nop, LRA_LOSER_COST_FACTOR);
2931 		      reject += LRA_LOSER_COST_FACTOR;
2932 		    }
2933 		}
2934 	    }
2935 
2936 	  if (early_clobber_p && ! scratch_p)
2937 	    {
2938 	      if (lra_dump_file != NULL)
2939 		fprintf (lra_dump_file,
2940 			 "            %d Early clobber: reject++\n", nop);
2941 	      reject++;
2942 	    }
2943 	  /* ??? We check early clobbers after processing all operands
2944 	     (see loop below) and there we update the costs more.
2945 	     Should we update the cost (may be approximately) here
2946 	     because of early clobber register reloads or it is a rare
2947 	     or non-important thing to be worth to do it.  */
2948 	  overall = (losers * LRA_LOSER_COST_FACTOR + reject
2949 		     - (addr_losers == losers ? static_reject : 0));
2950 	  if ((best_losers == 0 || losers != 0) && best_overall < overall)
2951             {
2952               if (lra_dump_file != NULL)
2953 		fprintf (lra_dump_file,
2954 			 "            alt=%d,overall=%d,losers=%d -- refuse\n",
2955 			 nalt, overall, losers);
2956               goto fail;
2957             }
2958 
2959 	  if (update_and_check_small_class_inputs (nop, nalt,
2960 						   this_alternative))
2961 	    {
2962 	      if (lra_dump_file != NULL)
2963 		fprintf (lra_dump_file,
2964 			 "            alt=%d, not enough small class regs -- refuse\n",
2965 			 nalt);
2966 	      goto fail;
2967 	    }
2968 	  curr_alt[nop] = this_alternative;
2969 	  curr_alt_set[nop] = this_alternative_set;
2970 	  curr_alt_win[nop] = this_alternative_win;
2971 	  curr_alt_match_win[nop] = this_alternative_match_win;
2972 	  curr_alt_offmemok[nop] = this_alternative_offmemok;
2973 	  curr_alt_matches[nop] = this_alternative_matches;
2974 
2975 	  if (this_alternative_matches >= 0
2976 	      && !did_match && !this_alternative_win)
2977 	    curr_alt_win[this_alternative_matches] = false;
2978 
2979 	  if (early_clobber_p && operand_reg[nop] != NULL_RTX)
2980 	    early_clobbered_nops[early_clobbered_regs_num++] = nop;
2981 	}
2982 
2983       if (curr_insn_set != NULL_RTX && n_operands == 2
2984 	  /* Prevent processing non-move insns.  */
2985 	  && (GET_CODE (SET_SRC (curr_insn_set)) == SUBREG
2986 	      || SET_SRC (curr_insn_set) == no_subreg_reg_operand[1])
2987 	  && ((! curr_alt_win[0] && ! curr_alt_win[1]
2988 	       && REG_P (no_subreg_reg_operand[0])
2989 	       && REG_P (no_subreg_reg_operand[1])
2990 	       && (reg_in_class_p (no_subreg_reg_operand[0], curr_alt[1])
2991 		   || reg_in_class_p (no_subreg_reg_operand[1], curr_alt[0])))
2992 	      || (! curr_alt_win[0] && curr_alt_win[1]
2993 		  && REG_P (no_subreg_reg_operand[1])
2994 		  /* Check that we reload memory not the memory
2995 		     address.  */
2996 		  && ! (curr_alt_offmemok[0]
2997 			&& MEM_P (no_subreg_reg_operand[0]))
2998 		  && reg_in_class_p (no_subreg_reg_operand[1], curr_alt[0]))
2999 	      || (curr_alt_win[0] && ! curr_alt_win[1]
3000 		  && REG_P (no_subreg_reg_operand[0])
3001 		  /* Check that we reload memory not the memory
3002 		     address.  */
3003 		  && ! (curr_alt_offmemok[1]
3004 			&& MEM_P (no_subreg_reg_operand[1]))
3005 		  && reg_in_class_p (no_subreg_reg_operand[0], curr_alt[1])
3006 		  && (! CONST_POOL_OK_P (curr_operand_mode[1],
3007 					 no_subreg_reg_operand[1])
3008 		      || (targetm.preferred_reload_class
3009 			  (no_subreg_reg_operand[1],
3010 			   (enum reg_class) curr_alt[1]) != NO_REGS))
3011 		  /* If it is a result of recent elimination in move
3012 		     insn we can transform it into an add still by
3013 		     using this alternative.  */
3014 		  && GET_CODE (no_subreg_reg_operand[1]) != PLUS
3015 		  /* Likewise if the source has been replaced with an
3016 		     equivalent value.  This only happens once -- the reload
3017 		     will use the equivalent value instead of the register it
3018 		     replaces -- so there should be no danger of cycling.  */
3019 		  && !equiv_substition_p[1])))
3020 	{
3021 	  /* We have a move insn and a new reload insn will be similar
3022 	     to the current insn.  We should avoid such situation as
3023 	     it results in LRA cycling.  */
3024 	  if (lra_dump_file != NULL)
3025 	    fprintf (lra_dump_file,
3026 		     "            Cycle danger: overall += LRA_MAX_REJECT\n");
3027 	  overall += LRA_MAX_REJECT;
3028 	}
3029       ok_p = true;
3030       curr_alt_dont_inherit_ops_num = 0;
3031       for (nop = 0; nop < early_clobbered_regs_num; nop++)
3032 	{
3033 	  int i, j, clobbered_hard_regno, first_conflict_j, last_conflict_j;
3034 	  HARD_REG_SET temp_set;
3035 
3036 	  i = early_clobbered_nops[nop];
3037 	  if ((! curr_alt_win[i] && ! curr_alt_match_win[i])
3038 	      || hard_regno[i] < 0)
3039 	    continue;
3040 	  lra_assert (operand_reg[i] != NULL_RTX);
3041 	  clobbered_hard_regno = hard_regno[i];
3042 	  CLEAR_HARD_REG_SET (temp_set);
3043 	  add_to_hard_reg_set (&temp_set, biggest_mode[i], clobbered_hard_regno);
3044 	  first_conflict_j = last_conflict_j = -1;
3045 	  for (j = 0; j < n_operands; j++)
3046 	    if (j == i
3047 		/* We don't want process insides of match_operator and
3048 		   match_parallel because otherwise we would process
3049 		   their operands once again generating a wrong
3050 		   code.  */
3051 		|| curr_static_id->operand[j].is_operator)
3052 	      continue;
3053 	    else if ((curr_alt_matches[j] == i && curr_alt_match_win[j])
3054 		     || (curr_alt_matches[i] == j && curr_alt_match_win[i]))
3055 	      continue;
3056 	    /* If we don't reload j-th operand, check conflicts.  */
3057 	    else if ((curr_alt_win[j] || curr_alt_match_win[j])
3058 		     && uses_hard_regs_p (*curr_id->operand_loc[j], temp_set))
3059 	      {
3060 		if (first_conflict_j < 0)
3061 		  first_conflict_j = j;
3062 		last_conflict_j = j;
3063 		/* Both the earlyclobber operand and conflicting operand
3064 		   cannot both be user defined hard registers.  */
3065 		if (HARD_REGISTER_P (operand_reg[i])
3066 		    && REG_USERVAR_P (operand_reg[i])
3067 		    && operand_reg[j] != NULL_RTX
3068 		    && HARD_REGISTER_P (operand_reg[j])
3069 		    && REG_USERVAR_P (operand_reg[j]))
3070 		  {
3071 		    /* For asm, let curr_insn_transform diagnose it.  */
3072 		    if (INSN_CODE (curr_insn) < 0)
3073 		      return false;
3074 		    fatal_insn ("unable to generate reloads for "
3075 				"impossible constraints:", curr_insn);
3076 		  }
3077 	      }
3078 	  if (last_conflict_j < 0)
3079 	    continue;
3080 
3081 	  /* If an earlyclobber operand conflicts with another non-matching
3082 	     operand (ie, they have been assigned the same hard register),
3083 	     then it is better to reload the other operand, as there may
3084 	     exist yet another operand with a matching constraint associated
3085 	     with the earlyclobber operand.  However, if one of the operands
3086 	     is an explicit use of a hard register, then we must reload the
3087 	     other non-hard register operand.  */
3088 	  if (HARD_REGISTER_P (operand_reg[i])
3089 	      || (first_conflict_j == last_conflict_j
3090 		  && operand_reg[last_conflict_j] != NULL_RTX
3091 		  && !curr_alt_match_win[last_conflict_j]
3092 		  && !HARD_REGISTER_P (operand_reg[last_conflict_j])))
3093 	    {
3094 	      curr_alt_win[last_conflict_j] = false;
3095 	      curr_alt_dont_inherit_ops[curr_alt_dont_inherit_ops_num++]
3096 		= last_conflict_j;
3097 	      losers++;
3098 	      if (lra_dump_file != NULL)
3099 		fprintf
3100 		  (lra_dump_file,
3101 		   "            %d Conflict early clobber reload: reject--\n",
3102 		   i);
3103 	    }
3104 	  else
3105 	    {
3106 	      /* We need to reload early clobbered register and the
3107 		 matched registers.  */
3108 	      for (j = 0; j < n_operands; j++)
3109 		if (curr_alt_matches[j] == i)
3110 		  {
3111 		    curr_alt_match_win[j] = false;
3112 		    losers++;
3113 		    overall += LRA_LOSER_COST_FACTOR;
3114 		  }
3115 	      if (! curr_alt_match_win[i])
3116 		curr_alt_dont_inherit_ops[curr_alt_dont_inherit_ops_num++] = i;
3117 	      else
3118 		{
3119 		  /* Remember pseudos used for match reloads are never
3120 		     inherited.  */
3121 		  lra_assert (curr_alt_matches[i] >= 0);
3122 		  curr_alt_win[curr_alt_matches[i]] = false;
3123 		}
3124 	      curr_alt_win[i] = curr_alt_match_win[i] = false;
3125 	      losers++;
3126 	      if (lra_dump_file != NULL)
3127 		fprintf
3128 		  (lra_dump_file,
3129 		   "            %d Matched conflict early clobber reloads: "
3130 		   "reject--\n",
3131 		   i);
3132 	    }
3133 	  /* Early clobber was already reflected in REJECT. */
3134 	  if (!matching_early_clobber[i])
3135 	    {
3136 	      lra_assert (reject > 0);
3137 	      reject--;
3138 	      matching_early_clobber[i] = 1;
3139 	    }
3140 	  overall += LRA_LOSER_COST_FACTOR - 1;
3141 	}
3142       if (lra_dump_file != NULL)
3143 	fprintf (lra_dump_file, "          alt=%d,overall=%d,losers=%d,rld_nregs=%d\n",
3144 		 nalt, overall, losers, reload_nregs);
3145 
3146       /* If this alternative can be made to work by reloading, and it
3147 	 needs less reloading than the others checked so far, record
3148 	 it as the chosen goal for reloading.  */
3149       if ((best_losers != 0 && losers == 0)
3150 	  || (((best_losers == 0 && losers == 0)
3151 	       || (best_losers != 0 && losers != 0))
3152 	      && (best_overall > overall
3153 		  || (best_overall == overall
3154 		      /* If the cost of the reloads is the same,
3155 			 prefer alternative which requires minimal
3156 			 number of reload regs.  */
3157 		      && (reload_nregs < best_reload_nregs
3158 			  || (reload_nregs == best_reload_nregs
3159 			      && (best_reload_sum < reload_sum
3160 				  || (best_reload_sum == reload_sum
3161 				      && nalt < goal_alt_number))))))))
3162 	{
3163 	  for (nop = 0; nop < n_operands; nop++)
3164 	    {
3165 	      goal_alt_win[nop] = curr_alt_win[nop];
3166 	      goal_alt_match_win[nop] = curr_alt_match_win[nop];
3167 	      goal_alt_matches[nop] = curr_alt_matches[nop];
3168 	      goal_alt[nop] = curr_alt[nop];
3169 	      goal_alt_offmemok[nop] = curr_alt_offmemok[nop];
3170 	    }
3171 	  goal_alt_dont_inherit_ops_num = curr_alt_dont_inherit_ops_num;
3172 	  for (nop = 0; nop < curr_alt_dont_inherit_ops_num; nop++)
3173 	    goal_alt_dont_inherit_ops[nop] = curr_alt_dont_inherit_ops[nop];
3174 	  goal_alt_swapped = curr_swapped;
3175 	  best_overall = overall;
3176 	  best_losers = losers;
3177 	  best_reload_nregs = reload_nregs;
3178 	  best_reload_sum = reload_sum;
3179 	  goal_alt_number = nalt;
3180 	}
3181       if (losers == 0)
3182 	/* Everything is satisfied.  Do not process alternatives
3183 	   anymore.  */
3184 	break;
3185     fail:
3186       ;
3187     }
3188   return ok_p;
3189 }
3190 
3191 /* Make reload base reg from address AD.  */
3192 static rtx
base_to_reg(struct address_info * ad)3193 base_to_reg (struct address_info *ad)
3194 {
3195   enum reg_class cl;
3196   int code = -1;
3197   rtx new_inner = NULL_RTX;
3198   rtx new_reg = NULL_RTX;
3199   rtx_insn *insn;
3200   rtx_insn *last_insn = get_last_insn();
3201 
3202   lra_assert (ad->disp == ad->disp_term);
3203   cl = base_reg_class (ad->mode, ad->as, ad->base_outer_code,
3204                        get_index_code (ad));
3205   new_reg = lra_create_new_reg (GET_MODE (*ad->base), NULL_RTX,
3206                                 cl, "base");
3207   new_inner = simplify_gen_binary (PLUS, GET_MODE (new_reg), new_reg,
3208                                    ad->disp_term == NULL
3209                                    ? const0_rtx
3210                                    : *ad->disp_term);
3211   if (!valid_address_p (ad->mode, new_inner, ad->as))
3212     return NULL_RTX;
3213   insn = emit_insn (gen_rtx_SET (new_reg, *ad->base));
3214   code = recog_memoized (insn);
3215   if (code < 0)
3216     {
3217       delete_insns_since (last_insn);
3218       return NULL_RTX;
3219     }
3220 
3221   return new_inner;
3222 }
3223 
3224 /* Make reload base reg + DISP from address AD.  Return the new pseudo.  */
3225 static rtx
base_plus_disp_to_reg(struct address_info * ad,rtx disp)3226 base_plus_disp_to_reg (struct address_info *ad, rtx disp)
3227 {
3228   enum reg_class cl;
3229   rtx new_reg;
3230 
3231   lra_assert (ad->base == ad->base_term);
3232   cl = base_reg_class (ad->mode, ad->as, ad->base_outer_code,
3233 		       get_index_code (ad));
3234   new_reg = lra_create_new_reg (GET_MODE (*ad->base_term), NULL_RTX,
3235 				cl, "base + disp");
3236   lra_emit_add (new_reg, *ad->base_term, disp);
3237   return new_reg;
3238 }
3239 
3240 /* Make reload of index part of address AD.  Return the new
3241    pseudo.  */
3242 static rtx
index_part_to_reg(struct address_info * ad)3243 index_part_to_reg (struct address_info *ad)
3244 {
3245   rtx new_reg;
3246 
3247   new_reg = lra_create_new_reg (GET_MODE (*ad->index), NULL_RTX,
3248 				INDEX_REG_CLASS, "index term");
3249   expand_mult (GET_MODE (*ad->index), *ad->index_term,
3250 	       GEN_INT (get_index_scale (ad)), new_reg, 1);
3251   return new_reg;
3252 }
3253 
3254 /* Return true if we can add a displacement to address AD, even if that
3255    makes the address invalid.  The fix-up code requires any new address
3256    to be the sum of the BASE_TERM, INDEX and DISP_TERM fields.  */
3257 static bool
can_add_disp_p(struct address_info * ad)3258 can_add_disp_p (struct address_info *ad)
3259 {
3260   return (!ad->autoinc_p
3261 	  && ad->segment == NULL
3262 	  && ad->base == ad->base_term
3263 	  && ad->disp == ad->disp_term);
3264 }
3265 
3266 /* Make equiv substitution in address AD.  Return true if a substitution
3267    was made.  */
3268 static bool
equiv_address_substitution(struct address_info * ad)3269 equiv_address_substitution (struct address_info *ad)
3270 {
3271   rtx base_reg, new_base_reg, index_reg, new_index_reg, *base_term, *index_term;
3272   poly_int64 disp;
3273   HOST_WIDE_INT scale;
3274   bool change_p;
3275 
3276   base_term = strip_subreg (ad->base_term);
3277   if (base_term == NULL)
3278     base_reg = new_base_reg = NULL_RTX;
3279   else
3280     {
3281       base_reg = *base_term;
3282       new_base_reg = get_equiv_with_elimination (base_reg, curr_insn);
3283     }
3284   index_term = strip_subreg (ad->index_term);
3285   if (index_term == NULL)
3286     index_reg = new_index_reg = NULL_RTX;
3287   else
3288     {
3289       index_reg = *index_term;
3290       new_index_reg = get_equiv_with_elimination (index_reg, curr_insn);
3291     }
3292   if (base_reg == new_base_reg && index_reg == new_index_reg)
3293     return false;
3294   disp = 0;
3295   change_p = false;
3296   if (lra_dump_file != NULL)
3297     {
3298       fprintf (lra_dump_file, "Changing address in insn %d ",
3299 	       INSN_UID (curr_insn));
3300       dump_value_slim (lra_dump_file, *ad->outer, 1);
3301     }
3302   if (base_reg != new_base_reg)
3303     {
3304       poly_int64 offset;
3305       if (REG_P (new_base_reg))
3306 	{
3307 	  *base_term = new_base_reg;
3308 	  change_p = true;
3309 	}
3310       else if (GET_CODE (new_base_reg) == PLUS
3311 	       && REG_P (XEXP (new_base_reg, 0))
3312 	       && poly_int_rtx_p (XEXP (new_base_reg, 1), &offset)
3313 	       && can_add_disp_p (ad))
3314 	{
3315 	  disp += offset;
3316 	  *base_term = XEXP (new_base_reg, 0);
3317 	  change_p = true;
3318 	}
3319       if (ad->base_term2 != NULL)
3320 	*ad->base_term2 = *ad->base_term;
3321     }
3322   if (index_reg != new_index_reg)
3323     {
3324       poly_int64 offset;
3325       if (REG_P (new_index_reg))
3326 	{
3327 	  *index_term = new_index_reg;
3328 	  change_p = true;
3329 	}
3330       else if (GET_CODE (new_index_reg) == PLUS
3331 	       && REG_P (XEXP (new_index_reg, 0))
3332 	       && poly_int_rtx_p (XEXP (new_index_reg, 1), &offset)
3333 	       && can_add_disp_p (ad)
3334 	       && (scale = get_index_scale (ad)))
3335 	{
3336 	  disp += offset * scale;
3337 	  *index_term = XEXP (new_index_reg, 0);
3338 	  change_p = true;
3339 	}
3340     }
3341   if (maybe_ne (disp, 0))
3342     {
3343       if (ad->disp != NULL)
3344 	*ad->disp = plus_constant (GET_MODE (*ad->inner), *ad->disp, disp);
3345       else
3346 	{
3347 	  *ad->inner = plus_constant (GET_MODE (*ad->inner), *ad->inner, disp);
3348 	  update_address (ad);
3349 	}
3350       change_p = true;
3351     }
3352   if (lra_dump_file != NULL)
3353     {
3354       if (! change_p)
3355 	fprintf (lra_dump_file, " -- no change\n");
3356       else
3357 	{
3358 	  fprintf (lra_dump_file, " on equiv ");
3359 	  dump_value_slim (lra_dump_file, *ad->outer, 1);
3360 	  fprintf (lra_dump_file, "\n");
3361 	}
3362     }
3363   return change_p;
3364 }
3365 
3366 /* Major function to make reloads for an address in operand NOP or
3367    check its correctness (If CHECK_ONLY_P is true). The supported
3368    cases are:
3369 
3370    1) an address that existed before LRA started, at which point it
3371    must have been valid.  These addresses are subject to elimination
3372    and may have become invalid due to the elimination offset being out
3373    of range.
3374 
3375    2) an address created by forcing a constant to memory
3376    (force_const_to_mem).  The initial form of these addresses might
3377    not be valid, and it is this function's job to make them valid.
3378 
3379    3) a frame address formed from a register and a (possibly zero)
3380    constant offset.  As above, these addresses might not be valid and
3381    this function must make them so.
3382 
3383    Add reloads to the lists *BEFORE and *AFTER.  We might need to add
3384    reloads to *AFTER because of inc/dec, {pre, post} modify in the
3385    address.  Return true for any RTL change.
3386 
3387    The function is a helper function which does not produce all
3388    transformations (when CHECK_ONLY_P is false) which can be
3389    necessary.  It does just basic steps.  To do all necessary
3390    transformations use function process_address.  */
3391 static bool
process_address_1(int nop,bool check_only_p,rtx_insn ** before,rtx_insn ** after)3392 process_address_1 (int nop, bool check_only_p,
3393 		   rtx_insn **before, rtx_insn **after)
3394 {
3395   struct address_info ad;
3396   rtx new_reg;
3397   HOST_WIDE_INT scale;
3398   rtx op = *curr_id->operand_loc[nop];
3399   const char *constraint = curr_static_id->operand[nop].constraint;
3400   enum constraint_num cn = lookup_constraint (constraint);
3401   bool change_p = false;
3402 
3403   if (MEM_P (op)
3404       && GET_MODE (op) == BLKmode
3405       && GET_CODE (XEXP (op, 0)) == SCRATCH)
3406     return false;
3407 
3408   if (insn_extra_address_constraint (cn)
3409       /* When we find an asm operand with an address constraint that
3410 	 doesn't satisfy address_operand to begin with, we clear
3411 	 is_address, so that we don't try to make a non-address fit.
3412 	 If the asm statement got this far, it's because other
3413 	 constraints are available, and we'll use them, disregarding
3414 	 the unsatisfiable address ones.  */
3415       && curr_static_id->operand[nop].is_address)
3416     decompose_lea_address (&ad, curr_id->operand_loc[nop]);
3417   /* Do not attempt to decompose arbitrary addresses generated by combine
3418      for asm operands with loose constraints, e.g 'X'.  */
3419   else if (MEM_P (op)
3420 	   && !(INSN_CODE (curr_insn) < 0
3421 		&& get_constraint_type (cn) == CT_FIXED_FORM
3422 	        && constraint_satisfied_p (op, cn)))
3423     decompose_mem_address (&ad, op);
3424   else if (GET_CODE (op) == SUBREG
3425 	   && MEM_P (SUBREG_REG (op)))
3426     decompose_mem_address (&ad, SUBREG_REG (op));
3427   else
3428     return false;
3429   /* If INDEX_REG_CLASS is assigned to base_term already and isn't to
3430      index_term, swap them so to avoid assigning INDEX_REG_CLASS to both
3431      when INDEX_REG_CLASS is a single register class.  */
3432   if (ad.base_term != NULL
3433       && ad.index_term != NULL
3434       && ira_class_hard_regs_num[INDEX_REG_CLASS] == 1
3435       && REG_P (*ad.base_term)
3436       && REG_P (*ad.index_term)
3437       && in_class_p (*ad.base_term, INDEX_REG_CLASS, NULL)
3438       && ! in_class_p (*ad.index_term, INDEX_REG_CLASS, NULL))
3439     {
3440       std::swap (ad.base, ad.index);
3441       std::swap (ad.base_term, ad.index_term);
3442     }
3443   if (! check_only_p)
3444     change_p = equiv_address_substitution (&ad);
3445   if (ad.base_term != NULL
3446       && (process_addr_reg
3447 	  (ad.base_term, check_only_p, before,
3448 	   (ad.autoinc_p
3449 	    && !(REG_P (*ad.base_term)
3450 		 && find_regno_note (curr_insn, REG_DEAD,
3451 				     REGNO (*ad.base_term)) != NULL_RTX)
3452 	    ? after : NULL),
3453 	   base_reg_class (ad.mode, ad.as, ad.base_outer_code,
3454 			   get_index_code (&ad)))))
3455     {
3456       change_p = true;
3457       if (ad.base_term2 != NULL)
3458 	*ad.base_term2 = *ad.base_term;
3459     }
3460   if (ad.index_term != NULL
3461       && process_addr_reg (ad.index_term, check_only_p,
3462 			   before, NULL, INDEX_REG_CLASS))
3463     change_p = true;
3464 
3465   /* Target hooks sometimes don't treat extra-constraint addresses as
3466      legitimate address_operands, so handle them specially.  */
3467   if (insn_extra_address_constraint (cn)
3468       && satisfies_address_constraint_p (&ad, cn))
3469     return change_p;
3470 
3471   if (check_only_p)
3472     return change_p;
3473 
3474   /* There are three cases where the shape of *AD.INNER may now be invalid:
3475 
3476      1) the original address was valid, but either elimination or
3477      equiv_address_substitution was applied and that made
3478      the address invalid.
3479 
3480      2) the address is an invalid symbolic address created by
3481      force_const_to_mem.
3482 
3483      3) the address is a frame address with an invalid offset.
3484 
3485      4) the address is a frame address with an invalid base.
3486 
3487      All these cases involve a non-autoinc address, so there is no
3488      point revalidating other types.  */
3489   if (ad.autoinc_p || valid_address_p (op, &ad, cn))
3490     return change_p;
3491 
3492   /* Any index existed before LRA started, so we can assume that the
3493      presence and shape of the index is valid.  */
3494   push_to_sequence (*before);
3495   lra_assert (ad.disp == ad.disp_term);
3496   if (ad.base == NULL)
3497     {
3498       if (ad.index == NULL)
3499 	{
3500 	  rtx_insn *insn;
3501 	  rtx_insn *last = get_last_insn ();
3502 	  int code = -1;
3503 	  enum reg_class cl = base_reg_class (ad.mode, ad.as,
3504 					      SCRATCH, SCRATCH);
3505 	  rtx addr = *ad.inner;
3506 
3507 	  new_reg = lra_create_new_reg (Pmode, NULL_RTX, cl, "addr");
3508 	  if (HAVE_lo_sum)
3509 	    {
3510 	      /* addr => lo_sum (new_base, addr), case (2) above.  */
3511 	      insn = emit_insn (gen_rtx_SET
3512 				(new_reg,
3513 				 gen_rtx_HIGH (Pmode, copy_rtx (addr))));
3514 	      code = recog_memoized (insn);
3515 	      if (code >= 0)
3516 		{
3517 		  *ad.inner = gen_rtx_LO_SUM (Pmode, new_reg, addr);
3518 		  if (!valid_address_p (op, &ad, cn))
3519 		    {
3520 		      /* Try to put lo_sum into register.  */
3521 		      insn = emit_insn (gen_rtx_SET
3522 					(new_reg,
3523 					 gen_rtx_LO_SUM (Pmode, new_reg, addr)));
3524 		      code = recog_memoized (insn);
3525 		      if (code >= 0)
3526 			{
3527 			  *ad.inner = new_reg;
3528 			  if (!valid_address_p (op, &ad, cn))
3529 			    {
3530 			      *ad.inner = addr;
3531 			      code = -1;
3532 			    }
3533 			}
3534 
3535 		    }
3536 		}
3537 	      if (code < 0)
3538 		delete_insns_since (last);
3539 	    }
3540 
3541 	  if (code < 0)
3542 	    {
3543 	      /* addr => new_base, case (2) above.  */
3544 	      lra_emit_move (new_reg, addr);
3545 
3546 	      for (insn = last == NULL_RTX ? get_insns () : NEXT_INSN (last);
3547 		   insn != NULL_RTX;
3548 		   insn = NEXT_INSN (insn))
3549 		if (recog_memoized (insn) < 0)
3550 		  break;
3551 	      if (insn != NULL_RTX)
3552 		{
3553 		  /* Do nothing if we cannot generate right insns.
3554 		     This is analogous to reload pass behavior.  */
3555 		  delete_insns_since (last);
3556 		  end_sequence ();
3557 		  return false;
3558 		}
3559 	      *ad.inner = new_reg;
3560 	    }
3561 	}
3562       else
3563 	{
3564 	  /* index * scale + disp => new base + index * scale,
3565 	     case (1) above.  */
3566 	  enum reg_class cl = base_reg_class (ad.mode, ad.as, PLUS,
3567 					      GET_CODE (*ad.index));
3568 
3569 	  lra_assert (INDEX_REG_CLASS != NO_REGS);
3570 	  new_reg = lra_create_new_reg (Pmode, NULL_RTX, cl, "disp");
3571 	  lra_emit_move (new_reg, *ad.disp);
3572 	  *ad.inner = simplify_gen_binary (PLUS, GET_MODE (new_reg),
3573 					   new_reg, *ad.index);
3574 	}
3575     }
3576   else if (ad.index == NULL)
3577     {
3578       int regno;
3579       enum reg_class cl;
3580       rtx set;
3581       rtx_insn *insns, *last_insn;
3582       /* Try to reload base into register only if the base is invalid
3583          for the address but with valid offset, case (4) above.  */
3584       start_sequence ();
3585       new_reg = base_to_reg (&ad);
3586 
3587       /* base + disp => new base, cases (1) and (3) above.  */
3588       /* Another option would be to reload the displacement into an
3589 	 index register.  However, postreload has code to optimize
3590 	 address reloads that have the same base and different
3591 	 displacements, so reloading into an index register would
3592 	 not necessarily be a win.  */
3593       if (new_reg == NULL_RTX)
3594 	{
3595 	  /* See if the target can split the displacement into a
3596 	     legitimate new displacement from a local anchor.  */
3597 	  gcc_assert (ad.disp == ad.disp_term);
3598 	  poly_int64 orig_offset;
3599 	  rtx offset1, offset2;
3600 	  if (poly_int_rtx_p (*ad.disp, &orig_offset)
3601 	      && targetm.legitimize_address_displacement (&offset1, &offset2,
3602 							  orig_offset,
3603 							  ad.mode))
3604 	    {
3605 	      new_reg = base_plus_disp_to_reg (&ad, offset1);
3606 	      new_reg = gen_rtx_PLUS (GET_MODE (new_reg), new_reg, offset2);
3607 	    }
3608 	  else
3609 	    new_reg = base_plus_disp_to_reg (&ad, *ad.disp);
3610 	}
3611       insns = get_insns ();
3612       last_insn = get_last_insn ();
3613       /* If we generated at least two insns, try last insn source as
3614 	 an address.  If we succeed, we generate one less insn.  */
3615       if (REG_P (new_reg)
3616 	  && last_insn != insns
3617 	  && (set = single_set (last_insn)) != NULL_RTX
3618 	  && GET_CODE (SET_SRC (set)) == PLUS
3619 	  && REG_P (XEXP (SET_SRC (set), 0))
3620 	  && CONSTANT_P (XEXP (SET_SRC (set), 1)))
3621 	{
3622 	  *ad.inner = SET_SRC (set);
3623 	  if (valid_address_p (op, &ad, cn))
3624 	    {
3625 	      *ad.base_term = XEXP (SET_SRC (set), 0);
3626 	      *ad.disp_term = XEXP (SET_SRC (set), 1);
3627 	      cl = base_reg_class (ad.mode, ad.as, ad.base_outer_code,
3628 				   get_index_code (&ad));
3629 	      regno = REGNO (*ad.base_term);
3630 	      if (regno >= FIRST_PSEUDO_REGISTER
3631 		  && cl != lra_get_allocno_class (regno))
3632 		lra_change_class (regno, cl, "      Change to", true);
3633 	      new_reg = SET_SRC (set);
3634 	      delete_insns_since (PREV_INSN (last_insn));
3635 	    }
3636 	}
3637       end_sequence ();
3638       emit_insn (insns);
3639       *ad.inner = new_reg;
3640     }
3641   else if (ad.disp_term != NULL)
3642     {
3643       /* base + scale * index + disp => new base + scale * index,
3644 	 case (1) above.  */
3645       gcc_assert (ad.disp == ad.disp_term);
3646       new_reg = base_plus_disp_to_reg (&ad, *ad.disp);
3647       *ad.inner = simplify_gen_binary (PLUS, GET_MODE (new_reg),
3648 				       new_reg, *ad.index);
3649     }
3650   else if ((scale = get_index_scale (&ad)) == 1)
3651     {
3652       /* The last transformation to one reg will be made in
3653 	 curr_insn_transform function.  */
3654       end_sequence ();
3655       return false;
3656     }
3657   else if (scale != 0)
3658     {
3659       /* base + scale * index => base + new_reg,
3660 	 case (1) above.
3661       Index part of address may become invalid.  For example, we
3662       changed pseudo on the equivalent memory and a subreg of the
3663       pseudo onto the memory of different mode for which the scale is
3664       prohibitted.  */
3665       new_reg = index_part_to_reg (&ad);
3666       *ad.inner = simplify_gen_binary (PLUS, GET_MODE (new_reg),
3667 				       *ad.base_term, new_reg);
3668     }
3669   else
3670     {
3671       enum reg_class cl = base_reg_class (ad.mode, ad.as,
3672 					  SCRATCH, SCRATCH);
3673       rtx addr = *ad.inner;
3674 
3675       new_reg = lra_create_new_reg (Pmode, NULL_RTX, cl, "addr");
3676       /* addr => new_base.  */
3677       lra_emit_move (new_reg, addr);
3678       *ad.inner = new_reg;
3679     }
3680   *before = get_insns ();
3681   end_sequence ();
3682   return true;
3683 }
3684 
3685 /* If CHECK_ONLY_P is false, do address reloads until it is necessary.
3686    Use process_address_1 as a helper function.  Return true for any
3687    RTL changes.
3688 
3689    If CHECK_ONLY_P is true, just check address correctness.  Return
3690    false if the address correct.  */
3691 static bool
process_address(int nop,bool check_only_p,rtx_insn ** before,rtx_insn ** after)3692 process_address (int nop, bool check_only_p,
3693 		 rtx_insn **before, rtx_insn **after)
3694 {
3695   bool res = false;
3696 
3697   while (process_address_1 (nop, check_only_p, before, after))
3698     {
3699       if (check_only_p)
3700 	return true;
3701       res = true;
3702     }
3703   return res;
3704 }
3705 
3706 /* Emit insns to reload VALUE into a new register.  VALUE is an
3707    auto-increment or auto-decrement RTX whose operand is a register or
3708    memory location; so reloading involves incrementing that location.
3709    IN is either identical to VALUE, or some cheaper place to reload
3710    value being incremented/decremented from.
3711 
3712    INC_AMOUNT is the number to increment or decrement by (always
3713    positive and ignored for POST_MODIFY/PRE_MODIFY).
3714 
3715    Return pseudo containing the result.	 */
3716 static rtx
emit_inc(enum reg_class new_rclass,rtx in,rtx value,poly_int64 inc_amount)3717 emit_inc (enum reg_class new_rclass, rtx in, rtx value, poly_int64 inc_amount)
3718 {
3719   /* REG or MEM to be copied and incremented.  */
3720   rtx incloc = XEXP (value, 0);
3721   /* Nonzero if increment after copying.  */
3722   int post = (GET_CODE (value) == POST_DEC || GET_CODE (value) == POST_INC
3723 	      || GET_CODE (value) == POST_MODIFY);
3724   rtx_insn *last;
3725   rtx inc;
3726   rtx_insn *add_insn;
3727   int code;
3728   rtx real_in = in == value ? incloc : in;
3729   rtx result;
3730   bool plus_p = true;
3731 
3732   if (GET_CODE (value) == PRE_MODIFY || GET_CODE (value) == POST_MODIFY)
3733     {
3734       lra_assert (GET_CODE (XEXP (value, 1)) == PLUS
3735 		  || GET_CODE (XEXP (value, 1)) == MINUS);
3736       lra_assert (rtx_equal_p (XEXP (XEXP (value, 1), 0), XEXP (value, 0)));
3737       plus_p = GET_CODE (XEXP (value, 1)) == PLUS;
3738       inc = XEXP (XEXP (value, 1), 1);
3739     }
3740   else
3741     {
3742       if (GET_CODE (value) == PRE_DEC || GET_CODE (value) == POST_DEC)
3743 	inc_amount = -inc_amount;
3744 
3745       inc = gen_int_mode (inc_amount, GET_MODE (value));
3746     }
3747 
3748   if (! post && REG_P (incloc))
3749     result = incloc;
3750   else
3751     result = lra_create_new_reg (GET_MODE (value), value, new_rclass,
3752 				 "INC/DEC result");
3753 
3754   if (real_in != result)
3755     {
3756       /* First copy the location to the result register.  */
3757       lra_assert (REG_P (result));
3758       emit_insn (gen_move_insn (result, real_in));
3759     }
3760 
3761   /* We suppose that there are insns to add/sub with the constant
3762      increment permitted in {PRE/POST)_{DEC/INC/MODIFY}.  At least the
3763      old reload worked with this assumption.  If the assumption
3764      becomes wrong, we should use approach in function
3765      base_plus_disp_to_reg.  */
3766   if (in == value)
3767     {
3768       /* See if we can directly increment INCLOC.  */
3769       last = get_last_insn ();
3770       add_insn = emit_insn (plus_p
3771 			    ? gen_add2_insn (incloc, inc)
3772 			    : gen_sub2_insn (incloc, inc));
3773 
3774       code = recog_memoized (add_insn);
3775       if (code >= 0)
3776 	{
3777 	  if (! post && result != incloc)
3778 	    emit_insn (gen_move_insn (result, incloc));
3779 	  return result;
3780 	}
3781       delete_insns_since (last);
3782     }
3783 
3784   /* If couldn't do the increment directly, must increment in RESULT.
3785      The way we do this depends on whether this is pre- or
3786      post-increment.  For pre-increment, copy INCLOC to the reload
3787      register, increment it there, then save back.  */
3788   if (! post)
3789     {
3790       if (real_in != result)
3791 	emit_insn (gen_move_insn (result, real_in));
3792       if (plus_p)
3793 	emit_insn (gen_add2_insn (result, inc));
3794       else
3795 	emit_insn (gen_sub2_insn (result, inc));
3796       if (result != incloc)
3797 	emit_insn (gen_move_insn (incloc, result));
3798     }
3799   else
3800     {
3801       /* Post-increment.
3802 
3803 	 Because this might be a jump insn or a compare, and because
3804 	 RESULT may not be available after the insn in an input
3805 	 reload, we must do the incrementing before the insn being
3806 	 reloaded for.
3807 
3808 	 We have already copied IN to RESULT.  Increment the copy in
3809 	 RESULT, save that back, then decrement RESULT so it has
3810 	 the original value.  */
3811       if (plus_p)
3812 	emit_insn (gen_add2_insn (result, inc));
3813       else
3814 	emit_insn (gen_sub2_insn (result, inc));
3815       emit_insn (gen_move_insn (incloc, result));
3816       /* Restore non-modified value for the result.  We prefer this
3817 	 way because it does not require an additional hard
3818 	 register.  */
3819       if (plus_p)
3820 	{
3821 	  poly_int64 offset;
3822 	  if (poly_int_rtx_p (inc, &offset))
3823 	    emit_insn (gen_add2_insn (result,
3824 				      gen_int_mode (-offset,
3825 						    GET_MODE (result))));
3826 	  else
3827 	    emit_insn (gen_sub2_insn (result, inc));
3828 	}
3829       else
3830 	emit_insn (gen_add2_insn (result, inc));
3831     }
3832   return result;
3833 }
3834 
3835 /* Return true if the current move insn does not need processing as we
3836    already know that it satisfies its constraints.  */
3837 static bool
simple_move_p(void)3838 simple_move_p (void)
3839 {
3840   rtx dest, src;
3841   enum reg_class dclass, sclass;
3842 
3843   lra_assert (curr_insn_set != NULL_RTX);
3844   dest = SET_DEST (curr_insn_set);
3845   src = SET_SRC (curr_insn_set);
3846 
3847   /* If the instruction has multiple sets we need to process it even if it
3848      is single_set.  This can happen if one or more of the SETs are dead.
3849      See PR73650.  */
3850   if (multiple_sets (curr_insn))
3851     return false;
3852 
3853   return ((dclass = get_op_class (dest)) != NO_REGS
3854 	  && (sclass = get_op_class (src)) != NO_REGS
3855 	  /* The backend guarantees that register moves of cost 2
3856 	     never need reloads.  */
3857 	  && targetm.register_move_cost (GET_MODE (src), sclass, dclass) == 2);
3858  }
3859 
3860 /* Swap operands NOP and NOP + 1. */
3861 static inline void
swap_operands(int nop)3862 swap_operands (int nop)
3863 {
3864   std::swap (curr_operand_mode[nop], curr_operand_mode[nop + 1]);
3865   std::swap (original_subreg_reg_mode[nop], original_subreg_reg_mode[nop + 1]);
3866   std::swap (*curr_id->operand_loc[nop], *curr_id->operand_loc[nop + 1]);
3867   std::swap (equiv_substition_p[nop], equiv_substition_p[nop + 1]);
3868   /* Swap the duplicates too.  */
3869   lra_update_dup (curr_id, nop);
3870   lra_update_dup (curr_id, nop + 1);
3871 }
3872 
3873 /* Main entry point of the constraint code: search the body of the
3874    current insn to choose the best alternative.  It is mimicking insn
3875    alternative cost calculation model of former reload pass.  That is
3876    because machine descriptions were written to use this model.  This
3877    model can be changed in future.  Make commutative operand exchange
3878    if it is chosen.
3879 
3880    if CHECK_ONLY_P is false, do RTL changes to satisfy the
3881    constraints.  Return true if any change happened during function
3882    call.
3883 
3884    If CHECK_ONLY_P is true then don't do any transformation.  Just
3885    check that the insn satisfies all constraints.  If the insn does
3886    not satisfy any constraint, return true.  */
3887 static bool
curr_insn_transform(bool check_only_p)3888 curr_insn_transform (bool check_only_p)
3889 {
3890   int i, j, k;
3891   int n_operands;
3892   int n_alternatives;
3893   int n_outputs;
3894   int commutative;
3895   signed char goal_alt_matched[MAX_RECOG_OPERANDS][MAX_RECOG_OPERANDS];
3896   signed char match_inputs[MAX_RECOG_OPERANDS + 1];
3897   signed char outputs[MAX_RECOG_OPERANDS + 1];
3898   rtx_insn *before, *after;
3899   bool alt_p = false;
3900   /* Flag that the insn has been changed through a transformation.  */
3901   bool change_p;
3902   bool sec_mem_p;
3903   bool use_sec_mem_p;
3904   int max_regno_before;
3905   int reused_alternative_num;
3906 
3907   curr_insn_set = single_set (curr_insn);
3908   if (curr_insn_set != NULL_RTX && simple_move_p ())
3909     {
3910       /* We assume that the corresponding insn alternative has no
3911 	 earlier clobbers.  If it is not the case, don't define move
3912 	 cost equal to 2 for the corresponding register classes.  */
3913       lra_set_used_insn_alternative (curr_insn, LRA_NON_CLOBBERED_ALT);
3914       return false;
3915     }
3916 
3917   no_input_reloads_p = no_output_reloads_p = false;
3918   goal_alt_number = -1;
3919   change_p = sec_mem_p = false;
3920   /* JUMP_INSNs and CALL_INSNs are not allowed to have any output
3921      reloads; neither are insns that SET cc0.  Insns that use CC0 are
3922      not allowed to have any input reloads.  */
3923   if (JUMP_P (curr_insn) || CALL_P (curr_insn))
3924     no_output_reloads_p = true;
3925 
3926   if (HAVE_cc0 && reg_referenced_p (cc0_rtx, PATTERN (curr_insn)))
3927     no_input_reloads_p = true;
3928   if (HAVE_cc0 && reg_set_p (cc0_rtx, PATTERN (curr_insn)))
3929     no_output_reloads_p = true;
3930 
3931   n_operands = curr_static_id->n_operands;
3932   n_alternatives = curr_static_id->n_alternatives;
3933 
3934   /* Just return "no reloads" if insn has no operands with
3935      constraints.  */
3936   if (n_operands == 0 || n_alternatives == 0)
3937     return false;
3938 
3939   max_regno_before = max_reg_num ();
3940 
3941   for (i = 0; i < n_operands; i++)
3942     {
3943       goal_alt_matched[i][0] = -1;
3944       goal_alt_matches[i] = -1;
3945     }
3946 
3947   commutative = curr_static_id->commutative;
3948 
3949   /* Now see what we need for pseudos that didn't get hard regs or got
3950      the wrong kind of hard reg.  For this, we must consider all the
3951      operands together against the register constraints.  */
3952 
3953   best_losers = best_overall = INT_MAX;
3954   best_reload_sum = 0;
3955 
3956   curr_swapped = false;
3957   goal_alt_swapped = false;
3958 
3959   if (! check_only_p)
3960     /* Make equivalence substitution and memory subreg elimination
3961        before address processing because an address legitimacy can
3962        depend on memory mode.  */
3963     for (i = 0; i < n_operands; i++)
3964       {
3965 	rtx op, subst, old;
3966 	bool op_change_p = false;
3967 
3968 	if (curr_static_id->operand[i].is_operator)
3969 	  continue;
3970 
3971 	old = op = *curr_id->operand_loc[i];
3972 	if (GET_CODE (old) == SUBREG)
3973 	  old = SUBREG_REG (old);
3974 	subst = get_equiv_with_elimination (old, curr_insn);
3975 	original_subreg_reg_mode[i] = VOIDmode;
3976 	equiv_substition_p[i] = false;
3977 	if (subst != old)
3978 	  {
3979 	    equiv_substition_p[i] = true;
3980 	    subst = copy_rtx (subst);
3981 	    lra_assert (REG_P (old));
3982 	    if (GET_CODE (op) != SUBREG)
3983 	      *curr_id->operand_loc[i] = subst;
3984 	    else
3985 	      {
3986 		SUBREG_REG (op) = subst;
3987 		if (GET_MODE (subst) == VOIDmode)
3988 		  original_subreg_reg_mode[i] = GET_MODE (old);
3989 	      }
3990 	    if (lra_dump_file != NULL)
3991 	      {
3992 		fprintf (lra_dump_file,
3993 			 "Changing pseudo %d in operand %i of insn %u on equiv ",
3994 			 REGNO (old), i, INSN_UID (curr_insn));
3995 		dump_value_slim (lra_dump_file, subst, 1);
3996 		fprintf (lra_dump_file, "\n");
3997 	      }
3998 	    op_change_p = change_p = true;
3999 	  }
4000 	if (simplify_operand_subreg (i, GET_MODE (old)) || op_change_p)
4001 	  {
4002 	    change_p = true;
4003 	    lra_update_dup (curr_id, i);
4004 	  }
4005       }
4006 
4007   /* Reload address registers and displacements.  We do it before
4008      finding an alternative because of memory constraints.  */
4009   before = after = NULL;
4010   for (i = 0; i < n_operands; i++)
4011     if (! curr_static_id->operand[i].is_operator
4012 	&& process_address (i, check_only_p, &before, &after))
4013       {
4014 	if (check_only_p)
4015 	  return true;
4016 	change_p = true;
4017 	lra_update_dup (curr_id, i);
4018       }
4019 
4020   if (change_p)
4021     /* If we've changed the instruction then any alternative that
4022        we chose previously may no longer be valid.  */
4023     lra_set_used_insn_alternative (curr_insn, LRA_UNKNOWN_ALT);
4024 
4025   if (! check_only_p && curr_insn_set != NULL_RTX
4026       && check_and_process_move (&change_p, &sec_mem_p))
4027     return change_p;
4028 
4029  try_swapped:
4030 
4031   reused_alternative_num = check_only_p ? LRA_UNKNOWN_ALT : curr_id->used_insn_alternative;
4032   if (lra_dump_file != NULL && reused_alternative_num >= 0)
4033     fprintf (lra_dump_file, "Reusing alternative %d for insn #%u\n",
4034 	     reused_alternative_num, INSN_UID (curr_insn));
4035 
4036   if (process_alt_operands (reused_alternative_num))
4037     alt_p = true;
4038 
4039   if (check_only_p)
4040     return ! alt_p || best_losers != 0;
4041 
4042   /* If insn is commutative (it's safe to exchange a certain pair of
4043      operands) then we need to try each alternative twice, the second
4044      time matching those two operands as if we had exchanged them.  To
4045      do this, really exchange them in operands.
4046 
4047      If we have just tried the alternatives the second time, return
4048      operands to normal and drop through.  */
4049 
4050   if (reused_alternative_num < 0 && commutative >= 0)
4051     {
4052       curr_swapped = !curr_swapped;
4053       if (curr_swapped)
4054 	{
4055 	  swap_operands (commutative);
4056 	  goto try_swapped;
4057 	}
4058       else
4059 	swap_operands (commutative);
4060     }
4061 
4062   if (! alt_p && ! sec_mem_p)
4063     {
4064       /* No alternative works with reloads??  */
4065       if (INSN_CODE (curr_insn) >= 0)
4066 	fatal_insn ("unable to generate reloads for:", curr_insn);
4067       error_for_asm (curr_insn,
4068 		     "inconsistent operand constraints in an %<asm%>");
4069       lra_asm_error_p = true;
4070       /* Avoid further trouble with this insn.  Don't generate use
4071 	 pattern here as we could use the insn SP offset.  */
4072       lra_set_insn_deleted (curr_insn);
4073       return true;
4074     }
4075 
4076   /* If the best alternative is with operands 1 and 2 swapped, swap
4077      them.  Update the operand numbers of any reloads already
4078      pushed.  */
4079 
4080   if (goal_alt_swapped)
4081     {
4082       if (lra_dump_file != NULL)
4083 	fprintf (lra_dump_file, "  Commutative operand exchange in insn %u\n",
4084 		 INSN_UID (curr_insn));
4085 
4086       /* Swap the duplicates too.  */
4087       swap_operands (commutative);
4088       change_p = true;
4089     }
4090 
4091   /* Some targets' TARGET_SECONDARY_MEMORY_NEEDED (e.g. x86) are defined
4092      too conservatively.  So we use the secondary memory only if there
4093      is no any alternative without reloads.  */
4094   use_sec_mem_p = false;
4095   if (! alt_p)
4096     use_sec_mem_p = true;
4097   else if (sec_mem_p)
4098     {
4099       for (i = 0; i < n_operands; i++)
4100 	if (! goal_alt_win[i] && ! goal_alt_match_win[i])
4101 	  break;
4102       use_sec_mem_p = i < n_operands;
4103     }
4104 
4105   if (use_sec_mem_p)
4106     {
4107       int in = -1, out = -1;
4108       rtx new_reg, src, dest, rld;
4109       machine_mode sec_mode, rld_mode;
4110 
4111       lra_assert (curr_insn_set != NULL_RTX && sec_mem_p);
4112       dest = SET_DEST (curr_insn_set);
4113       src = SET_SRC (curr_insn_set);
4114       for (i = 0; i < n_operands; i++)
4115 	if (*curr_id->operand_loc[i] == dest)
4116 	  out = i;
4117 	else if (*curr_id->operand_loc[i] == src)
4118 	  in = i;
4119       for (i = 0; i < curr_static_id->n_dups; i++)
4120 	if (out < 0 && *curr_id->dup_loc[i] == dest)
4121 	  out = curr_static_id->dup_num[i];
4122 	else if (in < 0 && *curr_id->dup_loc[i] == src)
4123 	  in = curr_static_id->dup_num[i];
4124       lra_assert (out >= 0 && in >= 0
4125 		  && curr_static_id->operand[out].type == OP_OUT
4126 		  && curr_static_id->operand[in].type == OP_IN);
4127       rld = partial_subreg_p (GET_MODE (src), GET_MODE (dest)) ? src : dest;
4128       rld_mode = GET_MODE (rld);
4129       sec_mode = targetm.secondary_memory_needed_mode (rld_mode);
4130       new_reg = lra_create_new_reg (sec_mode, NULL_RTX,
4131 				    NO_REGS, "secondary");
4132       /* If the mode is changed, it should be wider.  */
4133       lra_assert (!partial_subreg_p (sec_mode, rld_mode));
4134       if (sec_mode != rld_mode)
4135         {
4136 	  /* If the target says specifically to use another mode for
4137 	     secondary memory moves we cannot reuse the original
4138 	     insn.  */
4139 	  after = emit_spill_move (false, new_reg, dest);
4140 	  lra_process_new_insns (curr_insn, NULL, after,
4141 				 "Inserting the sec. move");
4142 	  /* We may have non null BEFORE here (e.g. after address
4143 	     processing.  */
4144 	  push_to_sequence (before);
4145 	  before = emit_spill_move (true, new_reg, src);
4146 	  emit_insn (before);
4147 	  before = get_insns ();
4148 	  end_sequence ();
4149 	  lra_process_new_insns (curr_insn, before, NULL, "Changing on");
4150 	  lra_set_insn_deleted (curr_insn);
4151 	}
4152       else if (dest == rld)
4153         {
4154 	  *curr_id->operand_loc[out] = new_reg;
4155 	  lra_update_dup (curr_id, out);
4156 	  after = emit_spill_move (false, new_reg, dest);
4157 	  lra_process_new_insns (curr_insn, NULL, after,
4158 				 "Inserting the sec. move");
4159 	}
4160       else
4161 	{
4162 	  *curr_id->operand_loc[in] = new_reg;
4163 	  lra_update_dup (curr_id, in);
4164 	  /* See comments above.  */
4165 	  push_to_sequence (before);
4166 	  before = emit_spill_move (true, new_reg, src);
4167 	  emit_insn (before);
4168 	  before = get_insns ();
4169 	  end_sequence ();
4170 	  lra_process_new_insns (curr_insn, before, NULL,
4171 				 "Inserting the sec. move");
4172 	}
4173       lra_update_insn_regno_info (curr_insn);
4174       return true;
4175     }
4176 
4177   lra_assert (goal_alt_number >= 0);
4178   lra_set_used_insn_alternative (curr_insn, goal_alt_number);
4179 
4180   if (lra_dump_file != NULL)
4181     {
4182       const char *p;
4183 
4184       fprintf (lra_dump_file, "	 Choosing alt %d in insn %u:",
4185 	       goal_alt_number, INSN_UID (curr_insn));
4186       for (i = 0; i < n_operands; i++)
4187 	{
4188 	  p = (curr_static_id->operand_alternative
4189 	       [goal_alt_number * n_operands + i].constraint);
4190 	  if (*p == '\0')
4191 	    continue;
4192 	  fprintf (lra_dump_file, "  (%d) ", i);
4193 	  for (; *p != '\0' && *p != ',' && *p != '#'; p++)
4194 	    fputc (*p, lra_dump_file);
4195 	}
4196       if (INSN_CODE (curr_insn) >= 0
4197           && (p = get_insn_name (INSN_CODE (curr_insn))) != NULL)
4198         fprintf (lra_dump_file, " {%s}", p);
4199       if (maybe_ne (curr_id->sp_offset, 0))
4200 	{
4201 	  fprintf (lra_dump_file, " (sp_off=");
4202 	  print_dec (curr_id->sp_offset, lra_dump_file);
4203 	  fprintf (lra_dump_file, ")");
4204 	}
4205       fprintf (lra_dump_file, "\n");
4206     }
4207 
4208   /* Right now, for any pair of operands I and J that are required to
4209      match, with J < I, goal_alt_matches[I] is J.  Add I to
4210      goal_alt_matched[J].  */
4211 
4212   for (i = 0; i < n_operands; i++)
4213     if ((j = goal_alt_matches[i]) >= 0)
4214       {
4215 	for (k = 0; goal_alt_matched[j][k] >= 0; k++)
4216 	  ;
4217 	/* We allow matching one output operand and several input
4218 	   operands.  */
4219 	lra_assert (k == 0
4220 		    || (curr_static_id->operand[j].type == OP_OUT
4221 			&& curr_static_id->operand[i].type == OP_IN
4222 			&& (curr_static_id->operand
4223 			    [goal_alt_matched[j][0]].type == OP_IN)));
4224 	goal_alt_matched[j][k] = i;
4225 	goal_alt_matched[j][k + 1] = -1;
4226       }
4227 
4228   for (i = 0; i < n_operands; i++)
4229     goal_alt_win[i] |= goal_alt_match_win[i];
4230 
4231   /* Any constants that aren't allowed and can't be reloaded into
4232      registers are here changed into memory references.	 */
4233   for (i = 0; i < n_operands; i++)
4234     if (goal_alt_win[i])
4235       {
4236 	int regno;
4237 	enum reg_class new_class;
4238 	rtx reg = *curr_id->operand_loc[i];
4239 
4240 	if (GET_CODE (reg) == SUBREG)
4241 	  reg = SUBREG_REG (reg);
4242 
4243 	if (REG_P (reg) && (regno = REGNO (reg)) >= FIRST_PSEUDO_REGISTER)
4244 	  {
4245 	    bool ok_p = in_class_p (reg, goal_alt[i], &new_class);
4246 
4247 	    if (new_class != NO_REGS && get_reg_class (regno) != new_class)
4248 	      {
4249 		lra_assert (ok_p);
4250 		lra_change_class (regno, new_class, "      Change to", true);
4251 	      }
4252 	  }
4253       }
4254     else
4255       {
4256 	const char *constraint;
4257 	char c;
4258 	rtx op = *curr_id->operand_loc[i];
4259 	rtx subreg = NULL_RTX;
4260 	machine_mode mode = curr_operand_mode[i];
4261 
4262 	if (GET_CODE (op) == SUBREG)
4263 	  {
4264 	    subreg = op;
4265 	    op = SUBREG_REG (op);
4266 	    mode = GET_MODE (op);
4267 	  }
4268 
4269 	if (CONST_POOL_OK_P (mode, op)
4270 	    && ((targetm.preferred_reload_class
4271 		 (op, (enum reg_class) goal_alt[i]) == NO_REGS)
4272 		|| no_input_reloads_p))
4273 	  {
4274 	    rtx tem = force_const_mem (mode, op);
4275 
4276 	    change_p = true;
4277 	    if (subreg != NULL_RTX)
4278 	      tem = gen_rtx_SUBREG (mode, tem, SUBREG_BYTE (subreg));
4279 
4280 	    *curr_id->operand_loc[i] = tem;
4281 	    lra_update_dup (curr_id, i);
4282 	    process_address (i, false, &before, &after);
4283 
4284 	    /* If the alternative accepts constant pool refs directly
4285 	       there will be no reload needed at all.  */
4286 	    if (subreg != NULL_RTX)
4287 	      continue;
4288 	    /* Skip alternatives before the one requested.  */
4289 	    constraint = (curr_static_id->operand_alternative
4290 			  [goal_alt_number * n_operands + i].constraint);
4291 	    for (;
4292 		 (c = *constraint) && c != ',' && c != '#';
4293 		 constraint += CONSTRAINT_LEN (c, constraint))
4294 	      {
4295 		enum constraint_num cn = lookup_constraint (constraint);
4296 		if ((insn_extra_memory_constraint (cn)
4297 		     || insn_extra_special_memory_constraint (cn))
4298 		    && satisfies_memory_constraint_p (tem, cn))
4299 		  break;
4300 	      }
4301 	    if (c == '\0' || c == ',' || c == '#')
4302 	      continue;
4303 
4304 	    goal_alt_win[i] = true;
4305 	  }
4306       }
4307 
4308   n_outputs = 0;
4309   outputs[0] = -1;
4310   for (i = 0; i < n_operands; i++)
4311     {
4312       int regno;
4313       bool optional_p = false;
4314       rtx old, new_reg;
4315       rtx op = *curr_id->operand_loc[i];
4316 
4317       if (goal_alt_win[i])
4318 	{
4319 	  if (goal_alt[i] == NO_REGS
4320 	      && REG_P (op)
4321 	      /* When we assign NO_REGS it means that we will not
4322 		 assign a hard register to the scratch pseudo by
4323 		 assigment pass and the scratch pseudo will be
4324 		 spilled.  Spilled scratch pseudos are transformed
4325 		 back to scratches at the LRA end.  */
4326 	      && lra_former_scratch_operand_p (curr_insn, i)
4327 	      && lra_former_scratch_p (REGNO (op)))
4328 	    {
4329 	      int regno = REGNO (op);
4330 	      lra_change_class (regno, NO_REGS, "      Change to", true);
4331 	      if (lra_get_regno_hard_regno (regno) >= 0)
4332 		/* We don't have to mark all insn affected by the
4333 		   spilled pseudo as there is only one such insn, the
4334 		   current one.  */
4335 		reg_renumber[regno] = -1;
4336 	      lra_assert (bitmap_single_bit_set_p
4337 			  (&lra_reg_info[REGNO (op)].insn_bitmap));
4338 	    }
4339 	  /* We can do an optional reload.  If the pseudo got a hard
4340 	     reg, we might improve the code through inheritance.  If
4341 	     it does not get a hard register we coalesce memory/memory
4342 	     moves later.  Ignore move insns to avoid cycling.  */
4343 	  if (! lra_simple_p
4344 	      && lra_undo_inheritance_iter < LRA_MAX_INHERITANCE_PASSES
4345 	      && goal_alt[i] != NO_REGS && REG_P (op)
4346 	      && (regno = REGNO (op)) >= FIRST_PSEUDO_REGISTER
4347 	      && regno < new_regno_start
4348 	      && ! lra_former_scratch_p (regno)
4349 	      && reg_renumber[regno] < 0
4350 	      /* Check that the optional reload pseudo will be able to
4351 		 hold given mode value.  */
4352 	      && ! (prohibited_class_reg_set_mode_p
4353 		    (goal_alt[i], reg_class_contents[goal_alt[i]],
4354 		     PSEUDO_REGNO_MODE (regno)))
4355 	      && (curr_insn_set == NULL_RTX
4356 		  || !((REG_P (SET_SRC (curr_insn_set))
4357 			|| MEM_P (SET_SRC (curr_insn_set))
4358 			|| GET_CODE (SET_SRC (curr_insn_set)) == SUBREG)
4359 		       && (REG_P (SET_DEST (curr_insn_set))
4360 			   || MEM_P (SET_DEST (curr_insn_set))
4361 			   || GET_CODE (SET_DEST (curr_insn_set)) == SUBREG))))
4362 	    optional_p = true;
4363 	  else if (goal_alt_matched[i][0] != -1
4364 		   && curr_static_id->operand[i].type == OP_OUT
4365 		   && (curr_static_id->operand_alternative
4366 		       [goal_alt_number * n_operands + i].earlyclobber)
4367 		   && REG_P (op))
4368 	    {
4369 	      for (j = 0; goal_alt_matched[i][j] != -1; j++)
4370 		{
4371 		  rtx op2 = *curr_id->operand_loc[goal_alt_matched[i][j]];
4372 
4373 		  if (REG_P (op2) && REGNO (op) != REGNO (op2))
4374 		    break;
4375 		}
4376 	      if (goal_alt_matched[i][j] != -1)
4377 		{
4378 		  /* Generate reloads for different output and matched
4379 		     input registers.  This is the easiest way to avoid
4380 		     creation of non-existing register conflicts in
4381 		     lra-lives.c.  */
4382 		  match_reload (i, goal_alt_matched[i], outputs, goal_alt[i], &before,
4383 				&after, TRUE);
4384 		  outputs[n_outputs++] = i;
4385 		  outputs[n_outputs] = -1;
4386 		}
4387 	      continue;
4388 	    }
4389 	  else
4390 	    continue;
4391 	}
4392 
4393       /* Operands that match previous ones have already been handled.  */
4394       if (goal_alt_matches[i] >= 0)
4395 	continue;
4396 
4397       /* We should not have an operand with a non-offsettable address
4398 	 appearing where an offsettable address will do.  It also may
4399 	 be a case when the address should be special in other words
4400 	 not a general one (e.g. it needs no index reg).  */
4401       if (goal_alt_matched[i][0] == -1 && goal_alt_offmemok[i] && MEM_P (op))
4402 	{
4403 	  enum reg_class rclass;
4404 	  rtx *loc = &XEXP (op, 0);
4405 	  enum rtx_code code = GET_CODE (*loc);
4406 
4407 	  push_to_sequence (before);
4408 	  rclass = base_reg_class (GET_MODE (op), MEM_ADDR_SPACE (op),
4409 				   MEM, SCRATCH);
4410 	  if (GET_RTX_CLASS (code) == RTX_AUTOINC)
4411 	    new_reg = emit_inc (rclass, *loc, *loc,
4412 				/* This value does not matter for MODIFY.  */
4413 				GET_MODE_SIZE (GET_MODE (op)));
4414 	  else if (get_reload_reg (OP_IN, Pmode, *loc, rclass, FALSE,
4415 				   "offsetable address", &new_reg))
4416 	    {
4417 	      rtx addr = *loc;
4418 	      enum rtx_code code = GET_CODE (addr);
4419 
4420 	      if (code == AND && CONST_INT_P (XEXP (addr, 1)))
4421 		/* (and ... (const_int -X)) is used to align to X bytes.  */
4422 		addr = XEXP (*loc, 0);
4423 	      lra_emit_move (new_reg, addr);
4424 	      if (addr != *loc)
4425 		emit_move_insn (new_reg, gen_rtx_AND (GET_MODE (new_reg), new_reg, XEXP (*loc, 1)));
4426 	    }
4427 	  before = get_insns ();
4428 	  end_sequence ();
4429 	  *loc = new_reg;
4430 	  lra_update_dup (curr_id, i);
4431 	}
4432       else if (goal_alt_matched[i][0] == -1)
4433 	{
4434 	  machine_mode mode;
4435 	  rtx reg, *loc;
4436 	  int hard_regno;
4437 	  enum op_type type = curr_static_id->operand[i].type;
4438 
4439 	  loc = curr_id->operand_loc[i];
4440 	  mode = curr_operand_mode[i];
4441 	  if (GET_CODE (*loc) == SUBREG)
4442 	    {
4443 	      reg = SUBREG_REG (*loc);
4444 	      poly_int64 byte = SUBREG_BYTE (*loc);
4445 	      if (REG_P (reg)
4446 		  /* Strict_low_part requires reloading the register and not
4447 		     just the subreg.  Likewise for a strict subreg no wider
4448 		     than a word for WORD_REGISTER_OPERATIONS targets.  */
4449 		  && (curr_static_id->operand[i].strict_low
4450 		      || (!paradoxical_subreg_p (mode, GET_MODE (reg))
4451 			  && (hard_regno
4452 			      = get_try_hard_regno (REGNO (reg))) >= 0
4453 			  && (simplify_subreg_regno
4454 			      (hard_regno,
4455 			       GET_MODE (reg), byte, mode) < 0)
4456 			  && (goal_alt[i] == NO_REGS
4457 			      || (simplify_subreg_regno
4458 				  (ira_class_hard_regs[goal_alt[i]][0],
4459 				   GET_MODE (reg), byte, mode) >= 0)))
4460 		      || (partial_subreg_p (mode, GET_MODE (reg))
4461 			  && known_le (GET_MODE_SIZE (GET_MODE (reg)),
4462 				       UNITS_PER_WORD)
4463 			  && WORD_REGISTER_OPERATIONS)))
4464 		{
4465 		  /* An OP_INOUT is required when reloading a subreg of a
4466 		     mode wider than a word to ensure that data beyond the
4467 		     word being reloaded is preserved.  Also automatically
4468 		     ensure that strict_low_part reloads are made into
4469 		     OP_INOUT which should already be true from the backend
4470 		     constraints.  */
4471 		  if (type == OP_OUT
4472 		      && (curr_static_id->operand[i].strict_low
4473 			  || read_modify_subreg_p (*loc)))
4474 		    type = OP_INOUT;
4475 		  loc = &SUBREG_REG (*loc);
4476 		  mode = GET_MODE (*loc);
4477 		}
4478 	    }
4479 	  old = *loc;
4480 	  if (get_reload_reg (type, mode, old, goal_alt[i],
4481 			      loc != curr_id->operand_loc[i], "", &new_reg)
4482 	      && type != OP_OUT)
4483 	    {
4484 	      push_to_sequence (before);
4485 	      lra_emit_move (new_reg, old);
4486 	      before = get_insns ();
4487 	      end_sequence ();
4488 	    }
4489 	  *loc = new_reg;
4490 	  if (type != OP_IN
4491 	      && find_reg_note (curr_insn, REG_UNUSED, old) == NULL_RTX)
4492 	    {
4493 	      start_sequence ();
4494 	      lra_emit_move (type == OP_INOUT ? copy_rtx (old) : old, new_reg);
4495 	      emit_insn (after);
4496 	      after = get_insns ();
4497 	      end_sequence ();
4498 	      *loc = new_reg;
4499 	    }
4500 	  for (j = 0; j < goal_alt_dont_inherit_ops_num; j++)
4501 	    if (goal_alt_dont_inherit_ops[j] == i)
4502 	      {
4503 		lra_set_regno_unique_value (REGNO (new_reg));
4504 		break;
4505 	      }
4506 	  lra_update_dup (curr_id, i);
4507 	}
4508       else if (curr_static_id->operand[i].type == OP_IN
4509 	       && (curr_static_id->operand[goal_alt_matched[i][0]].type
4510 		   == OP_OUT
4511 		   || (curr_static_id->operand[goal_alt_matched[i][0]].type
4512 		       == OP_INOUT
4513 		       && (operands_match_p
4514 			   (*curr_id->operand_loc[i],
4515 			    *curr_id->operand_loc[goal_alt_matched[i][0]],
4516 			    -1)))))
4517 	{
4518 	  /* generate reloads for input and matched outputs.  */
4519 	  match_inputs[0] = i;
4520 	  match_inputs[1] = -1;
4521 	  match_reload (goal_alt_matched[i][0], match_inputs, outputs,
4522 			goal_alt[i], &before, &after,
4523 			curr_static_id->operand_alternative
4524 			[goal_alt_number * n_operands + goal_alt_matched[i][0]]
4525 			.earlyclobber);
4526 	}
4527       else if ((curr_static_id->operand[i].type == OP_OUT
4528 		|| (curr_static_id->operand[i].type == OP_INOUT
4529 		    && (operands_match_p
4530 			(*curr_id->operand_loc[i],
4531 			 *curr_id->operand_loc[goal_alt_matched[i][0]],
4532 			 -1))))
4533 	       && (curr_static_id->operand[goal_alt_matched[i][0]].type
4534 		    == OP_IN))
4535 	/* Generate reloads for output and matched inputs.  */
4536 	match_reload (i, goal_alt_matched[i], outputs, goal_alt[i], &before,
4537 		      &after, curr_static_id->operand_alternative
4538 			      [goal_alt_number * n_operands + i].earlyclobber);
4539       else if (curr_static_id->operand[i].type == OP_IN
4540 	       && (curr_static_id->operand[goal_alt_matched[i][0]].type
4541 		   == OP_IN))
4542 	{
4543 	  /* Generate reloads for matched inputs.  */
4544 	  match_inputs[0] = i;
4545 	  for (j = 0; (k = goal_alt_matched[i][j]) >= 0; j++)
4546 	    match_inputs[j + 1] = k;
4547 	  match_inputs[j + 1] = -1;
4548 	  match_reload (-1, match_inputs, outputs, goal_alt[i], &before,
4549 			&after, false);
4550 	}
4551       else
4552 	/* We must generate code in any case when function
4553 	   process_alt_operands decides that it is possible.  */
4554 	gcc_unreachable ();
4555 
4556       /* Memorise processed outputs so that output remaining to be processed
4557 	 can avoid using the same register value (see match_reload).  */
4558       if (curr_static_id->operand[i].type == OP_OUT)
4559 	{
4560 	  outputs[n_outputs++] = i;
4561 	  outputs[n_outputs] = -1;
4562 	}
4563 
4564       if (optional_p)
4565 	{
4566 	  rtx reg = op;
4567 
4568 	  lra_assert (REG_P (reg));
4569 	  regno = REGNO (reg);
4570 	  op = *curr_id->operand_loc[i]; /* Substitution.  */
4571 	  if (GET_CODE (op) == SUBREG)
4572 	    op = SUBREG_REG (op);
4573 	  gcc_assert (REG_P (op) && (int) REGNO (op) >= new_regno_start);
4574 	  bitmap_set_bit (&lra_optional_reload_pseudos, REGNO (op));
4575 	  lra_reg_info[REGNO (op)].restore_rtx = reg;
4576 	  if (lra_dump_file != NULL)
4577 	    fprintf (lra_dump_file,
4578 		     "      Making reload reg %d for reg %d optional\n",
4579 		     REGNO (op), regno);
4580 	}
4581     }
4582   if (before != NULL_RTX || after != NULL_RTX
4583       || max_regno_before != max_reg_num ())
4584     change_p = true;
4585   if (change_p)
4586     {
4587       lra_update_operator_dups (curr_id);
4588       /* Something changes -- process the insn.	 */
4589       lra_update_insn_regno_info (curr_insn);
4590     }
4591   lra_process_new_insns (curr_insn, before, after, "Inserting insn reload");
4592   return change_p;
4593 }
4594 
4595 /* Return true if INSN satisfies all constraints.  In other words, no
4596    reload insns are needed.  */
4597 bool
lra_constrain_insn(rtx_insn * insn)4598 lra_constrain_insn (rtx_insn *insn)
4599 {
4600   int saved_new_regno_start = new_regno_start;
4601   int saved_new_insn_uid_start = new_insn_uid_start;
4602   bool change_p;
4603 
4604   curr_insn = insn;
4605   curr_id = lra_get_insn_recog_data (curr_insn);
4606   curr_static_id = curr_id->insn_static_data;
4607   new_insn_uid_start = get_max_uid ();
4608   new_regno_start = max_reg_num ();
4609   change_p = curr_insn_transform (true);
4610   new_regno_start = saved_new_regno_start;
4611   new_insn_uid_start = saved_new_insn_uid_start;
4612   return ! change_p;
4613 }
4614 
4615 /* Return true if X is in LIST.	 */
4616 static bool
in_list_p(rtx x,rtx list)4617 in_list_p (rtx x, rtx list)
4618 {
4619   for (; list != NULL_RTX; list = XEXP (list, 1))
4620     if (XEXP (list, 0) == x)
4621       return true;
4622   return false;
4623 }
4624 
4625 /* Return true if X contains an allocatable hard register (if
4626    HARD_REG_P) or a (spilled if SPILLED_P) pseudo.  */
4627 static bool
contains_reg_p(rtx x,bool hard_reg_p,bool spilled_p)4628 contains_reg_p (rtx x, bool hard_reg_p, bool spilled_p)
4629 {
4630   int i, j;
4631   const char *fmt;
4632   enum rtx_code code;
4633 
4634   code = GET_CODE (x);
4635   if (REG_P (x))
4636     {
4637       int regno = REGNO (x);
4638       HARD_REG_SET alloc_regs;
4639 
4640       if (hard_reg_p)
4641 	{
4642 	  if (regno >= FIRST_PSEUDO_REGISTER)
4643 	    regno = lra_get_regno_hard_regno (regno);
4644 	  if (regno < 0)
4645 	    return false;
4646 	  alloc_regs = ~lra_no_alloc_regs;
4647 	  return overlaps_hard_reg_set_p (alloc_regs, GET_MODE (x), regno);
4648 	}
4649       else
4650 	{
4651 	  if (regno < FIRST_PSEUDO_REGISTER)
4652 	    return false;
4653 	  if (! spilled_p)
4654 	    return true;
4655 	  return lra_get_regno_hard_regno (regno) < 0;
4656 	}
4657     }
4658   fmt = GET_RTX_FORMAT (code);
4659   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4660     {
4661       if (fmt[i] == 'e')
4662 	{
4663 	  if (contains_reg_p (XEXP (x, i), hard_reg_p, spilled_p))
4664 	    return true;
4665 	}
4666       else if (fmt[i] == 'E')
4667 	{
4668 	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4669 	    if (contains_reg_p (XVECEXP (x, i, j), hard_reg_p, spilled_p))
4670 	      return true;
4671 	}
4672     }
4673   return false;
4674 }
4675 
4676 /* Process all regs in location *LOC and change them on equivalent
4677    substitution.  Return true if any change was done.  */
4678 static bool
loc_equivalence_change_p(rtx * loc)4679 loc_equivalence_change_p (rtx *loc)
4680 {
4681   rtx subst, reg, x = *loc;
4682   bool result = false;
4683   enum rtx_code code = GET_CODE (x);
4684   const char *fmt;
4685   int i, j;
4686 
4687   if (code == SUBREG)
4688     {
4689       reg = SUBREG_REG (x);
4690       if ((subst = get_equiv_with_elimination (reg, curr_insn)) != reg
4691 	  && GET_MODE (subst) == VOIDmode)
4692 	{
4693 	  /* We cannot reload debug location.  Simplify subreg here
4694 	     while we know the inner mode.  */
4695 	  *loc = simplify_gen_subreg (GET_MODE (x), subst,
4696 				      GET_MODE (reg), SUBREG_BYTE (x));
4697 	  return true;
4698 	}
4699     }
4700   if (code == REG && (subst = get_equiv_with_elimination (x, curr_insn)) != x)
4701     {
4702       *loc = subst;
4703       return true;
4704     }
4705 
4706   /* Scan all the operand sub-expressions.  */
4707   fmt = GET_RTX_FORMAT (code);
4708   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4709     {
4710       if (fmt[i] == 'e')
4711 	result = loc_equivalence_change_p (&XEXP (x, i)) || result;
4712       else if (fmt[i] == 'E')
4713 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4714 	  result
4715 	    = loc_equivalence_change_p (&XVECEXP (x, i, j)) || result;
4716     }
4717   return result;
4718 }
4719 
4720 /* Similar to loc_equivalence_change_p, but for use as
4721    simplify_replace_fn_rtx callback.  DATA is insn for which the
4722    elimination is done.  If it null we don't do the elimination.  */
4723 static rtx
loc_equivalence_callback(rtx loc,const_rtx,void * data)4724 loc_equivalence_callback (rtx loc, const_rtx, void *data)
4725 {
4726   if (!REG_P (loc))
4727     return NULL_RTX;
4728 
4729   rtx subst = (data == NULL
4730 	       ? get_equiv (loc) : get_equiv_with_elimination (loc, (rtx_insn *) data));
4731   if (subst != loc)
4732     return subst;
4733 
4734   return NULL_RTX;
4735 }
4736 
4737 /* Maximum number of generated reload insns per an insn.  It is for
4738    preventing this pass cycling in a bug case.	*/
4739 #define MAX_RELOAD_INSNS_NUMBER LRA_MAX_INSN_RELOADS
4740 
4741 /* The current iteration number of this LRA pass.  */
4742 int lra_constraint_iter;
4743 
4744 /* True if we should during assignment sub-pass check assignment
4745    correctness for all pseudos and spill some of them to correct
4746    conflicts.  It can be necessary when we substitute equiv which
4747    needs checking register allocation correctness because the
4748    equivalent value contains allocatable hard registers, or when we
4749    restore multi-register pseudo, or when we change the insn code and
4750    its operand became INOUT operand when it was IN one before.  */
4751 bool check_and_force_assignment_correctness_p;
4752 
4753 /* Return true if REGNO is referenced in more than one block.  */
4754 static bool
multi_block_pseudo_p(int regno)4755 multi_block_pseudo_p (int regno)
4756 {
4757   basic_block bb = NULL;
4758   unsigned int uid;
4759   bitmap_iterator bi;
4760 
4761   if (regno < FIRST_PSEUDO_REGISTER)
4762     return false;
4763 
4764     EXECUTE_IF_SET_IN_BITMAP (&lra_reg_info[regno].insn_bitmap, 0, uid, bi)
4765       if (bb == NULL)
4766 	bb = BLOCK_FOR_INSN (lra_insn_recog_data[uid]->insn);
4767       else if (BLOCK_FOR_INSN (lra_insn_recog_data[uid]->insn) != bb)
4768 	return true;
4769     return false;
4770 }
4771 
4772 /* Return true if LIST contains a deleted insn.  */
4773 static bool
contains_deleted_insn_p(rtx_insn_list * list)4774 contains_deleted_insn_p (rtx_insn_list *list)
4775 {
4776   for (; list != NULL_RTX; list = list->next ())
4777     if (NOTE_P (list->insn ())
4778 	&& NOTE_KIND (list->insn ()) == NOTE_INSN_DELETED)
4779       return true;
4780   return false;
4781 }
4782 
4783 /* Return true if X contains a pseudo dying in INSN.  */
4784 static bool
dead_pseudo_p(rtx x,rtx_insn * insn)4785 dead_pseudo_p (rtx x, rtx_insn *insn)
4786 {
4787   int i, j;
4788   const char *fmt;
4789   enum rtx_code code;
4790 
4791   if (REG_P (x))
4792     return (insn != NULL_RTX
4793 	    && find_regno_note (insn, REG_DEAD, REGNO (x)) != NULL_RTX);
4794   code = GET_CODE (x);
4795   fmt = GET_RTX_FORMAT (code);
4796   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4797     {
4798       if (fmt[i] == 'e')
4799 	{
4800 	  if (dead_pseudo_p (XEXP (x, i), insn))
4801 	    return true;
4802 	}
4803       else if (fmt[i] == 'E')
4804 	{
4805 	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4806 	    if (dead_pseudo_p (XVECEXP (x, i, j), insn))
4807 	      return true;
4808 	}
4809     }
4810   return false;
4811 }
4812 
4813 /* Return true if INSN contains a dying pseudo in INSN right hand
4814    side.  */
4815 static bool
insn_rhs_dead_pseudo_p(rtx_insn * insn)4816 insn_rhs_dead_pseudo_p (rtx_insn *insn)
4817 {
4818   rtx set = single_set (insn);
4819 
4820   gcc_assert (set != NULL);
4821   return dead_pseudo_p (SET_SRC (set), insn);
4822 }
4823 
4824 /* Return true if any init insn of REGNO contains a dying pseudo in
4825    insn right hand side.  */
4826 static bool
init_insn_rhs_dead_pseudo_p(int regno)4827 init_insn_rhs_dead_pseudo_p (int regno)
4828 {
4829   rtx_insn_list *insns = ira_reg_equiv[regno].init_insns;
4830 
4831   if (insns == NULL)
4832     return false;
4833   for (; insns != NULL_RTX; insns = insns->next ())
4834     if (insn_rhs_dead_pseudo_p (insns->insn ()))
4835       return true;
4836   return false;
4837 }
4838 
4839 /* Return TRUE if REGNO has a reverse equivalence.  The equivalence is
4840    reverse only if we have one init insn with given REGNO as a
4841    source.  */
4842 static bool
reverse_equiv_p(int regno)4843 reverse_equiv_p (int regno)
4844 {
4845   rtx_insn_list *insns = ira_reg_equiv[regno].init_insns;
4846   rtx set;
4847 
4848   if (insns == NULL)
4849     return false;
4850   if (! INSN_P (insns->insn ())
4851       || insns->next () != NULL)
4852     return false;
4853   if ((set = single_set (insns->insn ())) == NULL_RTX)
4854     return false;
4855   return REG_P (SET_SRC (set)) && (int) REGNO (SET_SRC (set)) == regno;
4856 }
4857 
4858 /* Return TRUE if REGNO was reloaded in an equivalence init insn.  We
4859    call this function only for non-reverse equivalence.  */
4860 static bool
contains_reloaded_insn_p(int regno)4861 contains_reloaded_insn_p (int regno)
4862 {
4863   rtx set;
4864   rtx_insn_list *list = ira_reg_equiv[regno].init_insns;
4865 
4866   for (; list != NULL; list = list->next ())
4867     if ((set = single_set (list->insn ())) == NULL_RTX
4868 	|| ! REG_P (SET_DEST (set))
4869 	|| (int) REGNO (SET_DEST (set)) != regno)
4870       return true;
4871   return false;
4872 }
4873 
4874 /* Entry function of LRA constraint pass.  Return true if the
4875    constraint pass did change the code.	 */
4876 bool
lra_constraints(bool first_p)4877 lra_constraints (bool first_p)
4878 {
4879   bool changed_p;
4880   int i, hard_regno, new_insns_num;
4881   unsigned int min_len, new_min_len, uid;
4882   rtx set, x, reg, dest_reg;
4883   basic_block last_bb;
4884   bitmap_iterator bi;
4885 
4886   lra_constraint_iter++;
4887   if (lra_dump_file != NULL)
4888     fprintf (lra_dump_file, "\n********** Local #%d: **********\n\n",
4889 	     lra_constraint_iter);
4890   changed_p = false;
4891   if (pic_offset_table_rtx
4892       && REGNO (pic_offset_table_rtx) >= FIRST_PSEUDO_REGISTER)
4893     check_and_force_assignment_correctness_p = true;
4894   else if (first_p)
4895     /* On the first iteration we should check IRA assignment
4896        correctness.  In rare cases, the assignments can be wrong as
4897        early clobbers operands are ignored in IRA or usages of
4898        paradoxical sub-registers are not taken into account by
4899        IRA.  */
4900     check_and_force_assignment_correctness_p = true;
4901   new_insn_uid_start = get_max_uid ();
4902   new_regno_start = first_p ? lra_constraint_new_regno_start : max_reg_num ();
4903   /* Mark used hard regs for target stack size calulations.  */
4904   for (i = FIRST_PSEUDO_REGISTER; i < new_regno_start; i++)
4905     if (lra_reg_info[i].nrefs != 0
4906 	&& (hard_regno = lra_get_regno_hard_regno (i)) >= 0)
4907       {
4908 	int j, nregs;
4909 
4910 	nregs = hard_regno_nregs (hard_regno, lra_reg_info[i].biggest_mode);
4911 	for (j = 0; j < nregs; j++)
4912 	  df_set_regs_ever_live (hard_regno + j, true);
4913       }
4914   /* Do elimination before the equivalence processing as we can spill
4915      some pseudos during elimination.  */
4916   lra_eliminate (false, first_p);
4917   auto_bitmap equiv_insn_bitmap (&reg_obstack);
4918   for (i = FIRST_PSEUDO_REGISTER; i < new_regno_start; i++)
4919     if (lra_reg_info[i].nrefs != 0)
4920       {
4921 	ira_reg_equiv[i].profitable_p = true;
4922 	reg = regno_reg_rtx[i];
4923 	if (lra_get_regno_hard_regno (i) < 0 && (x = get_equiv (reg)) != reg)
4924 	  {
4925 	    bool pseudo_p = contains_reg_p (x, false, false);
4926 
4927 	    /* After RTL transformation, we cannot guarantee that
4928 	       pseudo in the substitution was not reloaded which might
4929 	       make equivalence invalid.  For example, in reverse
4930 	       equiv of p0
4931 
4932 	       p0 <- ...
4933 	       ...
4934 	       equiv_mem <- p0
4935 
4936 	       the memory address register was reloaded before the 2nd
4937 	       insn.  */
4938 	    if ((! first_p && pseudo_p)
4939 		/* We don't use DF for compilation speed sake.  So it
4940 		   is problematic to update live info when we use an
4941 		   equivalence containing pseudos in more than one
4942 		   BB.  */
4943 		|| (pseudo_p && multi_block_pseudo_p (i))
4944 		/* If an init insn was deleted for some reason, cancel
4945 		   the equiv.  We could update the equiv insns after
4946 		   transformations including an equiv insn deletion
4947 		   but it is not worthy as such cases are extremely
4948 		   rare.  */
4949 		|| contains_deleted_insn_p (ira_reg_equiv[i].init_insns)
4950 		/* If it is not a reverse equivalence, we check that a
4951 		   pseudo in rhs of the init insn is not dying in the
4952 		   insn.  Otherwise, the live info at the beginning of
4953 		   the corresponding BB might be wrong after we
4954 		   removed the insn.  When the equiv can be a
4955 		   constant, the right hand side of the init insn can
4956 		   be a pseudo.  */
4957 		|| (! reverse_equiv_p (i)
4958 		    && (init_insn_rhs_dead_pseudo_p (i)
4959 			/* If we reloaded the pseudo in an equivalence
4960 			   init insn, we cannot remove the equiv init
4961 			   insns and the init insns might write into
4962 			   const memory in this case.  */
4963 			|| contains_reloaded_insn_p (i)))
4964 		/* Prevent access beyond equivalent memory for
4965 		   paradoxical subregs.  */
4966 		|| (MEM_P (x)
4967 		    && maybe_gt (GET_MODE_SIZE (lra_reg_info[i].biggest_mode),
4968 				 GET_MODE_SIZE (GET_MODE (x))))
4969 		|| (pic_offset_table_rtx
4970 		    && ((CONST_POOL_OK_P (PSEUDO_REGNO_MODE (i), x)
4971 			 && (targetm.preferred_reload_class
4972 			     (x, lra_get_allocno_class (i)) == NO_REGS))
4973 			|| contains_symbol_ref_p (x))))
4974 	      ira_reg_equiv[i].defined_p = false;
4975 	    if (contains_reg_p (x, false, true))
4976 	      ira_reg_equiv[i].profitable_p = false;
4977 	    if (get_equiv (reg) != reg)
4978 	      bitmap_ior_into (equiv_insn_bitmap, &lra_reg_info[i].insn_bitmap);
4979 	  }
4980       }
4981   for (i = FIRST_PSEUDO_REGISTER; i < new_regno_start; i++)
4982     update_equiv (i);
4983   /* We should add all insns containing pseudos which should be
4984      substituted by their equivalences.  */
4985   EXECUTE_IF_SET_IN_BITMAP (equiv_insn_bitmap, 0, uid, bi)
4986     lra_push_insn_by_uid (uid);
4987   min_len = lra_insn_stack_length ();
4988   new_insns_num = 0;
4989   last_bb = NULL;
4990   changed_p = false;
4991   while ((new_min_len = lra_insn_stack_length ()) != 0)
4992     {
4993       curr_insn = lra_pop_insn ();
4994       --new_min_len;
4995       curr_bb = BLOCK_FOR_INSN (curr_insn);
4996       if (curr_bb != last_bb)
4997 	{
4998 	  last_bb = curr_bb;
4999 	  bb_reload_num = lra_curr_reload_num;
5000 	}
5001       if (min_len > new_min_len)
5002 	{
5003 	  min_len = new_min_len;
5004 	  new_insns_num = 0;
5005 	}
5006       if (new_insns_num > MAX_RELOAD_INSNS_NUMBER)
5007 	internal_error
5008 	  ("maximum number of generated reload insns per insn achieved (%d)",
5009 	   MAX_RELOAD_INSNS_NUMBER);
5010       new_insns_num++;
5011       if (DEBUG_INSN_P (curr_insn))
5012 	{
5013 	  /* We need to check equivalence in debug insn and change
5014 	     pseudo to the equivalent value if necessary.  */
5015 	  curr_id = lra_get_insn_recog_data (curr_insn);
5016 	  if (bitmap_bit_p (equiv_insn_bitmap, INSN_UID (curr_insn)))
5017 	    {
5018 	      rtx old = *curr_id->operand_loc[0];
5019 	      *curr_id->operand_loc[0]
5020 		= simplify_replace_fn_rtx (old, NULL_RTX,
5021 					   loc_equivalence_callback, curr_insn);
5022 	      if (old != *curr_id->operand_loc[0])
5023 		{
5024 		  lra_update_insn_regno_info (curr_insn);
5025 		  changed_p = true;
5026 		}
5027 	    }
5028 	}
5029       else if (INSN_P (curr_insn))
5030 	{
5031 	  if ((set = single_set (curr_insn)) != NULL_RTX)
5032 	    {
5033 	      dest_reg = SET_DEST (set);
5034 	      /* The equivalence pseudo could be set up as SUBREG in a
5035 		 case when it is a call restore insn in a mode
5036 		 different from the pseudo mode.  */
5037 	      if (GET_CODE (dest_reg) == SUBREG)
5038 		dest_reg = SUBREG_REG (dest_reg);
5039 	      if ((REG_P (dest_reg)
5040 		   && (x = get_equiv (dest_reg)) != dest_reg
5041 		   /* Remove insns which set up a pseudo whose value
5042 		      cannot be changed.  Such insns might be not in
5043 		      init_insns because we don't update equiv data
5044 		      during insn transformations.
5045 
5046 		      As an example, let suppose that a pseudo got
5047 		      hard register and on the 1st pass was not
5048 		      changed to equivalent constant.  We generate an
5049 		      additional insn setting up the pseudo because of
5050 		      secondary memory movement.  Then the pseudo is
5051 		      spilled and we use the equiv constant.  In this
5052 		      case we should remove the additional insn and
5053 		      this insn is not init_insns list.  */
5054 		   && (! MEM_P (x) || MEM_READONLY_P (x)
5055 		       /* Check that this is actually an insn setting
5056 			  up the equivalence.  */
5057 		       || in_list_p (curr_insn,
5058 				     ira_reg_equiv
5059 				     [REGNO (dest_reg)].init_insns)))
5060 		  || (((x = get_equiv (SET_SRC (set))) != SET_SRC (set))
5061 		      && in_list_p (curr_insn,
5062 				    ira_reg_equiv
5063 				    [REGNO (SET_SRC (set))].init_insns)))
5064 		{
5065 		  /* This is equiv init insn of pseudo which did not get a
5066 		     hard register -- remove the insn.	*/
5067 		  if (lra_dump_file != NULL)
5068 		    {
5069 		      fprintf (lra_dump_file,
5070 			       "      Removing equiv init insn %i (freq=%d)\n",
5071 			       INSN_UID (curr_insn),
5072 			       REG_FREQ_FROM_BB (BLOCK_FOR_INSN (curr_insn)));
5073 		      dump_insn_slim (lra_dump_file, curr_insn);
5074 		    }
5075 		  if (contains_reg_p (x, true, false))
5076 		    check_and_force_assignment_correctness_p = true;
5077 		  lra_set_insn_deleted (curr_insn);
5078 		  continue;
5079 		}
5080 	    }
5081 	  curr_id = lra_get_insn_recog_data (curr_insn);
5082 	  curr_static_id = curr_id->insn_static_data;
5083 	  init_curr_insn_input_reloads ();
5084 	  init_curr_operand_mode ();
5085 	  if (curr_insn_transform (false))
5086 	    changed_p = true;
5087 	  /* Check non-transformed insns too for equiv change as USE
5088 	     or CLOBBER don't need reloads but can contain pseudos
5089 	     being changed on their equivalences.  */
5090 	  else if (bitmap_bit_p (equiv_insn_bitmap, INSN_UID (curr_insn))
5091 		   && loc_equivalence_change_p (&PATTERN (curr_insn)))
5092 	    {
5093 	      lra_update_insn_regno_info (curr_insn);
5094 	      changed_p = true;
5095 	    }
5096 	}
5097     }
5098 
5099   /* If we used a new hard regno, changed_p should be true because the
5100      hard reg is assigned to a new pseudo.  */
5101   if (flag_checking && !changed_p)
5102     {
5103       for (i = FIRST_PSEUDO_REGISTER; i < new_regno_start; i++)
5104 	if (lra_reg_info[i].nrefs != 0
5105 	    && (hard_regno = lra_get_regno_hard_regno (i)) >= 0)
5106 	  {
5107 	    int j, nregs = hard_regno_nregs (hard_regno,
5108 					     PSEUDO_REGNO_MODE (i));
5109 
5110 	    for (j = 0; j < nregs; j++)
5111 	      lra_assert (df_regs_ever_live_p (hard_regno + j));
5112 	  }
5113     }
5114   return changed_p;
5115 }
5116 
5117 static void initiate_invariants (void);
5118 static void finish_invariants (void);
5119 
5120 /* Initiate the LRA constraint pass.  It is done once per
5121    function.  */
5122 void
lra_constraints_init(void)5123 lra_constraints_init (void)
5124 {
5125   initiate_invariants ();
5126 }
5127 
5128 /* Finalize the LRA constraint pass.  It is done once per
5129    function.  */
5130 void
lra_constraints_finish(void)5131 lra_constraints_finish (void)
5132 {
5133   finish_invariants ();
5134 }
5135 
5136 
5137 
5138 /* Structure describes invariants for ineheritance.  */
5139 struct lra_invariant
5140 {
5141   /* The order number of the invariant.  */
5142   int num;
5143   /* The invariant RTX.  */
5144   rtx invariant_rtx;
5145   /* The origin insn of the invariant.  */
5146   rtx_insn *insn;
5147 };
5148 
5149 typedef lra_invariant invariant_t;
5150 typedef invariant_t *invariant_ptr_t;
5151 typedef const invariant_t *const_invariant_ptr_t;
5152 
5153 /* Pointer to the inheritance invariants.  */
5154 static vec<invariant_ptr_t> invariants;
5155 
5156 /* Allocation pool for the invariants.  */
5157 static object_allocator<lra_invariant> *invariants_pool;
5158 
5159 /* Hash table for the invariants.  */
5160 static htab_t invariant_table;
5161 
5162 /* Hash function for INVARIANT.  */
5163 static hashval_t
invariant_hash(const void * invariant)5164 invariant_hash (const void *invariant)
5165 {
5166   rtx inv = ((const_invariant_ptr_t) invariant)->invariant_rtx;
5167   return lra_rtx_hash (inv);
5168 }
5169 
5170 /* Equal function for invariants INVARIANT1 and INVARIANT2.  */
5171 static int
invariant_eq_p(const void * invariant1,const void * invariant2)5172 invariant_eq_p (const void *invariant1, const void *invariant2)
5173 {
5174   rtx inv1 = ((const_invariant_ptr_t) invariant1)->invariant_rtx;
5175   rtx inv2 = ((const_invariant_ptr_t) invariant2)->invariant_rtx;
5176 
5177   return rtx_equal_p (inv1, inv2);
5178 }
5179 
5180 /* Insert INVARIANT_RTX into the table if it is not there yet.  Return
5181    invariant which is in the table.  */
5182 static invariant_ptr_t
insert_invariant(rtx invariant_rtx)5183 insert_invariant (rtx invariant_rtx)
5184 {
5185   void **entry_ptr;
5186   invariant_t invariant;
5187   invariant_ptr_t invariant_ptr;
5188 
5189   invariant.invariant_rtx = invariant_rtx;
5190   entry_ptr = htab_find_slot (invariant_table, &invariant, INSERT);
5191   if (*entry_ptr == NULL)
5192     {
5193       invariant_ptr = invariants_pool->allocate ();
5194       invariant_ptr->invariant_rtx = invariant_rtx;
5195       invariant_ptr->insn = NULL;
5196       invariants.safe_push (invariant_ptr);
5197       *entry_ptr = (void *) invariant_ptr;
5198     }
5199   return (invariant_ptr_t) *entry_ptr;
5200 }
5201 
5202 /* Initiate the invariant table.  */
5203 static void
initiate_invariants(void)5204 initiate_invariants (void)
5205 {
5206   invariants.create (100);
5207   invariants_pool
5208     = new object_allocator<lra_invariant> ("Inheritance invariants");
5209   invariant_table = htab_create (100, invariant_hash, invariant_eq_p, NULL);
5210 }
5211 
5212 /* Finish the invariant table.  */
5213 static void
finish_invariants(void)5214 finish_invariants (void)
5215 {
5216   htab_delete (invariant_table);
5217   delete invariants_pool;
5218   invariants.release ();
5219 }
5220 
5221 /* Make the invariant table empty.  */
5222 static void
clear_invariants(void)5223 clear_invariants (void)
5224 {
5225   htab_empty (invariant_table);
5226   invariants_pool->release ();
5227   invariants.truncate (0);
5228 }
5229 
5230 
5231 
5232 /* This page contains code to do inheritance/split
5233    transformations.  */
5234 
5235 /* Number of reloads passed so far in current EBB.  */
5236 static int reloads_num;
5237 
5238 /* Number of calls passed so far in current EBB.  */
5239 static int calls_num;
5240 
5241 /* Index ID is the CALLS_NUM associated the last call we saw with
5242    ABI identifier ID.  */
5243 static int last_call_for_abi[NUM_ABI_IDS];
5244 
5245 /* Which registers have been fully or partially clobbered by a call
5246    since they were last used.  */
5247 static HARD_REG_SET full_and_partial_call_clobbers;
5248 
5249 /* Current reload pseudo check for validity of elements in
5250    USAGE_INSNS.	 */
5251 static int curr_usage_insns_check;
5252 
5253 /* Info about last usage of registers in EBB to do inheritance/split
5254    transformation.  Inheritance transformation is done from a spilled
5255    pseudo and split transformations from a hard register or a pseudo
5256    assigned to a hard register.	 */
5257 struct usage_insns
5258 {
5259   /* If the value is equal to CURR_USAGE_INSNS_CHECK, then the member
5260      value INSNS is valid.  The insns is chain of optional debug insns
5261      and a finishing non-debug insn using the corresponding reg.  The
5262      value is also used to mark the registers which are set up in the
5263      current insn.  The negated insn uid is used for this.  */
5264   int check;
5265   /* Value of global reloads_num at the last insn in INSNS.  */
5266   int reloads_num;
5267   /* Value of global reloads_nums at the last insn in INSNS.  */
5268   int calls_num;
5269   /* It can be true only for splitting.	 And it means that the restore
5270      insn should be put after insn given by the following member.  */
5271   bool after_p;
5272   /* Next insns in the current EBB which use the original reg and the
5273      original reg value is not changed between the current insn and
5274      the next insns.  In order words, e.g. for inheritance, if we need
5275      to use the original reg value again in the next insns we can try
5276      to use the value in a hard register from a reload insn of the
5277      current insn.  */
5278   rtx insns;
5279 };
5280 
5281 /* Map: regno -> corresponding pseudo usage insns.  */
5282 static struct usage_insns *usage_insns;
5283 
5284 static void
setup_next_usage_insn(int regno,rtx insn,int reloads_num,bool after_p)5285 setup_next_usage_insn (int regno, rtx insn, int reloads_num, bool after_p)
5286 {
5287   usage_insns[regno].check = curr_usage_insns_check;
5288   usage_insns[regno].insns = insn;
5289   usage_insns[regno].reloads_num = reloads_num;
5290   usage_insns[regno].calls_num = calls_num;
5291   usage_insns[regno].after_p = after_p;
5292   if (regno >= FIRST_PSEUDO_REGISTER && reg_renumber[regno] >= 0)
5293     remove_from_hard_reg_set (&full_and_partial_call_clobbers,
5294 			      PSEUDO_REGNO_MODE (regno),
5295 			      reg_renumber[regno]);
5296 }
5297 
5298 /* The function is used to form list REGNO usages which consists of
5299    optional debug insns finished by a non-debug insn using REGNO.
5300    RELOADS_NUM is current number of reload insns processed so far.  */
5301 static void
add_next_usage_insn(int regno,rtx_insn * insn,int reloads_num)5302 add_next_usage_insn (int regno, rtx_insn *insn, int reloads_num)
5303 {
5304   rtx next_usage_insns;
5305 
5306   if (usage_insns[regno].check == curr_usage_insns_check
5307       && (next_usage_insns = usage_insns[regno].insns) != NULL_RTX
5308       && DEBUG_INSN_P (insn))
5309     {
5310       /* Check that we did not add the debug insn yet.	*/
5311       if (next_usage_insns != insn
5312 	  && (GET_CODE (next_usage_insns) != INSN_LIST
5313 	      || XEXP (next_usage_insns, 0) != insn))
5314 	usage_insns[regno].insns = gen_rtx_INSN_LIST (VOIDmode, insn,
5315 						      next_usage_insns);
5316     }
5317   else if (NONDEBUG_INSN_P (insn))
5318     setup_next_usage_insn (regno, insn, reloads_num, false);
5319   else
5320     usage_insns[regno].check = 0;
5321 }
5322 
5323 /* Return first non-debug insn in list USAGE_INSNS.  */
5324 static rtx_insn *
skip_usage_debug_insns(rtx usage_insns)5325 skip_usage_debug_insns (rtx usage_insns)
5326 {
5327   rtx insn;
5328 
5329   /* Skip debug insns.  */
5330   for (insn = usage_insns;
5331        insn != NULL_RTX && GET_CODE (insn) == INSN_LIST;
5332        insn = XEXP (insn, 1))
5333     ;
5334   return safe_as_a <rtx_insn *> (insn);
5335 }
5336 
5337 /* Return true if we need secondary memory moves for insn in
5338    USAGE_INSNS after inserting inherited pseudo of class INHER_CL
5339    into the insn.  */
5340 static bool
check_secondary_memory_needed_p(enum reg_class inher_cl ATTRIBUTE_UNUSED,rtx usage_insns ATTRIBUTE_UNUSED)5341 check_secondary_memory_needed_p (enum reg_class inher_cl ATTRIBUTE_UNUSED,
5342 				 rtx usage_insns ATTRIBUTE_UNUSED)
5343 {
5344   rtx_insn *insn;
5345   rtx set, dest;
5346   enum reg_class cl;
5347 
5348   if (inher_cl == ALL_REGS
5349       || (insn = skip_usage_debug_insns (usage_insns)) == NULL_RTX)
5350     return false;
5351   lra_assert (INSN_P (insn));
5352   if ((set = single_set (insn)) == NULL_RTX || ! REG_P (SET_DEST (set)))
5353     return false;
5354   dest = SET_DEST (set);
5355   if (! REG_P (dest))
5356     return false;
5357   lra_assert (inher_cl != NO_REGS);
5358   cl = get_reg_class (REGNO (dest));
5359   return (cl != NO_REGS && cl != ALL_REGS
5360 	  && targetm.secondary_memory_needed (GET_MODE (dest), inher_cl, cl));
5361 }
5362 
5363 /* Registers involved in inheritance/split in the current EBB
5364    (inheritance/split pseudos and original registers).	*/
5365 static bitmap_head check_only_regs;
5366 
5367 /* Reload pseudos cannot be involded in invariant inheritance in the
5368    current EBB.  */
5369 static bitmap_head invalid_invariant_regs;
5370 
5371 /* Do inheritance transformations for insn INSN, which defines (if
5372    DEF_P) or uses ORIGINAL_REGNO.  NEXT_USAGE_INSNS specifies which
5373    instruction in the EBB next uses ORIGINAL_REGNO; it has the same
5374    form as the "insns" field of usage_insns.  Return true if we
5375    succeed in such transformation.
5376 
5377    The transformations look like:
5378 
5379      p <- ...		  i <- ...
5380      ...		  p <- i    (new insn)
5381      ...	     =>
5382      <- ... p ...	  <- ... i ...
5383    or
5384      ...		  i <- p    (new insn)
5385      <- ... p ...	  <- ... i ...
5386      ...	     =>
5387      <- ... p ...	  <- ... i ...
5388    where p is a spilled original pseudo and i is a new inheritance pseudo.
5389 
5390 
5391    The inheritance pseudo has the smallest class of two classes CL and
5392    class of ORIGINAL REGNO.  */
5393 static bool
inherit_reload_reg(bool def_p,int original_regno,enum reg_class cl,rtx_insn * insn,rtx next_usage_insns)5394 inherit_reload_reg (bool def_p, int original_regno,
5395 		    enum reg_class cl, rtx_insn *insn, rtx next_usage_insns)
5396 {
5397   if (optimize_function_for_size_p (cfun))
5398     return false;
5399 
5400   enum reg_class rclass = lra_get_allocno_class (original_regno);
5401   rtx original_reg = regno_reg_rtx[original_regno];
5402   rtx new_reg, usage_insn;
5403   rtx_insn *new_insns;
5404 
5405   lra_assert (! usage_insns[original_regno].after_p);
5406   if (lra_dump_file != NULL)
5407     fprintf (lra_dump_file,
5408 	     "    <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n");
5409   if (! ira_reg_classes_intersect_p[cl][rclass])
5410     {
5411       if (lra_dump_file != NULL)
5412 	{
5413 	  fprintf (lra_dump_file,
5414 		   "    Rejecting inheritance for %d "
5415 		   "because of disjoint classes %s and %s\n",
5416 		   original_regno, reg_class_names[cl],
5417 		   reg_class_names[rclass]);
5418 	  fprintf (lra_dump_file,
5419 		   "    >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
5420 	}
5421       return false;
5422     }
5423   if ((ira_class_subset_p[cl][rclass] && cl != rclass)
5424       /* We don't use a subset of two classes because it can be
5425 	 NO_REGS.  This transformation is still profitable in most
5426 	 cases even if the classes are not intersected as register
5427 	 move is probably cheaper than a memory load.  */
5428       || ira_class_hard_regs_num[cl] < ira_class_hard_regs_num[rclass])
5429     {
5430       if (lra_dump_file != NULL)
5431 	fprintf (lra_dump_file, "    Use smallest class of %s and %s\n",
5432 		 reg_class_names[cl], reg_class_names[rclass]);
5433 
5434       rclass = cl;
5435     }
5436   if (check_secondary_memory_needed_p (rclass, next_usage_insns))
5437     {
5438       /* Reject inheritance resulting in secondary memory moves.
5439 	 Otherwise, there is a danger in LRA cycling.  Also such
5440 	 transformation will be unprofitable.  */
5441       if (lra_dump_file != NULL)
5442 	{
5443 	  rtx_insn *insn = skip_usage_debug_insns (next_usage_insns);
5444 	  rtx set = single_set (insn);
5445 
5446 	  lra_assert (set != NULL_RTX);
5447 
5448 	  rtx dest = SET_DEST (set);
5449 
5450 	  lra_assert (REG_P (dest));
5451 	  fprintf (lra_dump_file,
5452 		   "    Rejecting inheritance for insn %d(%s)<-%d(%s) "
5453 		   "as secondary mem is needed\n",
5454 		   REGNO (dest), reg_class_names[get_reg_class (REGNO (dest))],
5455 		   original_regno, reg_class_names[rclass]);
5456 	  fprintf (lra_dump_file,
5457 		   "    >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
5458 	}
5459       return false;
5460     }
5461   new_reg = lra_create_new_reg (GET_MODE (original_reg), original_reg,
5462 				rclass, "inheritance");
5463   start_sequence ();
5464   if (def_p)
5465     lra_emit_move (original_reg, new_reg);
5466   else
5467     lra_emit_move (new_reg, original_reg);
5468   new_insns = get_insns ();
5469   end_sequence ();
5470   if (NEXT_INSN (new_insns) != NULL_RTX)
5471     {
5472       if (lra_dump_file != NULL)
5473 	{
5474 	  fprintf (lra_dump_file,
5475 		   "    Rejecting inheritance %d->%d "
5476 		   "as it results in 2 or more insns:\n",
5477 		   original_regno, REGNO (new_reg));
5478 	  dump_rtl_slim (lra_dump_file, new_insns, NULL, -1, 0);
5479 	  fprintf (lra_dump_file,
5480 		   "	>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
5481 	}
5482       return false;
5483     }
5484   lra_substitute_pseudo_within_insn (insn, original_regno, new_reg, false);
5485   lra_update_insn_regno_info (insn);
5486   if (! def_p)
5487     /* We now have a new usage insn for original regno.  */
5488     setup_next_usage_insn (original_regno, new_insns, reloads_num, false);
5489   if (lra_dump_file != NULL)
5490     fprintf (lra_dump_file, "    Original reg change %d->%d (bb%d):\n",
5491 	     original_regno, REGNO (new_reg), BLOCK_FOR_INSN (insn)->index);
5492   lra_reg_info[REGNO (new_reg)].restore_rtx = regno_reg_rtx[original_regno];
5493   bitmap_set_bit (&check_only_regs, REGNO (new_reg));
5494   bitmap_set_bit (&check_only_regs, original_regno);
5495   bitmap_set_bit (&lra_inheritance_pseudos, REGNO (new_reg));
5496   if (def_p)
5497     lra_process_new_insns (insn, NULL, new_insns,
5498 			   "Add original<-inheritance");
5499   else
5500     lra_process_new_insns (insn, new_insns, NULL,
5501 			   "Add inheritance<-original");
5502   while (next_usage_insns != NULL_RTX)
5503     {
5504       if (GET_CODE (next_usage_insns) != INSN_LIST)
5505 	{
5506 	  usage_insn = next_usage_insns;
5507 	  lra_assert (NONDEBUG_INSN_P (usage_insn));
5508 	  next_usage_insns = NULL;
5509 	}
5510       else
5511 	{
5512 	  usage_insn = XEXP (next_usage_insns, 0);
5513 	  lra_assert (DEBUG_INSN_P (usage_insn));
5514 	  next_usage_insns = XEXP (next_usage_insns, 1);
5515 	}
5516       lra_substitute_pseudo (&usage_insn, original_regno, new_reg, false,
5517 			     DEBUG_INSN_P (usage_insn));
5518       lra_update_insn_regno_info (as_a <rtx_insn *> (usage_insn));
5519       if (lra_dump_file != NULL)
5520 	{
5521 	  basic_block bb = BLOCK_FOR_INSN (usage_insn);
5522 	  fprintf (lra_dump_file,
5523 		   "    Inheritance reuse change %d->%d (bb%d):\n",
5524 		   original_regno, REGNO (new_reg),
5525 		   bb ? bb->index : -1);
5526 	  dump_insn_slim (lra_dump_file, as_a <rtx_insn *> (usage_insn));
5527 	}
5528     }
5529   if (lra_dump_file != NULL)
5530     fprintf (lra_dump_file,
5531 	     "	  >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
5532   return true;
5533 }
5534 
5535 /* Return true if we need a caller save/restore for pseudo REGNO which
5536    was assigned to a hard register.  */
5537 static inline bool
need_for_call_save_p(int regno)5538 need_for_call_save_p (int regno)
5539 {
5540   lra_assert (regno >= FIRST_PSEUDO_REGISTER && reg_renumber[regno] >= 0);
5541   if (usage_insns[regno].calls_num < calls_num)
5542     {
5543       unsigned int abis = 0;
5544       for (unsigned int i = 0; i < NUM_ABI_IDS; ++i)
5545 	if (last_call_for_abi[i] > usage_insns[regno].calls_num)
5546 	  abis |= 1 << i;
5547       gcc_assert (abis);
5548       if (call_clobbered_in_region_p (abis, full_and_partial_call_clobbers,
5549 				      PSEUDO_REGNO_MODE (regno),
5550 				      reg_renumber[regno]))
5551 	return true;
5552     }
5553   return false;
5554 }
5555 
5556 /* Global registers occurring in the current EBB.  */
5557 static bitmap_head ebb_global_regs;
5558 
5559 /* Return true if we need a split for hard register REGNO or pseudo
5560    REGNO which was assigned to a hard register.
5561    POTENTIAL_RELOAD_HARD_REGS contains hard registers which might be
5562    used for reloads since the EBB end.	It is an approximation of the
5563    used hard registers in the split range.  The exact value would
5564    require expensive calculations.  If we were aggressive with
5565    splitting because of the approximation, the split pseudo will save
5566    the same hard register assignment and will be removed in the undo
5567    pass.  We still need the approximation because too aggressive
5568    splitting would result in too inaccurate cost calculation in the
5569    assignment pass because of too many generated moves which will be
5570    probably removed in the undo pass.  */
5571 static inline bool
need_for_split_p(HARD_REG_SET potential_reload_hard_regs,int regno)5572 need_for_split_p (HARD_REG_SET potential_reload_hard_regs, int regno)
5573 {
5574   int hard_regno = regno < FIRST_PSEUDO_REGISTER ? regno : reg_renumber[regno];
5575 
5576   lra_assert (hard_regno >= 0);
5577   return ((TEST_HARD_REG_BIT (potential_reload_hard_regs, hard_regno)
5578 	   /* Don't split eliminable hard registers, otherwise we can
5579 	      split hard registers like hard frame pointer, which
5580 	      lives on BB start/end according to DF-infrastructure,
5581 	      when there is a pseudo assigned to the register and
5582 	      living in the same BB.  */
5583 	   && (regno >= FIRST_PSEUDO_REGISTER
5584 	       || ! TEST_HARD_REG_BIT (eliminable_regset, hard_regno))
5585 	   && ! TEST_HARD_REG_BIT (lra_no_alloc_regs, hard_regno)
5586 	   /* Don't split call clobbered hard regs living through
5587 	      calls, otherwise we might have a check problem in the
5588 	      assign sub-pass as in the most cases (exception is a
5589 	      situation when check_and_force_assignment_correctness_p value is
5590 	      true) the assign pass assumes that all pseudos living
5591 	      through calls are assigned to call saved hard regs.  */
5592 	   && (regno >= FIRST_PSEUDO_REGISTER
5593 	       || !TEST_HARD_REG_BIT (full_and_partial_call_clobbers, regno))
5594 	   /* We need at least 2 reloads to make pseudo splitting
5595 	      profitable.  We should provide hard regno splitting in
5596 	      any case to solve 1st insn scheduling problem when
5597 	      moving hard register definition up might result in
5598 	      impossibility to find hard register for reload pseudo of
5599 	      small register class.  */
5600 	   && (usage_insns[regno].reloads_num
5601 	       + (regno < FIRST_PSEUDO_REGISTER ? 0 : 3) < reloads_num)
5602 	   && (regno < FIRST_PSEUDO_REGISTER
5603 	       /* For short living pseudos, spilling + inheritance can
5604 		  be considered a substitution for splitting.
5605 		  Therefore we do not splitting for local pseudos.  It
5606 		  decreases also aggressiveness of splitting.  The
5607 		  minimal number of references is chosen taking into
5608 		  account that for 2 references splitting has no sense
5609 		  as we can just spill the pseudo.  */
5610 	       || (regno >= FIRST_PSEUDO_REGISTER
5611 		   && lra_reg_info[regno].nrefs > 3
5612 		   && bitmap_bit_p (&ebb_global_regs, regno))))
5613 	  || (regno >= FIRST_PSEUDO_REGISTER && need_for_call_save_p (regno)));
5614 }
5615 
5616 /* Return class for the split pseudo created from original pseudo with
5617    ALLOCNO_CLASS and MODE which got a hard register HARD_REGNO.	 We
5618    choose subclass of ALLOCNO_CLASS which contains HARD_REGNO and
5619    results in no secondary memory movements.  */
5620 static enum reg_class
choose_split_class(enum reg_class allocno_class,int hard_regno ATTRIBUTE_UNUSED,machine_mode mode ATTRIBUTE_UNUSED)5621 choose_split_class (enum reg_class allocno_class,
5622 		    int hard_regno ATTRIBUTE_UNUSED,
5623 		    machine_mode mode ATTRIBUTE_UNUSED)
5624 {
5625   int i;
5626   enum reg_class cl, best_cl = NO_REGS;
5627   enum reg_class hard_reg_class ATTRIBUTE_UNUSED
5628     = REGNO_REG_CLASS (hard_regno);
5629 
5630   if (! targetm.secondary_memory_needed (mode, allocno_class, allocno_class)
5631       && TEST_HARD_REG_BIT (reg_class_contents[allocno_class], hard_regno))
5632     return allocno_class;
5633   for (i = 0;
5634        (cl = reg_class_subclasses[allocno_class][i]) != LIM_REG_CLASSES;
5635        i++)
5636     if (! targetm.secondary_memory_needed (mode, cl, hard_reg_class)
5637 	&& ! targetm.secondary_memory_needed (mode, hard_reg_class, cl)
5638 	&& TEST_HARD_REG_BIT (reg_class_contents[cl], hard_regno)
5639 	&& (best_cl == NO_REGS
5640 	    || ira_class_hard_regs_num[best_cl] < ira_class_hard_regs_num[cl]))
5641       best_cl = cl;
5642   return best_cl;
5643 }
5644 
5645 /* Copy any equivalence information from ORIGINAL_REGNO to NEW_REGNO.
5646    It only makes sense to call this function if NEW_REGNO is always
5647    equal to ORIGINAL_REGNO.  */
5648 
5649 static void
lra_copy_reg_equiv(unsigned int new_regno,unsigned int original_regno)5650 lra_copy_reg_equiv (unsigned int new_regno, unsigned int original_regno)
5651 {
5652   if (!ira_reg_equiv[original_regno].defined_p)
5653     return;
5654 
5655   ira_expand_reg_equiv ();
5656   ira_reg_equiv[new_regno].defined_p = true;
5657   if (ira_reg_equiv[original_regno].memory)
5658     ira_reg_equiv[new_regno].memory
5659       = copy_rtx (ira_reg_equiv[original_regno].memory);
5660   if (ira_reg_equiv[original_regno].constant)
5661     ira_reg_equiv[new_regno].constant
5662       = copy_rtx (ira_reg_equiv[original_regno].constant);
5663   if (ira_reg_equiv[original_regno].invariant)
5664     ira_reg_equiv[new_regno].invariant
5665       = copy_rtx (ira_reg_equiv[original_regno].invariant);
5666 }
5667 
5668 /* Do split transformations for insn INSN, which defines or uses
5669    ORIGINAL_REGNO.  NEXT_USAGE_INSNS specifies which instruction in
5670    the EBB next uses ORIGINAL_REGNO; it has the same form as the
5671    "insns" field of usage_insns.  If TO is not NULL, we don't use
5672    usage_insns, we put restore insns after TO insn.  It is a case when
5673    we call it from lra_split_hard_reg_for, outside the inheritance
5674    pass.
5675 
5676    The transformations look like:
5677 
5678      p <- ...		  p <- ...
5679      ...		  s <- p    (new insn -- save)
5680      ...	     =>
5681      ...		  p <- s    (new insn -- restore)
5682      <- ... p ...	  <- ... p ...
5683    or
5684      <- ... p ...	  <- ... p ...
5685      ...		  s <- p    (new insn -- save)
5686      ...	     =>
5687      ...		  p <- s    (new insn -- restore)
5688      <- ... p ...	  <- ... p ...
5689 
5690    where p is an original pseudo got a hard register or a hard
5691    register and s is a new split pseudo.  The save is put before INSN
5692    if BEFORE_P is true.	 Return true if we succeed in such
5693    transformation.  */
5694 static bool
split_reg(bool before_p,int original_regno,rtx_insn * insn,rtx next_usage_insns,rtx_insn * to)5695 split_reg (bool before_p, int original_regno, rtx_insn *insn,
5696 	   rtx next_usage_insns, rtx_insn *to)
5697 {
5698   enum reg_class rclass;
5699   rtx original_reg;
5700   int hard_regno, nregs;
5701   rtx new_reg, usage_insn;
5702   rtx_insn *restore, *save;
5703   bool after_p;
5704   bool call_save_p;
5705   machine_mode mode;
5706 
5707   if (original_regno < FIRST_PSEUDO_REGISTER)
5708     {
5709       rclass = ira_allocno_class_translate[REGNO_REG_CLASS (original_regno)];
5710       hard_regno = original_regno;
5711       call_save_p = false;
5712       nregs = 1;
5713       mode = lra_reg_info[hard_regno].biggest_mode;
5714       machine_mode reg_rtx_mode = GET_MODE (regno_reg_rtx[hard_regno]);
5715       /* A reg can have a biggest_mode of VOIDmode if it was only ever seen
5716 	 as part of a multi-word register.  In that case, or if the biggest
5717 	 mode was larger than a register, just use the reg_rtx.  Otherwise,
5718 	 limit the size to that of the biggest access in the function.  */
5719       if (mode == VOIDmode
5720 	  || paradoxical_subreg_p (mode, reg_rtx_mode))
5721 	{
5722 	  original_reg = regno_reg_rtx[hard_regno];
5723 	  mode = reg_rtx_mode;
5724 	}
5725       else
5726 	original_reg = gen_rtx_REG (mode, hard_regno);
5727     }
5728   else
5729     {
5730       mode = PSEUDO_REGNO_MODE (original_regno);
5731       hard_regno = reg_renumber[original_regno];
5732       nregs = hard_regno_nregs (hard_regno, mode);
5733       rclass = lra_get_allocno_class (original_regno);
5734       original_reg = regno_reg_rtx[original_regno];
5735       call_save_p = need_for_call_save_p (original_regno);
5736     }
5737   lra_assert (hard_regno >= 0);
5738   if (lra_dump_file != NULL)
5739     fprintf (lra_dump_file,
5740 	     "	  ((((((((((((((((((((((((((((((((((((((((((((((((\n");
5741 
5742   if (call_save_p)
5743     {
5744       mode = HARD_REGNO_CALLER_SAVE_MODE (hard_regno,
5745 					  hard_regno_nregs (hard_regno, mode),
5746 					  mode);
5747       new_reg = lra_create_new_reg (mode, NULL_RTX, NO_REGS, "save");
5748     }
5749   else
5750     {
5751       rclass = choose_split_class (rclass, hard_regno, mode);
5752       if (rclass == NO_REGS)
5753 	{
5754 	  if (lra_dump_file != NULL)
5755 	    {
5756 	      fprintf (lra_dump_file,
5757 		       "    Rejecting split of %d(%s): "
5758 		       "no good reg class for %d(%s)\n",
5759 		       original_regno,
5760 		       reg_class_names[lra_get_allocno_class (original_regno)],
5761 		       hard_regno,
5762 		       reg_class_names[REGNO_REG_CLASS (hard_regno)]);
5763 	      fprintf
5764 		(lra_dump_file,
5765 		 "    ))))))))))))))))))))))))))))))))))))))))))))))))\n");
5766 	    }
5767 	  return false;
5768 	}
5769       /* Split_if_necessary can split hard registers used as part of a
5770 	 multi-register mode but splits each register individually.  The
5771 	 mode used for each independent register may not be supported
5772 	 so reject the split.  Splitting the wider mode should theoretically
5773 	 be possible but is not implemented.  */
5774       if (!targetm.hard_regno_mode_ok (hard_regno, mode))
5775 	{
5776 	  if (lra_dump_file != NULL)
5777 	    {
5778 	      fprintf (lra_dump_file,
5779 		       "    Rejecting split of %d(%s): unsuitable mode %s\n",
5780 		       original_regno,
5781 		       reg_class_names[lra_get_allocno_class (original_regno)],
5782 		       GET_MODE_NAME (mode));
5783 	      fprintf
5784 		(lra_dump_file,
5785 		 "    ))))))))))))))))))))))))))))))))))))))))))))))))\n");
5786 	    }
5787 	  return false;
5788 	}
5789       new_reg = lra_create_new_reg (mode, original_reg, rclass, "split");
5790       reg_renumber[REGNO (new_reg)] = hard_regno;
5791     }
5792   int new_regno = REGNO (new_reg);
5793   save = emit_spill_move (true, new_reg, original_reg);
5794   if (NEXT_INSN (save) != NULL_RTX && !call_save_p)
5795     {
5796       if (lra_dump_file != NULL)
5797 	{
5798 	  fprintf
5799 	    (lra_dump_file,
5800 	     "	  Rejecting split %d->%d resulting in > 2 save insns:\n",
5801 	     original_regno, new_regno);
5802 	  dump_rtl_slim (lra_dump_file, save, NULL, -1, 0);
5803 	  fprintf (lra_dump_file,
5804 		   "	))))))))))))))))))))))))))))))))))))))))))))))))\n");
5805 	}
5806       return false;
5807     }
5808   restore = emit_spill_move (false, new_reg, original_reg);
5809   if (NEXT_INSN (restore) != NULL_RTX && !call_save_p)
5810     {
5811       if (lra_dump_file != NULL)
5812 	{
5813 	  fprintf (lra_dump_file,
5814 		   "	Rejecting split %d->%d "
5815 		   "resulting in > 2 restore insns:\n",
5816 		   original_regno, new_regno);
5817 	  dump_rtl_slim (lra_dump_file, restore, NULL, -1, 0);
5818 	  fprintf (lra_dump_file,
5819 		   "	))))))))))))))))))))))))))))))))))))))))))))))))\n");
5820 	}
5821       return false;
5822     }
5823   /* Transfer equivalence information to the spill register, so that
5824      if we fail to allocate the spill register, we have the option of
5825      rematerializing the original value instead of spilling to the stack.  */
5826   if (!HARD_REGISTER_NUM_P (original_regno)
5827       && mode == PSEUDO_REGNO_MODE (original_regno))
5828     lra_copy_reg_equiv (new_regno, original_regno);
5829   lra_reg_info[new_regno].restore_rtx = regno_reg_rtx[original_regno];
5830   bitmap_set_bit (&lra_split_regs, new_regno);
5831   if (to != NULL)
5832     {
5833       lra_assert (next_usage_insns == NULL);
5834       usage_insn = to;
5835       after_p = TRUE;
5836     }
5837   else
5838     {
5839       /* We need check_only_regs only inside the inheritance pass.  */
5840       bitmap_set_bit (&check_only_regs, new_regno);
5841       bitmap_set_bit (&check_only_regs, original_regno);
5842       after_p = usage_insns[original_regno].after_p;
5843       for (;;)
5844 	{
5845 	  if (GET_CODE (next_usage_insns) != INSN_LIST)
5846 	    {
5847 	      usage_insn = next_usage_insns;
5848 	      break;
5849 	    }
5850 	  usage_insn = XEXP (next_usage_insns, 0);
5851 	  lra_assert (DEBUG_INSN_P (usage_insn));
5852 	  next_usage_insns = XEXP (next_usage_insns, 1);
5853 	  lra_substitute_pseudo (&usage_insn, original_regno, new_reg, false,
5854 				 true);
5855 	  lra_update_insn_regno_info (as_a <rtx_insn *> (usage_insn));
5856 	  if (lra_dump_file != NULL)
5857 	    {
5858 	      fprintf (lra_dump_file, "    Split reuse change %d->%d:\n",
5859 		       original_regno, new_regno);
5860 	      dump_insn_slim (lra_dump_file, as_a <rtx_insn *> (usage_insn));
5861 	    }
5862 	}
5863     }
5864   lra_assert (NOTE_P (usage_insn) || NONDEBUG_INSN_P (usage_insn));
5865   lra_assert (usage_insn != insn || (after_p && before_p));
5866   lra_process_new_insns (as_a <rtx_insn *> (usage_insn),
5867 			 after_p ? NULL : restore,
5868 			 after_p ? restore : NULL,
5869 			 call_save_p
5870 			 ?  "Add reg<-save" : "Add reg<-split");
5871   lra_process_new_insns (insn, before_p ? save : NULL,
5872 			 before_p ? NULL : save,
5873 			 call_save_p
5874 			 ?  "Add save<-reg" : "Add split<-reg");
5875   if (nregs > 1)
5876     /* If we are trying to split multi-register.  We should check
5877        conflicts on the next assignment sub-pass.  IRA can allocate on
5878        sub-register levels, LRA do this on pseudos level right now and
5879        this discrepancy may create allocation conflicts after
5880        splitting.  */
5881     check_and_force_assignment_correctness_p = true;
5882   if (lra_dump_file != NULL)
5883     fprintf (lra_dump_file,
5884 	     "	  ))))))))))))))))))))))))))))))))))))))))))))))))\n");
5885   return true;
5886 }
5887 
5888 /* Split a hard reg for reload pseudo REGNO having RCLASS and living
5889    in the range [FROM, TO].  Return true if did a split.  Otherwise,
5890    return false.  */
5891 bool
spill_hard_reg_in_range(int regno,enum reg_class rclass,rtx_insn * from,rtx_insn * to)5892 spill_hard_reg_in_range (int regno, enum reg_class rclass, rtx_insn *from, rtx_insn *to)
5893 {
5894   int i, hard_regno;
5895   int rclass_size;
5896   rtx_insn *insn;
5897   unsigned int uid;
5898   bitmap_iterator bi;
5899   HARD_REG_SET ignore;
5900 
5901   lra_assert (from != NULL && to != NULL);
5902   CLEAR_HARD_REG_SET (ignore);
5903   EXECUTE_IF_SET_IN_BITMAP (&lra_reg_info[regno].insn_bitmap, 0, uid, bi)
5904     {
5905       lra_insn_recog_data_t id = lra_insn_recog_data[uid];
5906       struct lra_static_insn_data *static_id = id->insn_static_data;
5907       struct lra_insn_reg *reg;
5908 
5909       for (reg = id->regs; reg != NULL; reg = reg->next)
5910 	if (reg->regno < FIRST_PSEUDO_REGISTER)
5911 	  SET_HARD_REG_BIT (ignore, reg->regno);
5912       for (reg = static_id->hard_regs; reg != NULL; reg = reg->next)
5913 	SET_HARD_REG_BIT (ignore, reg->regno);
5914     }
5915   rclass_size = ira_class_hard_regs_num[rclass];
5916   for (i = 0; i < rclass_size; i++)
5917     {
5918       hard_regno = ira_class_hard_regs[rclass][i];
5919       if (! TEST_HARD_REG_BIT (lra_reg_info[regno].conflict_hard_regs, hard_regno)
5920 	  || TEST_HARD_REG_BIT (ignore, hard_regno))
5921 	continue;
5922       for (insn = from; insn != NEXT_INSN (to); insn = NEXT_INSN (insn))
5923 	{
5924 	  struct lra_static_insn_data *static_id;
5925 	  struct lra_insn_reg *reg;
5926 
5927 	  if (!INSN_P (insn))
5928 	      continue;
5929 	  if (bitmap_bit_p (&lra_reg_info[hard_regno].insn_bitmap,
5930 			    INSN_UID (insn)))
5931 	    break;
5932 	  static_id = lra_get_insn_recog_data (insn)->insn_static_data;
5933 	  for (reg = static_id->hard_regs; reg != NULL; reg = reg->next)
5934 	    if (reg->regno == hard_regno)
5935 	      break;
5936 	  if (reg != NULL)
5937 	    break;
5938 	}
5939       if (insn != NEXT_INSN (to))
5940 	continue;
5941       if (split_reg (TRUE, hard_regno, from, NULL, to))
5942 	return true;
5943     }
5944   return false;
5945 }
5946 
5947 /* Recognize that we need a split transformation for insn INSN, which
5948    defines or uses REGNO in its insn biggest MODE (we use it only if
5949    REGNO is a hard register).  POTENTIAL_RELOAD_HARD_REGS contains
5950    hard registers which might be used for reloads since the EBB end.
5951    Put the save before INSN if BEFORE_P is true.  MAX_UID is maximla
5952    uid before starting INSN processing.  Return true if we succeed in
5953    such transformation.  */
5954 static bool
split_if_necessary(int regno,machine_mode mode,HARD_REG_SET potential_reload_hard_regs,bool before_p,rtx_insn * insn,int max_uid)5955 split_if_necessary (int regno, machine_mode mode,
5956 		    HARD_REG_SET potential_reload_hard_regs,
5957 		    bool before_p, rtx_insn *insn, int max_uid)
5958 {
5959   bool res = false;
5960   int i, nregs = 1;
5961   rtx next_usage_insns;
5962 
5963   if (regno < FIRST_PSEUDO_REGISTER)
5964     nregs = hard_regno_nregs (regno, mode);
5965   for (i = 0; i < nregs; i++)
5966     if (usage_insns[regno + i].check == curr_usage_insns_check
5967 	&& (next_usage_insns = usage_insns[regno + i].insns) != NULL_RTX
5968 	/* To avoid processing the register twice or more.  */
5969 	&& ((GET_CODE (next_usage_insns) != INSN_LIST
5970 	     && INSN_UID (next_usage_insns) < max_uid)
5971 	    || (GET_CODE (next_usage_insns) == INSN_LIST
5972 		&& (INSN_UID (XEXP (next_usage_insns, 0)) < max_uid)))
5973 	&& need_for_split_p (potential_reload_hard_regs, regno + i)
5974 	&& split_reg (before_p, regno + i, insn, next_usage_insns, NULL))
5975     res = true;
5976   return res;
5977 }
5978 
5979 /* Return TRUE if rtx X is considered as an invariant for
5980    inheritance.  */
5981 static bool
invariant_p(const_rtx x)5982 invariant_p (const_rtx x)
5983 {
5984   machine_mode mode;
5985   const char *fmt;
5986   enum rtx_code code;
5987   int i, j;
5988 
5989   if (side_effects_p (x))
5990     return false;
5991 
5992   code = GET_CODE (x);
5993   mode = GET_MODE (x);
5994   if (code == SUBREG)
5995     {
5996       x = SUBREG_REG (x);
5997       code = GET_CODE (x);
5998       mode = wider_subreg_mode (mode, GET_MODE (x));
5999     }
6000 
6001   if (MEM_P (x))
6002     return false;
6003 
6004   if (REG_P (x))
6005     {
6006       int i, nregs, regno = REGNO (x);
6007 
6008       if (regno >= FIRST_PSEUDO_REGISTER || regno == STACK_POINTER_REGNUM
6009 	  || TEST_HARD_REG_BIT (eliminable_regset, regno)
6010 	  || GET_MODE_CLASS (GET_MODE (x)) == MODE_CC)
6011 	return false;
6012       nregs = hard_regno_nregs (regno, mode);
6013       for (i = 0; i < nregs; i++)
6014 	if (! fixed_regs[regno + i]
6015 	    /* A hard register may be clobbered in the current insn
6016 	       but we can ignore this case because if the hard
6017 	       register is used it should be set somewhere after the
6018 	       clobber.  */
6019 	    || bitmap_bit_p (&invalid_invariant_regs, regno + i))
6020 	  return false;
6021     }
6022   fmt = GET_RTX_FORMAT (code);
6023   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
6024     {
6025       if (fmt[i] == 'e')
6026 	{
6027 	  if (! invariant_p (XEXP (x, i)))
6028 	    return false;
6029 	}
6030       else if (fmt[i] == 'E')
6031 	{
6032 	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6033 	    if (! invariant_p (XVECEXP (x, i, j)))
6034 	      return false;
6035 	}
6036     }
6037   return true;
6038 }
6039 
6040 /* We have 'dest_reg <- invariant'.  Let us try to make an invariant
6041    inheritance transformation (using dest_reg instead invariant in a
6042    subsequent insn).  */
6043 static bool
process_invariant_for_inheritance(rtx dst_reg,rtx invariant_rtx)6044 process_invariant_for_inheritance (rtx dst_reg, rtx invariant_rtx)
6045 {
6046   invariant_ptr_t invariant_ptr;
6047   rtx_insn *insn, *new_insns;
6048   rtx insn_set, insn_reg, new_reg;
6049   int insn_regno;
6050   bool succ_p = false;
6051   int dst_regno = REGNO (dst_reg);
6052   machine_mode dst_mode = GET_MODE (dst_reg);
6053   enum reg_class cl = lra_get_allocno_class (dst_regno), insn_reg_cl;
6054 
6055   invariant_ptr = insert_invariant (invariant_rtx);
6056   if ((insn = invariant_ptr->insn) != NULL_RTX)
6057     {
6058       /* We have a subsequent insn using the invariant.  */
6059       insn_set = single_set (insn);
6060       lra_assert (insn_set != NULL);
6061       insn_reg = SET_DEST (insn_set);
6062       lra_assert (REG_P (insn_reg));
6063       insn_regno = REGNO (insn_reg);
6064       insn_reg_cl = lra_get_allocno_class (insn_regno);
6065 
6066       if (dst_mode == GET_MODE (insn_reg)
6067 	  /* We should consider only result move reg insns which are
6068 	     cheap.  */
6069 	  && targetm.register_move_cost (dst_mode, cl, insn_reg_cl) == 2
6070 	  && targetm.register_move_cost (dst_mode, cl, cl) == 2)
6071 	{
6072 	  if (lra_dump_file != NULL)
6073 	    fprintf (lra_dump_file,
6074 		     "    [[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[[\n");
6075 	  new_reg = lra_create_new_reg (dst_mode, dst_reg,
6076 					cl, "invariant inheritance");
6077 	  bitmap_set_bit (&lra_inheritance_pseudos, REGNO (new_reg));
6078 	  bitmap_set_bit (&check_only_regs, REGNO (new_reg));
6079 	  lra_reg_info[REGNO (new_reg)].restore_rtx = PATTERN (insn);
6080 	  start_sequence ();
6081 	  lra_emit_move (new_reg, dst_reg);
6082 	  new_insns = get_insns ();
6083 	  end_sequence ();
6084 	  lra_process_new_insns (curr_insn, NULL, new_insns,
6085 				 "Add invariant inheritance<-original");
6086 	  start_sequence ();
6087 	  lra_emit_move (SET_DEST (insn_set), new_reg);
6088 	  new_insns = get_insns ();
6089 	  end_sequence ();
6090 	  lra_process_new_insns (insn, NULL, new_insns,
6091 				 "Changing reload<-inheritance");
6092 	  lra_set_insn_deleted (insn);
6093 	  succ_p = true;
6094 	  if (lra_dump_file != NULL)
6095 	    {
6096 	      fprintf (lra_dump_file,
6097 		       "    Invariant inheritance reuse change %d (bb%d):\n",
6098 		       REGNO (new_reg), BLOCK_FOR_INSN (insn)->index);
6099 	      dump_insn_slim (lra_dump_file, insn);
6100 	      fprintf (lra_dump_file,
6101 		       "	  ]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]]\n");
6102 	    }
6103 	}
6104     }
6105   invariant_ptr->insn = curr_insn;
6106   return succ_p;
6107 }
6108 
6109 /* Check only registers living at the current program point in the
6110    current EBB.	 */
6111 static bitmap_head live_regs;
6112 
6113 /* Update live info in EBB given by its HEAD and TAIL insns after
6114    inheritance/split transformation.  The function removes dead moves
6115    too.	 */
6116 static void
update_ebb_live_info(rtx_insn * head,rtx_insn * tail)6117 update_ebb_live_info (rtx_insn *head, rtx_insn *tail)
6118 {
6119   unsigned int j;
6120   int i, regno;
6121   bool live_p;
6122   rtx_insn *prev_insn;
6123   rtx set;
6124   bool remove_p;
6125   basic_block last_bb, prev_bb, curr_bb;
6126   bitmap_iterator bi;
6127   struct lra_insn_reg *reg;
6128   edge e;
6129   edge_iterator ei;
6130 
6131   last_bb = BLOCK_FOR_INSN (tail);
6132   prev_bb = NULL;
6133   for (curr_insn = tail;
6134        curr_insn != PREV_INSN (head);
6135        curr_insn = prev_insn)
6136     {
6137       prev_insn = PREV_INSN (curr_insn);
6138       /* We need to process empty blocks too.  They contain
6139 	 NOTE_INSN_BASIC_BLOCK referring for the basic block.  */
6140       if (NOTE_P (curr_insn) && NOTE_KIND (curr_insn) != NOTE_INSN_BASIC_BLOCK)
6141 	continue;
6142       curr_bb = BLOCK_FOR_INSN (curr_insn);
6143       if (curr_bb != prev_bb)
6144 	{
6145 	  if (prev_bb != NULL)
6146 	    {
6147 	      /* Update df_get_live_in (prev_bb):  */
6148 	      EXECUTE_IF_SET_IN_BITMAP (&check_only_regs, 0, j, bi)
6149 		if (bitmap_bit_p (&live_regs, j))
6150 		  bitmap_set_bit (df_get_live_in (prev_bb), j);
6151 		else
6152 		  bitmap_clear_bit (df_get_live_in (prev_bb), j);
6153 	    }
6154 	  if (curr_bb != last_bb)
6155 	    {
6156 	      /* Update df_get_live_out (curr_bb):  */
6157 	      EXECUTE_IF_SET_IN_BITMAP (&check_only_regs, 0, j, bi)
6158 		{
6159 		  live_p = bitmap_bit_p (&live_regs, j);
6160 		  if (! live_p)
6161 		    FOR_EACH_EDGE (e, ei, curr_bb->succs)
6162 		      if (bitmap_bit_p (df_get_live_in (e->dest), j))
6163 			{
6164 			  live_p = true;
6165 			  break;
6166 			}
6167 		  if (live_p)
6168 		    bitmap_set_bit (df_get_live_out (curr_bb), j);
6169 		  else
6170 		    bitmap_clear_bit (df_get_live_out (curr_bb), j);
6171 		}
6172 	    }
6173 	  prev_bb = curr_bb;
6174 	  bitmap_and (&live_regs, &check_only_regs, df_get_live_out (curr_bb));
6175 	}
6176       if (! NONDEBUG_INSN_P (curr_insn))
6177 	continue;
6178       curr_id = lra_get_insn_recog_data (curr_insn);
6179       curr_static_id = curr_id->insn_static_data;
6180       remove_p = false;
6181       if ((set = single_set (curr_insn)) != NULL_RTX
6182 	  && REG_P (SET_DEST (set))
6183 	  && (regno = REGNO (SET_DEST (set))) >= FIRST_PSEUDO_REGISTER
6184 	  && SET_DEST (set) != pic_offset_table_rtx
6185 	  && bitmap_bit_p (&check_only_regs, regno)
6186 	  && ! bitmap_bit_p (&live_regs, regno))
6187 	remove_p = true;
6188       /* See which defined values die here.  */
6189       for (reg = curr_id->regs; reg != NULL; reg = reg->next)
6190 	if (reg->type == OP_OUT && ! reg->subreg_p)
6191 	  bitmap_clear_bit (&live_regs, reg->regno);
6192       for (reg = curr_static_id->hard_regs; reg != NULL; reg = reg->next)
6193 	if (reg->type == OP_OUT && ! reg->subreg_p)
6194 	  bitmap_clear_bit (&live_regs, reg->regno);
6195       if (curr_id->arg_hard_regs != NULL)
6196 	/* Make clobbered argument hard registers die.  */
6197 	for (i = 0; (regno = curr_id->arg_hard_regs[i]) >= 0; i++)
6198 	  if (regno >= FIRST_PSEUDO_REGISTER)
6199 	    bitmap_clear_bit (&live_regs, regno - FIRST_PSEUDO_REGISTER);
6200       /* Mark each used value as live.  */
6201       for (reg = curr_id->regs; reg != NULL; reg = reg->next)
6202 	if (reg->type != OP_OUT
6203 	    && bitmap_bit_p (&check_only_regs, reg->regno))
6204 	  bitmap_set_bit (&live_regs, reg->regno);
6205       for (reg = curr_static_id->hard_regs; reg != NULL; reg = reg->next)
6206 	if (reg->type != OP_OUT
6207 	    && bitmap_bit_p (&check_only_regs, reg->regno))
6208 	  bitmap_set_bit (&live_regs, reg->regno);
6209       if (curr_id->arg_hard_regs != NULL)
6210 	/* Make used argument hard registers live.  */
6211 	for (i = 0; (regno = curr_id->arg_hard_regs[i]) >= 0; i++)
6212 	  if (regno < FIRST_PSEUDO_REGISTER
6213 	      && bitmap_bit_p (&check_only_regs, regno))
6214 	    bitmap_set_bit (&live_regs, regno);
6215       /* It is quite important to remove dead move insns because it
6216 	 means removing dead store.  We don't need to process them for
6217 	 constraints.  */
6218       if (remove_p)
6219 	{
6220 	  if (lra_dump_file != NULL)
6221 	    {
6222 	      fprintf (lra_dump_file, "	    Removing dead insn:\n ");
6223 	      dump_insn_slim (lra_dump_file, curr_insn);
6224 	    }
6225 	  lra_set_insn_deleted (curr_insn);
6226 	}
6227     }
6228 }
6229 
6230 /* The structure describes info to do an inheritance for the current
6231    insn.  We need to collect such info first before doing the
6232    transformations because the transformations change the insn
6233    internal representation.  */
6234 struct to_inherit
6235 {
6236   /* Original regno.  */
6237   int regno;
6238   /* Subsequent insns which can inherit original reg value.  */
6239   rtx insns;
6240 };
6241 
6242 /* Array containing all info for doing inheritance from the current
6243    insn.  */
6244 static struct to_inherit to_inherit[LRA_MAX_INSN_RELOADS];
6245 
6246 /* Number elements in the previous array.  */
6247 static int to_inherit_num;
6248 
6249 /* Add inheritance info REGNO and INSNS. Their meaning is described in
6250    structure to_inherit.  */
6251 static void
add_to_inherit(int regno,rtx insns)6252 add_to_inherit (int regno, rtx insns)
6253 {
6254   int i;
6255 
6256   for (i = 0; i < to_inherit_num; i++)
6257     if (to_inherit[i].regno == regno)
6258       return;
6259   lra_assert (to_inherit_num < LRA_MAX_INSN_RELOADS);
6260   to_inherit[to_inherit_num].regno = regno;
6261   to_inherit[to_inherit_num++].insns = insns;
6262 }
6263 
6264 /* Return the last non-debug insn in basic block BB, or the block begin
6265    note if none.  */
6266 static rtx_insn *
get_last_insertion_point(basic_block bb)6267 get_last_insertion_point (basic_block bb)
6268 {
6269   rtx_insn *insn;
6270 
6271   FOR_BB_INSNS_REVERSE (bb, insn)
6272     if (NONDEBUG_INSN_P (insn) || NOTE_INSN_BASIC_BLOCK_P (insn))
6273       return insn;
6274   gcc_unreachable ();
6275 }
6276 
6277 /* Set up RES by registers living on edges FROM except the edge (FROM,
6278    TO) or by registers set up in a jump insn in BB FROM.  */
6279 static void
get_live_on_other_edges(basic_block from,basic_block to,bitmap res)6280 get_live_on_other_edges (basic_block from, basic_block to, bitmap res)
6281 {
6282   rtx_insn *last;
6283   struct lra_insn_reg *reg;
6284   edge e;
6285   edge_iterator ei;
6286 
6287   lra_assert (to != NULL);
6288   bitmap_clear (res);
6289   FOR_EACH_EDGE (e, ei, from->succs)
6290     if (e->dest != to)
6291       bitmap_ior_into (res, df_get_live_in (e->dest));
6292   last = get_last_insertion_point (from);
6293   if (! JUMP_P (last))
6294     return;
6295   curr_id = lra_get_insn_recog_data (last);
6296   for (reg = curr_id->regs; reg != NULL; reg = reg->next)
6297     if (reg->type != OP_IN)
6298       bitmap_set_bit (res, reg->regno);
6299 }
6300 
6301 /* Used as a temporary results of some bitmap calculations.  */
6302 static bitmap_head temp_bitmap;
6303 
6304 /* We split for reloads of small class of hard regs.  The following
6305    defines how many hard regs the class should have to be qualified as
6306    small.  The code is mostly oriented to x86/x86-64 architecture
6307    where some insns need to use only specific register or pair of
6308    registers and these register can live in RTL explicitly, e.g. for
6309    parameter passing.  */
6310 static const int max_small_class_regs_num = 2;
6311 
6312 /* Do inheritance/split transformations in EBB starting with HEAD and
6313    finishing on TAIL.  We process EBB insns in the reverse order.
6314    Return true if we did any inheritance/split transformation in the
6315    EBB.
6316 
6317    We should avoid excessive splitting which results in worse code
6318    because of inaccurate cost calculations for spilling new split
6319    pseudos in such case.  To achieve this we do splitting only if
6320    register pressure is high in given basic block and there are reload
6321    pseudos requiring hard registers.  We could do more register
6322    pressure calculations at any given program point to avoid necessary
6323    splitting even more but it is to expensive and the current approach
6324    works well enough.  */
6325 static bool
inherit_in_ebb(rtx_insn * head,rtx_insn * tail)6326 inherit_in_ebb (rtx_insn *head, rtx_insn *tail)
6327 {
6328   int i, src_regno, dst_regno, nregs;
6329   bool change_p, succ_p, update_reloads_num_p;
6330   rtx_insn *prev_insn, *last_insn;
6331   rtx next_usage_insns, curr_set;
6332   enum reg_class cl;
6333   struct lra_insn_reg *reg;
6334   basic_block last_processed_bb, curr_bb = NULL;
6335   HARD_REG_SET potential_reload_hard_regs, live_hard_regs;
6336   bitmap to_process;
6337   unsigned int j;
6338   bitmap_iterator bi;
6339   bool head_p, after_p;
6340 
6341   change_p = false;
6342   curr_usage_insns_check++;
6343   clear_invariants ();
6344   reloads_num = calls_num = 0;
6345   for (unsigned int i = 0; i < NUM_ABI_IDS; ++i)
6346     last_call_for_abi[i] = 0;
6347   CLEAR_HARD_REG_SET (full_and_partial_call_clobbers);
6348   bitmap_clear (&check_only_regs);
6349   bitmap_clear (&invalid_invariant_regs);
6350   last_processed_bb = NULL;
6351   CLEAR_HARD_REG_SET (potential_reload_hard_regs);
6352   live_hard_regs = eliminable_regset | lra_no_alloc_regs;
6353   /* We don't process new insns generated in the loop.	*/
6354   for (curr_insn = tail; curr_insn != PREV_INSN (head); curr_insn = prev_insn)
6355     {
6356       prev_insn = PREV_INSN (curr_insn);
6357       if (BLOCK_FOR_INSN (curr_insn) != NULL)
6358 	curr_bb = BLOCK_FOR_INSN (curr_insn);
6359       if (last_processed_bb != curr_bb)
6360 	{
6361 	  /* We are at the end of BB.  Add qualified living
6362 	     pseudos for potential splitting.  */
6363 	  to_process = df_get_live_out (curr_bb);
6364 	  if (last_processed_bb != NULL)
6365 	    {
6366 	      /* We are somewhere in the middle of EBB.	 */
6367 	      get_live_on_other_edges (curr_bb, last_processed_bb,
6368 				       &temp_bitmap);
6369 	      to_process = &temp_bitmap;
6370 	    }
6371 	  last_processed_bb = curr_bb;
6372 	  last_insn = get_last_insertion_point (curr_bb);
6373 	  after_p = (! JUMP_P (last_insn)
6374 		     && (! CALL_P (last_insn)
6375 			 || (find_reg_note (last_insn,
6376 					   REG_NORETURN, NULL_RTX) == NULL_RTX
6377 			     && ! SIBLING_CALL_P (last_insn))));
6378 	  CLEAR_HARD_REG_SET (potential_reload_hard_regs);
6379 	  EXECUTE_IF_SET_IN_BITMAP (to_process, 0, j, bi)
6380 	    {
6381 	      if ((int) j >= lra_constraint_new_regno_start)
6382 		break;
6383 	      if (j < FIRST_PSEUDO_REGISTER || reg_renumber[j] >= 0)
6384 		{
6385 		  if (j < FIRST_PSEUDO_REGISTER)
6386 		    SET_HARD_REG_BIT (live_hard_regs, j);
6387 		  else
6388 		    add_to_hard_reg_set (&live_hard_regs,
6389 					 PSEUDO_REGNO_MODE (j),
6390 					 reg_renumber[j]);
6391 		  setup_next_usage_insn (j, last_insn, reloads_num, after_p);
6392 		}
6393 	    }
6394 	}
6395       src_regno = dst_regno = -1;
6396       curr_set = single_set (curr_insn);
6397       if (curr_set != NULL_RTX && REG_P (SET_DEST (curr_set)))
6398 	dst_regno = REGNO (SET_DEST (curr_set));
6399       if (curr_set != NULL_RTX && REG_P (SET_SRC (curr_set)))
6400 	src_regno = REGNO (SET_SRC (curr_set));
6401       update_reloads_num_p = true;
6402       if (src_regno < lra_constraint_new_regno_start
6403 	  && src_regno >= FIRST_PSEUDO_REGISTER
6404 	  && reg_renumber[src_regno] < 0
6405 	  && dst_regno >= lra_constraint_new_regno_start
6406 	  && (cl = lra_get_allocno_class (dst_regno)) != NO_REGS)
6407 	{
6408 	  /* 'reload_pseudo <- original_pseudo'.  */
6409 	  if (ira_class_hard_regs_num[cl] <= max_small_class_regs_num)
6410 	    reloads_num++;
6411 	  update_reloads_num_p = false;
6412 	  succ_p = false;
6413 	  if (usage_insns[src_regno].check == curr_usage_insns_check
6414 	      && (next_usage_insns = usage_insns[src_regno].insns) != NULL_RTX)
6415 	    succ_p = inherit_reload_reg (false, src_regno, cl,
6416 					 curr_insn, next_usage_insns);
6417 	  if (succ_p)
6418 	    change_p = true;
6419 	  else
6420 	    setup_next_usage_insn (src_regno, curr_insn, reloads_num, false);
6421 	  if (hard_reg_set_subset_p (reg_class_contents[cl], live_hard_regs))
6422 	    potential_reload_hard_regs |= reg_class_contents[cl];
6423 	}
6424       else if (src_regno < 0
6425 	       && dst_regno >= lra_constraint_new_regno_start
6426 	       && invariant_p (SET_SRC (curr_set))
6427 	       && (cl = lra_get_allocno_class (dst_regno)) != NO_REGS
6428 	       && ! bitmap_bit_p (&invalid_invariant_regs, dst_regno)
6429 	       && ! bitmap_bit_p (&invalid_invariant_regs,
6430 				  ORIGINAL_REGNO(regno_reg_rtx[dst_regno])))
6431 	{
6432 	  /* 'reload_pseudo <- invariant'.  */
6433 	  if (ira_class_hard_regs_num[cl] <= max_small_class_regs_num)
6434 	    reloads_num++;
6435 	  update_reloads_num_p = false;
6436 	  if (process_invariant_for_inheritance (SET_DEST (curr_set), SET_SRC (curr_set)))
6437 	    change_p = true;
6438 	  if (hard_reg_set_subset_p (reg_class_contents[cl], live_hard_regs))
6439 	    potential_reload_hard_regs |= reg_class_contents[cl];
6440 	}
6441       else if (src_regno >= lra_constraint_new_regno_start
6442 	       && dst_regno < lra_constraint_new_regno_start
6443 	       && dst_regno >= FIRST_PSEUDO_REGISTER
6444 	       && reg_renumber[dst_regno] < 0
6445 	       && (cl = lra_get_allocno_class (src_regno)) != NO_REGS
6446 	       && usage_insns[dst_regno].check == curr_usage_insns_check
6447 	       && (next_usage_insns
6448 		   = usage_insns[dst_regno].insns) != NULL_RTX)
6449 	{
6450 	  if (ira_class_hard_regs_num[cl] <= max_small_class_regs_num)
6451 	    reloads_num++;
6452 	  update_reloads_num_p = false;
6453 	  /* 'original_pseudo <- reload_pseudo'.  */
6454 	  if (! JUMP_P (curr_insn)
6455 	      && inherit_reload_reg (true, dst_regno, cl,
6456 				     curr_insn, next_usage_insns))
6457 	    change_p = true;
6458 	  /* Invalidate.  */
6459 	  usage_insns[dst_regno].check = 0;
6460 	  if (hard_reg_set_subset_p (reg_class_contents[cl], live_hard_regs))
6461 	    potential_reload_hard_regs |= reg_class_contents[cl];
6462 	}
6463       else if (INSN_P (curr_insn))
6464 	{
6465 	  int iter;
6466 	  int max_uid = get_max_uid ();
6467 
6468 	  curr_id = lra_get_insn_recog_data (curr_insn);
6469 	  curr_static_id = curr_id->insn_static_data;
6470 	  to_inherit_num = 0;
6471 	  /* Process insn definitions.	*/
6472 	  for (iter = 0; iter < 2; iter++)
6473 	    for (reg = iter == 0 ? curr_id->regs : curr_static_id->hard_regs;
6474 		 reg != NULL;
6475 		 reg = reg->next)
6476 	      if (reg->type != OP_IN
6477 		  && (dst_regno = reg->regno) < lra_constraint_new_regno_start)
6478 		{
6479 		  if (dst_regno >= FIRST_PSEUDO_REGISTER && reg->type == OP_OUT
6480 		      && reg_renumber[dst_regno] < 0 && ! reg->subreg_p
6481 		      && usage_insns[dst_regno].check == curr_usage_insns_check
6482 		      && (next_usage_insns
6483 			  = usage_insns[dst_regno].insns) != NULL_RTX)
6484 		    {
6485 		      struct lra_insn_reg *r;
6486 
6487 		      for (r = curr_id->regs; r != NULL; r = r->next)
6488 			if (r->type != OP_OUT && r->regno == dst_regno)
6489 			  break;
6490 		      /* Don't do inheritance if the pseudo is also
6491 			 used in the insn.  */
6492 		      if (r == NULL)
6493 			/* We cannot do inheritance right now
6494 			   because the current insn reg info (chain
6495 			   regs) can change after that.  */
6496 			add_to_inherit (dst_regno, next_usage_insns);
6497 		    }
6498 		  /* We cannot process one reg twice here because of
6499 		     usage_insns invalidation.  */
6500 		  if ((dst_regno < FIRST_PSEUDO_REGISTER
6501 		       || reg_renumber[dst_regno] >= 0)
6502 		      && ! reg->subreg_p && reg->type != OP_IN)
6503 		    {
6504 		      HARD_REG_SET s;
6505 
6506 		      if (split_if_necessary (dst_regno, reg->biggest_mode,
6507 					      potential_reload_hard_regs,
6508 					      false, curr_insn, max_uid))
6509 			change_p = true;
6510 		      CLEAR_HARD_REG_SET (s);
6511 		      if (dst_regno < FIRST_PSEUDO_REGISTER)
6512 			add_to_hard_reg_set (&s, reg->biggest_mode, dst_regno);
6513 		      else
6514 			add_to_hard_reg_set (&s, PSEUDO_REGNO_MODE (dst_regno),
6515 					     reg_renumber[dst_regno]);
6516 		      live_hard_regs &= ~s;
6517 		      potential_reload_hard_regs &= ~s;
6518 		    }
6519 		  /* We should invalidate potential inheritance or
6520 		     splitting for the current insn usages to the next
6521 		     usage insns (see code below) as the output pseudo
6522 		     prevents this.  */
6523 		  if ((dst_regno >= FIRST_PSEUDO_REGISTER
6524 		       && reg_renumber[dst_regno] < 0)
6525 		      || (reg->type == OP_OUT && ! reg->subreg_p
6526 			  && (dst_regno < FIRST_PSEUDO_REGISTER
6527 			      || reg_renumber[dst_regno] >= 0)))
6528 		    {
6529 		      /* Invalidate and mark definitions.  */
6530 		      if (dst_regno >= FIRST_PSEUDO_REGISTER)
6531 			usage_insns[dst_regno].check = -(int) INSN_UID (curr_insn);
6532 		      else
6533 			{
6534 			  nregs = hard_regno_nregs (dst_regno,
6535 						    reg->biggest_mode);
6536 			  for (i = 0; i < nregs; i++)
6537 			    usage_insns[dst_regno + i].check
6538 			      = -(int) INSN_UID (curr_insn);
6539 			}
6540 		    }
6541 		}
6542 	  /* Process clobbered call regs.  */
6543 	  if (curr_id->arg_hard_regs != NULL)
6544 	    for (i = 0; (dst_regno = curr_id->arg_hard_regs[i]) >= 0; i++)
6545 	      if (dst_regno >= FIRST_PSEUDO_REGISTER)
6546 		usage_insns[dst_regno - FIRST_PSEUDO_REGISTER].check
6547 		  = -(int) INSN_UID (curr_insn);
6548 	  if (! JUMP_P (curr_insn))
6549 	    for (i = 0; i < to_inherit_num; i++)
6550 	      if (inherit_reload_reg (true, to_inherit[i].regno,
6551 				      ALL_REGS, curr_insn,
6552 				      to_inherit[i].insns))
6553 	      change_p = true;
6554 	  if (CALL_P (curr_insn))
6555 	    {
6556 	      rtx cheap, pat, dest;
6557 	      rtx_insn *restore;
6558 	      int regno, hard_regno;
6559 
6560 	      calls_num++;
6561 	      function_abi callee_abi = insn_callee_abi (curr_insn);
6562 	      last_call_for_abi[callee_abi.id ()] = calls_num;
6563 	      full_and_partial_call_clobbers
6564 		|= callee_abi.full_and_partial_reg_clobbers ();
6565 	      if ((cheap = find_reg_note (curr_insn,
6566 					  REG_RETURNED, NULL_RTX)) != NULL_RTX
6567 		  && ((cheap = XEXP (cheap, 0)), true)
6568 		  && (regno = REGNO (cheap)) >= FIRST_PSEUDO_REGISTER
6569 		  && (hard_regno = reg_renumber[regno]) >= 0
6570 		  && usage_insns[regno].check == curr_usage_insns_check
6571 		  /* If there are pending saves/restores, the
6572 		     optimization is not worth.	 */
6573 		  && usage_insns[regno].calls_num == calls_num - 1
6574 		  && callee_abi.clobbers_reg_p (GET_MODE (cheap), hard_regno))
6575 		{
6576 		  /* Restore the pseudo from the call result as
6577 		     REG_RETURNED note says that the pseudo value is
6578 		     in the call result and the pseudo is an argument
6579 		     of the call.  */
6580 		  pat = PATTERN (curr_insn);
6581 		  if (GET_CODE (pat) == PARALLEL)
6582 		    pat = XVECEXP (pat, 0, 0);
6583 		  dest = SET_DEST (pat);
6584 		  /* For multiple return values dest is PARALLEL.
6585 		     Currently we handle only single return value case.  */
6586 		  if (REG_P (dest))
6587 		    {
6588 		      start_sequence ();
6589 		      emit_move_insn (cheap, copy_rtx (dest));
6590 		      restore = get_insns ();
6591 		      end_sequence ();
6592 		      lra_process_new_insns (curr_insn, NULL, restore,
6593 					     "Inserting call parameter restore");
6594 		      /* We don't need to save/restore of the pseudo from
6595 			 this call.	 */
6596 		      usage_insns[regno].calls_num = calls_num;
6597 		      remove_from_hard_reg_set
6598 			(&full_and_partial_call_clobbers,
6599 			 GET_MODE (cheap), hard_regno);
6600 		      bitmap_set_bit (&check_only_regs, regno);
6601 		    }
6602 		}
6603 	    }
6604 	  to_inherit_num = 0;
6605 	  /* Process insn usages.  */
6606 	  for (iter = 0; iter < 2; iter++)
6607 	    for (reg = iter == 0 ? curr_id->regs : curr_static_id->hard_regs;
6608 		 reg != NULL;
6609 		 reg = reg->next)
6610 	      if ((reg->type != OP_OUT
6611 		   || (reg->type == OP_OUT && reg->subreg_p))
6612 		  && (src_regno = reg->regno) < lra_constraint_new_regno_start)
6613 		{
6614 		  if (src_regno >= FIRST_PSEUDO_REGISTER
6615 		      && reg_renumber[src_regno] < 0 && reg->type == OP_IN)
6616 		    {
6617 		      if (usage_insns[src_regno].check == curr_usage_insns_check
6618 			  && (next_usage_insns
6619 			      = usage_insns[src_regno].insns) != NULL_RTX
6620 			  && NONDEBUG_INSN_P (curr_insn))
6621 			add_to_inherit (src_regno, next_usage_insns);
6622 		      else if (usage_insns[src_regno].check
6623 			       != -(int) INSN_UID (curr_insn))
6624 			/* Add usages but only if the reg is not set up
6625 			   in the same insn.  */
6626 			add_next_usage_insn (src_regno, curr_insn, reloads_num);
6627 		    }
6628 		  else if (src_regno < FIRST_PSEUDO_REGISTER
6629 			   || reg_renumber[src_regno] >= 0)
6630 		    {
6631 		      bool before_p;
6632 		      rtx_insn *use_insn = curr_insn;
6633 
6634 		      before_p = (JUMP_P (curr_insn)
6635 				  || (CALL_P (curr_insn) && reg->type == OP_IN));
6636 		      if (NONDEBUG_INSN_P (curr_insn)
6637 			  && (! JUMP_P (curr_insn) || reg->type == OP_IN)
6638 			  && split_if_necessary (src_regno, reg->biggest_mode,
6639 						 potential_reload_hard_regs,
6640 						 before_p, curr_insn, max_uid))
6641 			{
6642 			  if (reg->subreg_p)
6643 			    check_and_force_assignment_correctness_p = true;
6644 			  change_p = true;
6645 			  /* Invalidate. */
6646 			  usage_insns[src_regno].check = 0;
6647 			  if (before_p)
6648 			    use_insn = PREV_INSN (curr_insn);
6649 			}
6650 		      if (NONDEBUG_INSN_P (curr_insn))
6651 			{
6652 			  if (src_regno < FIRST_PSEUDO_REGISTER)
6653 			    add_to_hard_reg_set (&live_hard_regs,
6654 						 reg->biggest_mode, src_regno);
6655 			  else
6656 			    add_to_hard_reg_set (&live_hard_regs,
6657 						 PSEUDO_REGNO_MODE (src_regno),
6658 						 reg_renumber[src_regno]);
6659 			}
6660 		      if (src_regno >= FIRST_PSEUDO_REGISTER)
6661 			add_next_usage_insn (src_regno, use_insn, reloads_num);
6662 		      else
6663 			{
6664 			  for (i = 0; i < hard_regno_nregs (src_regno, reg->biggest_mode); i++)
6665 			    add_next_usage_insn (src_regno + i, use_insn, reloads_num);
6666 			}
6667 		    }
6668 		}
6669 	  /* Process used call regs.  */
6670 	  if (curr_id->arg_hard_regs != NULL)
6671 	    for (i = 0; (src_regno = curr_id->arg_hard_regs[i]) >= 0; i++)
6672 	      if (src_regno < FIRST_PSEUDO_REGISTER)
6673 		{
6674 	           SET_HARD_REG_BIT (live_hard_regs, src_regno);
6675 	           add_next_usage_insn (src_regno, curr_insn, reloads_num);
6676 		}
6677 	  for (i = 0; i < to_inherit_num; i++)
6678 	    {
6679 	      src_regno = to_inherit[i].regno;
6680 	      if (inherit_reload_reg (false, src_regno, ALL_REGS,
6681 				      curr_insn, to_inherit[i].insns))
6682 		change_p = true;
6683 	      else
6684 		setup_next_usage_insn (src_regno, curr_insn, reloads_num, false);
6685 	    }
6686 	}
6687       if (update_reloads_num_p
6688 	  && NONDEBUG_INSN_P (curr_insn) && curr_set != NULL_RTX)
6689 	{
6690 	  int regno = -1;
6691 	  if ((REG_P (SET_DEST (curr_set))
6692 	       && (regno = REGNO (SET_DEST (curr_set))) >= lra_constraint_new_regno_start
6693 	       && reg_renumber[regno] < 0
6694 	       && (cl = lra_get_allocno_class (regno)) != NO_REGS)
6695 	      || (REG_P (SET_SRC (curr_set))
6696 	          && (regno = REGNO (SET_SRC (curr_set))) >= lra_constraint_new_regno_start
6697 	          && reg_renumber[regno] < 0
6698 	          && (cl = lra_get_allocno_class (regno)) != NO_REGS))
6699 	    {
6700 	      if (ira_class_hard_regs_num[cl] <= max_small_class_regs_num)
6701 		reloads_num++;
6702 	      if (hard_reg_set_subset_p (reg_class_contents[cl], live_hard_regs))
6703 		potential_reload_hard_regs |= reg_class_contents[cl];
6704 	    }
6705 	}
6706       if (NONDEBUG_INSN_P (curr_insn))
6707 	{
6708 	  int regno;
6709 
6710 	  /* Invalidate invariants with changed regs.  */
6711 	  curr_id = lra_get_insn_recog_data (curr_insn);
6712 	  for (reg = curr_id->regs; reg != NULL; reg = reg->next)
6713 	    if (reg->type != OP_IN)
6714 	      {
6715 		bitmap_set_bit (&invalid_invariant_regs, reg->regno);
6716 		bitmap_set_bit (&invalid_invariant_regs,
6717 				ORIGINAL_REGNO (regno_reg_rtx[reg->regno]));
6718 	      }
6719 	  curr_static_id = curr_id->insn_static_data;
6720 	  for (reg = curr_static_id->hard_regs; reg != NULL; reg = reg->next)
6721 	    if (reg->type != OP_IN)
6722 	      bitmap_set_bit (&invalid_invariant_regs, reg->regno);
6723 	  if (curr_id->arg_hard_regs != NULL)
6724 	    for (i = 0; (regno = curr_id->arg_hard_regs[i]) >= 0; i++)
6725 	      if (regno >= FIRST_PSEUDO_REGISTER)
6726 		bitmap_set_bit (&invalid_invariant_regs,
6727 				regno - FIRST_PSEUDO_REGISTER);
6728 	}
6729       /* We reached the start of the current basic block.  */
6730       if (prev_insn == NULL_RTX || prev_insn == PREV_INSN (head)
6731 	  || BLOCK_FOR_INSN (prev_insn) != curr_bb)
6732 	{
6733 	  /* We reached the beginning of the current block -- do
6734 	     rest of spliting in the current BB.  */
6735 	  to_process = df_get_live_in (curr_bb);
6736 	  if (BLOCK_FOR_INSN (head) != curr_bb)
6737 	    {
6738 	      /* We are somewhere in the middle of EBB.	 */
6739 	      get_live_on_other_edges (EDGE_PRED (curr_bb, 0)->src,
6740 				       curr_bb, &temp_bitmap);
6741 	      to_process = &temp_bitmap;
6742 	    }
6743 	  head_p = true;
6744 	  EXECUTE_IF_SET_IN_BITMAP (to_process, 0, j, bi)
6745 	    {
6746 	      if ((int) j >= lra_constraint_new_regno_start)
6747 		break;
6748 	      if (((int) j < FIRST_PSEUDO_REGISTER || reg_renumber[j] >= 0)
6749 		  && usage_insns[j].check == curr_usage_insns_check
6750 		  && (next_usage_insns = usage_insns[j].insns) != NULL_RTX)
6751 		{
6752 		  if (need_for_split_p (potential_reload_hard_regs, j))
6753 		    {
6754 		      if (lra_dump_file != NULL && head_p)
6755 			{
6756 			  fprintf (lra_dump_file,
6757 				   "  ----------------------------------\n");
6758 			  head_p = false;
6759 			}
6760 		      if (split_reg (false, j, bb_note (curr_bb),
6761 				     next_usage_insns, NULL))
6762 			change_p = true;
6763 		    }
6764 		  usage_insns[j].check = 0;
6765 		}
6766 	    }
6767 	}
6768     }
6769   return change_p;
6770 }
6771 
6772 /* This value affects EBB forming.  If probability of edge from EBB to
6773    a BB is not greater than the following value, we don't add the BB
6774    to EBB.  */
6775 #define EBB_PROBABILITY_CUTOFF \
6776   ((REG_BR_PROB_BASE * param_lra_inheritance_ebb_probability_cutoff) / 100)
6777 
6778 /* Current number of inheritance/split iteration.  */
6779 int lra_inheritance_iter;
6780 
6781 /* Entry function for inheritance/split pass.  */
6782 void
lra_inheritance(void)6783 lra_inheritance (void)
6784 {
6785   int i;
6786   basic_block bb, start_bb;
6787   edge e;
6788 
6789   lra_inheritance_iter++;
6790   if (lra_inheritance_iter > LRA_MAX_INHERITANCE_PASSES)
6791     return;
6792   timevar_push (TV_LRA_INHERITANCE);
6793   if (lra_dump_file != NULL)
6794     fprintf (lra_dump_file, "\n********** Inheritance #%d: **********\n\n",
6795 	     lra_inheritance_iter);
6796   curr_usage_insns_check = 0;
6797   usage_insns = XNEWVEC (struct usage_insns, lra_constraint_new_regno_start);
6798   for (i = 0; i < lra_constraint_new_regno_start; i++)
6799     usage_insns[i].check = 0;
6800   bitmap_initialize (&check_only_regs, &reg_obstack);
6801   bitmap_initialize (&invalid_invariant_regs, &reg_obstack);
6802   bitmap_initialize (&live_regs, &reg_obstack);
6803   bitmap_initialize (&temp_bitmap, &reg_obstack);
6804   bitmap_initialize (&ebb_global_regs, &reg_obstack);
6805   FOR_EACH_BB_FN (bb, cfun)
6806     {
6807       start_bb = bb;
6808       if (lra_dump_file != NULL)
6809 	fprintf (lra_dump_file, "EBB");
6810       /* Form a EBB starting with BB.  */
6811       bitmap_clear (&ebb_global_regs);
6812       bitmap_ior_into (&ebb_global_regs, df_get_live_in (bb));
6813       for (;;)
6814 	{
6815 	  if (lra_dump_file != NULL)
6816 	    fprintf (lra_dump_file, " %d", bb->index);
6817 	  if (bb->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
6818 	      || LABEL_P (BB_HEAD (bb->next_bb)))
6819 	    break;
6820 	  e = find_fallthru_edge (bb->succs);
6821 	  if (! e)
6822 	    break;
6823 	  if (e->probability.initialized_p ()
6824 	      && e->probability.to_reg_br_prob_base () < EBB_PROBABILITY_CUTOFF)
6825 	    break;
6826 	  bb = bb->next_bb;
6827 	}
6828       bitmap_ior_into (&ebb_global_regs, df_get_live_out (bb));
6829       if (lra_dump_file != NULL)
6830 	fprintf (lra_dump_file, "\n");
6831       if (inherit_in_ebb (BB_HEAD (start_bb), BB_END (bb)))
6832 	/* Remember that the EBB head and tail can change in
6833 	   inherit_in_ebb.  */
6834 	update_ebb_live_info (BB_HEAD (start_bb), BB_END (bb));
6835     }
6836   bitmap_release (&ebb_global_regs);
6837   bitmap_release (&temp_bitmap);
6838   bitmap_release (&live_regs);
6839   bitmap_release (&invalid_invariant_regs);
6840   bitmap_release (&check_only_regs);
6841   free (usage_insns);
6842 
6843   timevar_pop (TV_LRA_INHERITANCE);
6844 }
6845 
6846 
6847 
6848 /* This page contains code to undo failed inheritance/split
6849    transformations.  */
6850 
6851 /* Current number of iteration undoing inheritance/split.  */
6852 int lra_undo_inheritance_iter;
6853 
6854 /* Fix BB live info LIVE after removing pseudos created on pass doing
6855    inheritance/split which are REMOVED_PSEUDOS.	 */
6856 static void
fix_bb_live_info(bitmap live,bitmap removed_pseudos)6857 fix_bb_live_info (bitmap live, bitmap removed_pseudos)
6858 {
6859   unsigned int regno;
6860   bitmap_iterator bi;
6861 
6862   EXECUTE_IF_SET_IN_BITMAP (removed_pseudos, 0, regno, bi)
6863     if (bitmap_clear_bit (live, regno)
6864 	&& REG_P (lra_reg_info[regno].restore_rtx))
6865       bitmap_set_bit (live, REGNO (lra_reg_info[regno].restore_rtx));
6866 }
6867 
6868 /* Return regno of the (subreg of) REG. Otherwise, return a negative
6869    number.  */
6870 static int
get_regno(rtx reg)6871 get_regno (rtx reg)
6872 {
6873   if (GET_CODE (reg) == SUBREG)
6874     reg = SUBREG_REG (reg);
6875   if (REG_P (reg))
6876     return REGNO (reg);
6877   return -1;
6878 }
6879 
6880 /* Delete a move INSN with destination reg DREGNO and a previous
6881    clobber insn with the same regno.  The inheritance/split code can
6882    generate moves with preceding clobber and when we delete such moves
6883    we should delete the clobber insn too to keep the correct life
6884    info.  */
6885 static void
delete_move_and_clobber(rtx_insn * insn,int dregno)6886 delete_move_and_clobber (rtx_insn *insn, int dregno)
6887 {
6888   rtx_insn *prev_insn = PREV_INSN (insn);
6889 
6890   lra_set_insn_deleted (insn);
6891   lra_assert (dregno >= 0);
6892   if (prev_insn != NULL && NONDEBUG_INSN_P (prev_insn)
6893       && GET_CODE (PATTERN (prev_insn)) == CLOBBER
6894       && dregno == get_regno (XEXP (PATTERN (prev_insn), 0)))
6895     lra_set_insn_deleted (prev_insn);
6896 }
6897 
6898 /* Remove inheritance/split pseudos which are in REMOVE_PSEUDOS and
6899    return true if we did any change.  The undo transformations for
6900    inheritance looks like
6901       i <- i2
6902       p <- i	  =>   p <- i2
6903    or removing
6904       p <- i, i <- p, and i <- i3
6905    where p is original pseudo from which inheritance pseudo i was
6906    created, i and i3 are removed inheritance pseudos, i2 is another
6907    not removed inheritance pseudo.  All split pseudos or other
6908    occurrences of removed inheritance pseudos are changed on the
6909    corresponding original pseudos.
6910 
6911    The function also schedules insns changed and created during
6912    inheritance/split pass for processing by the subsequent constraint
6913    pass.  */
6914 static bool
remove_inheritance_pseudos(bitmap remove_pseudos)6915 remove_inheritance_pseudos (bitmap remove_pseudos)
6916 {
6917   basic_block bb;
6918   int regno, sregno, prev_sregno, dregno;
6919   rtx restore_rtx;
6920   rtx set, prev_set;
6921   rtx_insn *prev_insn;
6922   bool change_p, done_p;
6923 
6924   change_p = ! bitmap_empty_p (remove_pseudos);
6925   /* We cannot finish the function right away if CHANGE_P is true
6926      because we need to marks insns affected by previous
6927      inheritance/split pass for processing by the subsequent
6928      constraint pass.  */
6929   FOR_EACH_BB_FN (bb, cfun)
6930     {
6931       fix_bb_live_info (df_get_live_in (bb), remove_pseudos);
6932       fix_bb_live_info (df_get_live_out (bb), remove_pseudos);
6933       FOR_BB_INSNS_REVERSE (bb, curr_insn)
6934 	{
6935 	  if (! INSN_P (curr_insn))
6936 	    continue;
6937 	  done_p = false;
6938 	  sregno = dregno = -1;
6939 	  if (change_p && NONDEBUG_INSN_P (curr_insn)
6940 	      && (set = single_set (curr_insn)) != NULL_RTX)
6941 	    {
6942 	      dregno = get_regno (SET_DEST (set));
6943 	      sregno = get_regno (SET_SRC (set));
6944 	    }
6945 
6946 	  if (sregno >= 0 && dregno >= 0)
6947 	    {
6948 	      if (bitmap_bit_p (remove_pseudos, dregno)
6949 		  && ! REG_P (lra_reg_info[dregno].restore_rtx))
6950 		{
6951 		  /* invariant inheritance pseudo <- original pseudo */
6952 		  if (lra_dump_file != NULL)
6953 		    {
6954 		      fprintf (lra_dump_file, "	   Removing invariant inheritance:\n");
6955 		      dump_insn_slim (lra_dump_file, curr_insn);
6956 		      fprintf (lra_dump_file, "\n");
6957 		    }
6958 		  delete_move_and_clobber (curr_insn, dregno);
6959 		  done_p = true;
6960 		}
6961 	      else if (bitmap_bit_p (remove_pseudos, sregno)
6962 		       && ! REG_P (lra_reg_info[sregno].restore_rtx))
6963 		{
6964 		  /* reload pseudo <- invariant inheritance pseudo */
6965 		  start_sequence ();
6966 		  /* We cannot just change the source.  It might be
6967 		     an insn different from the move.  */
6968 		  emit_insn (lra_reg_info[sregno].restore_rtx);
6969 		  rtx_insn *new_insns = get_insns ();
6970 		  end_sequence ();
6971 		  lra_assert (single_set (new_insns) != NULL
6972 			      && SET_DEST (set) == SET_DEST (single_set (new_insns)));
6973 		  lra_process_new_insns (curr_insn, NULL, new_insns,
6974 					 "Changing reload<-invariant inheritance");
6975 		  delete_move_and_clobber (curr_insn, dregno);
6976 		  done_p = true;
6977 		}
6978 	      else if ((bitmap_bit_p (remove_pseudos, sregno)
6979 			&& (get_regno (lra_reg_info[sregno].restore_rtx) == dregno
6980 			    || (bitmap_bit_p (remove_pseudos, dregno)
6981 				&& get_regno (lra_reg_info[sregno].restore_rtx) >= 0
6982 				&& (get_regno (lra_reg_info[sregno].restore_rtx)
6983 				    == get_regno (lra_reg_info[dregno].restore_rtx)))))
6984 		       || (bitmap_bit_p (remove_pseudos, dregno)
6985 			   && get_regno (lra_reg_info[dregno].restore_rtx) == sregno))
6986 		/* One of the following cases:
6987 		     original <- removed inheritance pseudo
6988 		     removed inherit pseudo <- another removed inherit pseudo
6989 		     removed inherit pseudo <- original pseudo
6990 		   Or
6991 		     removed_split_pseudo <- original_reg
6992 		     original_reg <- removed_split_pseudo */
6993 		{
6994 		  if (lra_dump_file != NULL)
6995 		    {
6996 		      fprintf (lra_dump_file, "	   Removing %s:\n",
6997 			       bitmap_bit_p (&lra_split_regs, sregno)
6998 			       || bitmap_bit_p (&lra_split_regs, dregno)
6999 			       ? "split" : "inheritance");
7000 		      dump_insn_slim (lra_dump_file, curr_insn);
7001 		    }
7002 		  delete_move_and_clobber (curr_insn, dregno);
7003 		  done_p = true;
7004 		}
7005 	      else if (bitmap_bit_p (remove_pseudos, sregno)
7006 		       && bitmap_bit_p (&lra_inheritance_pseudos, sregno))
7007 		{
7008 		  /* Search the following pattern:
7009 		       inherit_or_split_pseudo1 <- inherit_or_split_pseudo2
7010 		       original_pseudo <- inherit_or_split_pseudo1
7011 		    where the 2nd insn is the current insn and
7012 		    inherit_or_split_pseudo2 is not removed.  If it is found,
7013 		    change the current insn onto:
7014 		       original_pseudo <- inherit_or_split_pseudo2.  */
7015 		  for (prev_insn = PREV_INSN (curr_insn);
7016 		       prev_insn != NULL_RTX && ! NONDEBUG_INSN_P (prev_insn);
7017 		       prev_insn = PREV_INSN (prev_insn))
7018 		    ;
7019 		  if (prev_insn != NULL_RTX && BLOCK_FOR_INSN (prev_insn) == bb
7020 		      && (prev_set = single_set (prev_insn)) != NULL_RTX
7021 		      /* There should be no subregs in insn we are
7022 			 searching because only the original reg might
7023 			 be in subreg when we changed the mode of
7024 			 load/store for splitting.  */
7025 		      && REG_P (SET_DEST (prev_set))
7026 		      && REG_P (SET_SRC (prev_set))
7027 		      && (int) REGNO (SET_DEST (prev_set)) == sregno
7028 		      && ((prev_sregno = REGNO (SET_SRC (prev_set)))
7029 			  >= FIRST_PSEUDO_REGISTER)
7030 		      && (lra_reg_info[prev_sregno].restore_rtx == NULL_RTX
7031 			  ||
7032 			  /* As we consider chain of inheritance or
7033 			     splitting described in above comment we should
7034 			     check that sregno and prev_sregno were
7035 			     inheritance/split pseudos created from the
7036 			     same original regno.  */
7037 			  (get_regno (lra_reg_info[sregno].restore_rtx) >= 0
7038 			   && (get_regno (lra_reg_info[sregno].restore_rtx)
7039 			       == get_regno (lra_reg_info[prev_sregno].restore_rtx))))
7040 		      && ! bitmap_bit_p (remove_pseudos, prev_sregno))
7041 		    {
7042 		      lra_assert (GET_MODE (SET_SRC (prev_set))
7043 				  == GET_MODE (regno_reg_rtx[sregno]));
7044 		      /* Although we have a single set, the insn can
7045 			 contain more one sregno register occurrence
7046 			 as a source.  Change all occurrences.  */
7047 		      lra_substitute_pseudo_within_insn (curr_insn, sregno,
7048 							 SET_SRC (prev_set),
7049 							 false);
7050 		      /* As we are finishing with processing the insn
7051 			 here, check the destination too as it might
7052 			 inheritance pseudo for another pseudo.  */
7053 		      if (bitmap_bit_p (remove_pseudos, dregno)
7054 			  && bitmap_bit_p (&lra_inheritance_pseudos, dregno)
7055 			  && (restore_rtx
7056 			      = lra_reg_info[dregno].restore_rtx) != NULL_RTX)
7057 			{
7058 			  if (GET_CODE (SET_DEST (set)) == SUBREG)
7059 			    SUBREG_REG (SET_DEST (set)) = restore_rtx;
7060 			  else
7061 			    SET_DEST (set) = restore_rtx;
7062 			}
7063 		      lra_push_insn_and_update_insn_regno_info (curr_insn);
7064 		      lra_set_used_insn_alternative_by_uid
7065 			(INSN_UID (curr_insn), LRA_UNKNOWN_ALT);
7066 		      done_p = true;
7067 		      if (lra_dump_file != NULL)
7068 			{
7069 			  fprintf (lra_dump_file, "    Change reload insn:\n");
7070 			  dump_insn_slim (lra_dump_file, curr_insn);
7071 			}
7072 		    }
7073 		}
7074 	    }
7075 	  if (! done_p)
7076 	    {
7077 	      struct lra_insn_reg *reg;
7078 	      bool restored_regs_p = false;
7079 	      bool kept_regs_p = false;
7080 
7081 	      curr_id = lra_get_insn_recog_data (curr_insn);
7082 	      for (reg = curr_id->regs; reg != NULL; reg = reg->next)
7083 		{
7084 		  regno = reg->regno;
7085 		  restore_rtx = lra_reg_info[regno].restore_rtx;
7086 		  if (restore_rtx != NULL_RTX)
7087 		    {
7088 		      if (change_p && bitmap_bit_p (remove_pseudos, regno))
7089 			{
7090 			  lra_substitute_pseudo_within_insn
7091 			    (curr_insn, regno, restore_rtx, false);
7092 			  restored_regs_p = true;
7093 			}
7094 		      else
7095 			kept_regs_p = true;
7096 		    }
7097 		}
7098 	      if (NONDEBUG_INSN_P (curr_insn) && kept_regs_p)
7099 		{
7100 		  /* The instruction has changed since the previous
7101 		     constraints pass.  */
7102 		  lra_push_insn_and_update_insn_regno_info (curr_insn);
7103 		  lra_set_used_insn_alternative_by_uid
7104 		    (INSN_UID (curr_insn), LRA_UNKNOWN_ALT);
7105 		}
7106 	      else if (restored_regs_p)
7107 		/* The instruction has been restored to the form that
7108 		   it had during the previous constraints pass.  */
7109 		lra_update_insn_regno_info (curr_insn);
7110 	      if (restored_regs_p && lra_dump_file != NULL)
7111 		{
7112 		  fprintf (lra_dump_file, "   Insn after restoring regs:\n");
7113 		  dump_insn_slim (lra_dump_file, curr_insn);
7114 		}
7115 	    }
7116 	}
7117     }
7118   return change_p;
7119 }
7120 
7121 /* If optional reload pseudos failed to get a hard register or was not
7122    inherited, it is better to remove optional reloads.  We do this
7123    transformation after undoing inheritance to figure out necessity to
7124    remove optional reloads easier.  Return true if we do any
7125    change.  */
7126 static bool
undo_optional_reloads(void)7127 undo_optional_reloads (void)
7128 {
7129   bool change_p, keep_p;
7130   unsigned int regno, uid;
7131   bitmap_iterator bi, bi2;
7132   rtx_insn *insn;
7133   rtx set, src, dest;
7134   auto_bitmap removed_optional_reload_pseudos (&reg_obstack);
7135 
7136   bitmap_copy (removed_optional_reload_pseudos, &lra_optional_reload_pseudos);
7137   EXECUTE_IF_SET_IN_BITMAP (&lra_optional_reload_pseudos, 0, regno, bi)
7138     {
7139       keep_p = false;
7140       /* Keep optional reloads from previous subpasses.  */
7141       if (lra_reg_info[regno].restore_rtx == NULL_RTX
7142 	  /* If the original pseudo changed its allocation, just
7143 	     removing the optional pseudo is dangerous as the original
7144 	     pseudo will have longer live range.  */
7145 	  || reg_renumber[REGNO (lra_reg_info[regno].restore_rtx)] >= 0)
7146 	keep_p = true;
7147       else if (reg_renumber[regno] >= 0)
7148 	EXECUTE_IF_SET_IN_BITMAP (&lra_reg_info[regno].insn_bitmap, 0, uid, bi2)
7149 	  {
7150 	    insn = lra_insn_recog_data[uid]->insn;
7151 	    if ((set = single_set (insn)) == NULL_RTX)
7152 	      continue;
7153 	    src = SET_SRC (set);
7154 	    dest = SET_DEST (set);
7155 	    if (! REG_P (src) || ! REG_P (dest))
7156 	      continue;
7157 	    if (REGNO (dest) == regno
7158 		/* Ignore insn for optional reloads itself.  */
7159 		&& REGNO (lra_reg_info[regno].restore_rtx) != REGNO (src)
7160 		/* Check only inheritance on last inheritance pass.  */
7161 		&& (int) REGNO (src) >= new_regno_start
7162 		/* Check that the optional reload was inherited.  */
7163 		&& bitmap_bit_p (&lra_inheritance_pseudos, REGNO (src)))
7164 	      {
7165 		keep_p = true;
7166 		break;
7167 	      }
7168 	  }
7169       if (keep_p)
7170 	{
7171 	  bitmap_clear_bit (removed_optional_reload_pseudos, regno);
7172 	  if (lra_dump_file != NULL)
7173 	    fprintf (lra_dump_file, "Keep optional reload reg %d\n", regno);
7174 	}
7175     }
7176   change_p = ! bitmap_empty_p (removed_optional_reload_pseudos);
7177   auto_bitmap insn_bitmap (&reg_obstack);
7178   EXECUTE_IF_SET_IN_BITMAP (removed_optional_reload_pseudos, 0, regno, bi)
7179     {
7180       if (lra_dump_file != NULL)
7181 	fprintf (lra_dump_file, "Remove optional reload reg %d\n", regno);
7182       bitmap_copy (insn_bitmap, &lra_reg_info[regno].insn_bitmap);
7183       EXECUTE_IF_SET_IN_BITMAP (insn_bitmap, 0, uid, bi2)
7184 	{
7185 	  insn = lra_insn_recog_data[uid]->insn;
7186 	  if ((set = single_set (insn)) != NULL_RTX)
7187 	    {
7188 	      src = SET_SRC (set);
7189 	      dest = SET_DEST (set);
7190 	      if (REG_P (src) && REG_P (dest)
7191 		  && ((REGNO (src) == regno
7192 		       && (REGNO (lra_reg_info[regno].restore_rtx)
7193 			   == REGNO (dest)))
7194 		      || (REGNO (dest) == regno
7195 			  && (REGNO (lra_reg_info[regno].restore_rtx)
7196 			      == REGNO (src)))))
7197 		{
7198 		  if (lra_dump_file != NULL)
7199 		    {
7200 		      fprintf (lra_dump_file, "  Deleting move %u\n",
7201 			       INSN_UID (insn));
7202 		      dump_insn_slim (lra_dump_file, insn);
7203 		    }
7204 		  delete_move_and_clobber (insn, REGNO (dest));
7205 		  continue;
7206 		}
7207 	      /* We should not worry about generation memory-memory
7208 		 moves here as if the corresponding inheritance did
7209 		 not work (inheritance pseudo did not get a hard reg),
7210 		 we remove the inheritance pseudo and the optional
7211 		 reload.  */
7212 	    }
7213 	  lra_substitute_pseudo_within_insn
7214 	    (insn, regno, lra_reg_info[regno].restore_rtx, false);
7215 	  lra_update_insn_regno_info (insn);
7216 	  if (lra_dump_file != NULL)
7217 	    {
7218 	      fprintf (lra_dump_file,
7219 		       "  Restoring original insn:\n");
7220 	      dump_insn_slim (lra_dump_file, insn);
7221 	    }
7222 	}
7223     }
7224   /* Clear restore_regnos.  */
7225   EXECUTE_IF_SET_IN_BITMAP (&lra_optional_reload_pseudos, 0, regno, bi)
7226     lra_reg_info[regno].restore_rtx = NULL_RTX;
7227   return change_p;
7228 }
7229 
7230 /* Entry function for undoing inheritance/split transformation.	 Return true
7231    if we did any RTL change in this pass.  */
7232 bool
lra_undo_inheritance(void)7233 lra_undo_inheritance (void)
7234 {
7235   unsigned int regno;
7236   int hard_regno;
7237   int n_all_inherit, n_inherit, n_all_split, n_split;
7238   rtx restore_rtx;
7239   bitmap_iterator bi;
7240   bool change_p;
7241 
7242   lra_undo_inheritance_iter++;
7243   if (lra_undo_inheritance_iter > LRA_MAX_INHERITANCE_PASSES)
7244     return false;
7245   if (lra_dump_file != NULL)
7246     fprintf (lra_dump_file,
7247 	     "\n********** Undoing inheritance #%d: **********\n\n",
7248 	     lra_undo_inheritance_iter);
7249   auto_bitmap remove_pseudos (&reg_obstack);
7250   n_inherit = n_all_inherit = 0;
7251   EXECUTE_IF_SET_IN_BITMAP (&lra_inheritance_pseudos, 0, regno, bi)
7252     if (lra_reg_info[regno].restore_rtx != NULL_RTX)
7253       {
7254 	n_all_inherit++;
7255 	if (reg_renumber[regno] < 0
7256 	    /* If the original pseudo changed its allocation, just
7257 	       removing inheritance is dangerous as for changing
7258 	       allocation we used shorter live-ranges.  */
7259 	    && (! REG_P (lra_reg_info[regno].restore_rtx)
7260 		|| reg_renumber[REGNO (lra_reg_info[regno].restore_rtx)] < 0))
7261 	  bitmap_set_bit (remove_pseudos, regno);
7262 	else
7263 	  n_inherit++;
7264       }
7265   if (lra_dump_file != NULL && n_all_inherit != 0)
7266     fprintf (lra_dump_file, "Inherit %d out of %d (%.2f%%)\n",
7267 	     n_inherit, n_all_inherit,
7268 	     (double) n_inherit / n_all_inherit * 100);
7269   n_split = n_all_split = 0;
7270   EXECUTE_IF_SET_IN_BITMAP (&lra_split_regs, 0, regno, bi)
7271     if ((restore_rtx = lra_reg_info[regno].restore_rtx) != NULL_RTX)
7272       {
7273 	int restore_regno = REGNO (restore_rtx);
7274 
7275 	n_all_split++;
7276 	hard_regno = (restore_regno >= FIRST_PSEUDO_REGISTER
7277 		      ? reg_renumber[restore_regno] : restore_regno);
7278 	if (hard_regno < 0 || reg_renumber[regno] == hard_regno)
7279 	  bitmap_set_bit (remove_pseudos, regno);
7280 	else
7281 	  {
7282 	    n_split++;
7283 	    if (lra_dump_file != NULL)
7284 	      fprintf (lra_dump_file, "	     Keep split r%d (orig=r%d)\n",
7285 		       regno, restore_regno);
7286 	  }
7287       }
7288   if (lra_dump_file != NULL && n_all_split != 0)
7289     fprintf (lra_dump_file, "Split %d out of %d (%.2f%%)\n",
7290 	     n_split, n_all_split,
7291 	     (double) n_split / n_all_split * 100);
7292   change_p = remove_inheritance_pseudos (remove_pseudos);
7293   /* Clear restore_regnos.  */
7294   EXECUTE_IF_SET_IN_BITMAP (&lra_inheritance_pseudos, 0, regno, bi)
7295     lra_reg_info[regno].restore_rtx = NULL_RTX;
7296   EXECUTE_IF_SET_IN_BITMAP (&lra_split_regs, 0, regno, bi)
7297     lra_reg_info[regno].restore_rtx = NULL_RTX;
7298   change_p = undo_optional_reloads () || change_p;
7299   return change_p;
7300 }
7301