1 /* IRA hard register and memory cost calculation for allocnos or pseudos.
2    Copyright (C) 2006-2018 Free Software Foundation, Inc.
3    Contributed by Vladimir Makarov <vmakarov@redhat.com>.
4 
5 This file is part of GCC.
6 
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 3, or (at your option) any later
10 version.
11 
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
15 for more details.
16 
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3.  If not see
19 <http://www.gnu.org/licenses/>.  */
20 
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "memmodel.h"
30 #include "tm_p.h"
31 #include "insn-config.h"
32 #include "regs.h"
33 #include "ira.h"
34 #include "ira-int.h"
35 #include "addresses.h"
36 #include "reload.h"
37 
38 /* This flag is set up every time we calculate pseudo register
39    classes through function ira_set_pseudo_classes.  */
40 static bool pseudo_classes_defined_p = false;
41 
42 /* TRUE if we work with allocnos.  Otherwise we work with pseudos.  */
43 static bool allocno_p;
44 
45 /* Number of elements in array `costs'.  */
46 static int cost_elements_num;
47 
48 /* The `costs' struct records the cost of using hard registers of each
49    class considered for the calculation and of using memory for each
50    allocno or pseudo.  */
51 struct costs
52 {
53   int mem_cost;
54   /* Costs for register classes start here.  We process only some
55      allocno classes.  */
56   int cost[1];
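  /* The structure is allocated with extra room; its real size is
     given by struct_costs_size below.  */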
57 };
58 
59 #define max_struct_costs_size \
60   (this_target_ira_int->x_max_struct_costs_size)
61 #define init_cost \
62   (this_target_ira_int->x_init_cost)
63 #define temp_costs \
64   (this_target_ira_int->x_temp_costs)
65 #define op_costs \
66   (this_target_ira_int->x_op_costs)
67 #define this_op_costs \
68   (this_target_ira_int->x_this_op_costs)
69 
70 /* Costs of each class for each allocno or pseudo.  */
71 static struct costs *costs;
72 
73 /* Accumulated costs of each class for each allocno.  */
74 static struct costs *total_allocno_costs;
75 
76 /* The current size of struct costs.  */
77 static size_t struct_costs_size;
78 
79 /* Return pointer to structure containing costs of allocno or pseudo
80    with given NUM in array ARR.  */
81 #define COSTS(arr, num) \
82   ((struct costs *) ((char *) (arr) + (num) * struct_costs_size))
83 
84 /* Return index in COSTS when processing reg with REGNO.  */
85 #define COST_INDEX(regno) (allocno_p 					     \
86                            ? ALLOCNO_NUM (ira_curr_regno_allocno_map[regno]) \
87 			   : (int) regno)
88 
89 /* Record register class preferences of each allocno or pseudo.  A
90    null value means no preferences; this happens on the 1st iteration
91    of the cost calculation.  */
92 static enum reg_class *pref;
93 
94 /* Allocated buffer for pref.  */
95 static enum reg_class *pref_buffer;
96 
97 /* Record allocno class of each allocno with the same regno.  */
98 static enum reg_class *regno_aclass;
99 
100 /* Record cost gains for not allocating a register with an invariant
101    equivalence.  */
102 static int *regno_equiv_gains;
103 
104 /* Execution frequency of the current insn.  */
105 static int frequency;
106 
107 
108 
109 /* Info about reg classes whose costs are calculated for a pseudo.  */
110 struct cost_classes
111 {
112   /* Number of the cost classes in the subsequent array.  */
113   int num;
114   /* Container of the cost classes.  */
115   enum reg_class classes[N_REG_CLASSES];
116   /* Map reg class -> index of the reg class in the previous array.
117      -1 if it is not a cost class.  */
118   int index[N_REG_CLASSES];
119   /* Map hard regno -> index of first class in array CLASSES containing
120      the hard regno, -1 otherwise.  */
121   int hard_regno_index[FIRST_PSEUDO_REGISTER];
122 };
123 
124 /* Types of pointers to the structure above.  */
125 typedef struct cost_classes *cost_classes_t;
126 typedef const struct cost_classes *const_cost_classes_t;
127 
128 /* Info about cost classes for each pseudo.  */
129 static cost_classes_t *regno_cost_classes;
130 
131 /* Helper for cost_classes hashing.  */
132 
133 struct cost_classes_hasher : pointer_hash <cost_classes>
134 {
135   static inline hashval_t hash (const cost_classes *);
136   static inline bool equal (const cost_classes *, const cost_classes *);
137   static inline void remove (cost_classes *);
138 };
139 
140 /* Returns hash value for cost classes info HV.  */
141 inline hashval_t
142 cost_classes_hasher::hash (const cost_classes *hv)
143 {
144   return iterative_hash (&hv->classes, sizeof (enum reg_class) * hv->num, 0);
145 }
146 
147 /* Compares cost classes info HV1 and HV2.  */
148 inline bool
149 cost_classes_hasher::equal (const cost_classes *hv1, const cost_classes *hv2)
150 {
151   return (hv1->num == hv2->num
152 	  && memcmp (hv1->classes, hv2->classes,
153 		     sizeof (enum reg_class) * hv1->num) == 0);
154 }
155 
156 /* Delete cost classes info V from the hash table.  */
157 inline void
158 cost_classes_hasher::remove (cost_classes *v)
159 {
160   ira_free (v);
161 }
162 
163 /* Hash table of unique cost classes.  */
164 static hash_table<cost_classes_hasher> *cost_classes_htab;
165 
166 /* Map allocno class -> cost classes for pseudo of given allocno
167    class.  */
168 static cost_classes_t cost_classes_aclass_cache[N_REG_CLASSES];
169 
170 /* Map mode -> cost classes for pseudo of given mode.  */
171 static cost_classes_t cost_classes_mode_cache[MAX_MACHINE_MODE];
172 
173 /* Cost classes that include all classes in ira_important_classes.  */
174 static cost_classes all_cost_classes;
175 
176 /* Use the array of classes in CLASSES_PTR to fill out the rest of
177    the structure.  */
178 static void
179 complete_cost_classes (cost_classes_t classes_ptr)
180 {
181   for (int i = 0; i < N_REG_CLASSES; i++)
182     classes_ptr->index[i] = -1;
183   for (int i = 0; i < FIRST_PSEUDO_REGISTER; i++)
184     classes_ptr->hard_regno_index[i] = -1;
185   for (int i = 0; i < classes_ptr->num; i++)
186     {
187       enum reg_class cl = classes_ptr->classes[i];
188       classes_ptr->index[cl] = i;
189       for (int j = ira_class_hard_regs_num[cl] - 1; j >= 0; j--)
190 	{
191 	  unsigned int hard_regno = ira_class_hard_regs[cl][j];
192 	  if (classes_ptr->hard_regno_index[hard_regno] < 0)
193 	    classes_ptr->hard_regno_index[hard_regno] = i;
194 	}
195     }
196 }
197 
198 /* Initialize info about the cost classes for each pseudo.  */
199 static void
200 initiate_regno_cost_classes (void)
201 {
202   int size = sizeof (cost_classes_t) * max_reg_num ();
203 
204   regno_cost_classes = (cost_classes_t *) ira_allocate (size);
205   memset (regno_cost_classes, 0, size);
206   memset (cost_classes_aclass_cache, 0,
207 	  sizeof (cost_classes_t) * N_REG_CLASSES);
208   memset (cost_classes_mode_cache, 0,
209 	  sizeof (cost_classes_t) * MAX_MACHINE_MODE);
210   cost_classes_htab = new hash_table<cost_classes_hasher> (200);
211   all_cost_classes.num = ira_important_classes_num;
212   for (int i = 0; i < ira_important_classes_num; i++)
213     all_cost_classes.classes[i] = ira_important_classes[i];
214   complete_cost_classes (&all_cost_classes);
215 }
216 
217 /* Create new cost classes from cost classes FROM and set up members
218    index and hard_regno_index.  Return the new classes.  The function
219    implements code common to the two functions
220    setup_regno_cost_classes_by_aclass and
221    setup_regno_cost_classes_by_mode.  */
222 static cost_classes_t
223 setup_cost_classes (cost_classes_t from)
224 {
225   cost_classes_t classes_ptr;
226 
227   classes_ptr = (cost_classes_t) ira_allocate (sizeof (struct cost_classes));
228   classes_ptr->num = from->num;
229   for (int i = 0; i < from->num; i++)
230     classes_ptr->classes[i] = from->classes[i];
231   complete_cost_classes (classes_ptr);
232   return classes_ptr;
233 }
234 
235 /* Return a version of FULL that only considers registers in REGS that are
236    valid for mode MODE.  Both FULL and the returned class are globally
237    allocated.  */
238 static cost_classes_t
239 restrict_cost_classes (cost_classes_t full, machine_mode mode,
240 		       const HARD_REG_SET &regs)
241 {
242   static struct cost_classes narrow;
243   int map[N_REG_CLASSES];
244   narrow.num = 0;
245   for (int i = 0; i < full->num; i++)
246     {
247       /* Assume that we'll drop the class.  */
248       map[i] = -1;
249 
250       /* Ignore classes that are too small for the mode.  */
251       enum reg_class cl = full->classes[i];
252       if (!contains_reg_of_mode[cl][mode])
253 	continue;
254 
255       /* Calculate the set of registers in CL that belong to REGS and
256 	 are valid for MODE.  */
257       HARD_REG_SET valid_for_cl;
258       COPY_HARD_REG_SET (valid_for_cl, reg_class_contents[cl]);
259       AND_HARD_REG_SET (valid_for_cl, regs);
260       AND_COMPL_HARD_REG_SET (valid_for_cl,
261 			      ira_prohibited_class_mode_regs[cl][mode]);
262       AND_COMPL_HARD_REG_SET (valid_for_cl, ira_no_alloc_regs);
263       if (hard_reg_set_empty_p (valid_for_cl))
264 	continue;
265 
266       /* Don't use this class if the set of valid registers is a subset
267 	 of an existing class.  For example, suppose we have two classes
268 	 GR_REGS and FR_REGS and a union class GR_AND_FR_REGS.  Suppose
269 	 that the mode changes allowed by FR_REGS are not as general as
270 	 the mode changes allowed by GR_REGS.
271 
272 	 In this situation, the mode changes for GR_AND_FR_REGS could
273 	 either be seen as the union or the intersection of the mode
274 	 changes allowed by the two subclasses.  The justification for
275 	 the union-based definition would be that, if you want a mode
276 	 change that's only allowed by GR_REGS, you can pick a register
277 	 from the GR_REGS subclass.  The justification for the
278 	 intersection-based definition would be that every register
279 	 from the class would allow the mode change.
280 
281 	 However, if we have a register that needs to be in GR_REGS,
282 	 using GR_AND_FR_REGS with the intersection-based definition
283 	 would be too pessimistic, since it would bring in restrictions
284 	 that only apply to FR_REGS.  Conversely, if we have a register
285 	 that needs to be in FR_REGS, using GR_AND_FR_REGS with the
286 	 union-based definition would lose the extra restrictions
287 	 placed on FR_REGS.  GR_AND_FR_REGS is therefore only useful
288 	 for cases where GR_REGS and FR_REGS are both valid.  */
289       int pos;
290       for (pos = 0; pos < narrow.num; ++pos)
291 	{
292 	  enum reg_class cl2 = narrow.classes[pos];
293 	  if (hard_reg_set_subset_p (valid_for_cl, reg_class_contents[cl2]))
294 	    break;
295 	}
296       map[i] = pos;
297       if (pos == narrow.num)
298 	{
299 	  /* If several classes are equivalent, prefer to use the one
300 	     that was chosen as the allocno class.  */
301 	  enum reg_class cl2 = ira_allocno_class_translate[cl];
302 	  if (ira_class_hard_regs_num[cl] == ira_class_hard_regs_num[cl2])
303 	    cl = cl2;
304 	  narrow.classes[narrow.num++] = cl;
305 	}
306     }
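  /* If no classes were dropped or merged, the original set of classes
     in FULL can be used as-is.  */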
307   if (narrow.num == full->num)
308     return full;
309 
310   cost_classes **slot = cost_classes_htab->find_slot (&narrow, INSERT);
311   if (*slot == NULL)
312     {
313       cost_classes_t classes = setup_cost_classes (&narrow);
314       /* Map equivalent classes to the representative that we chose above.  */
315       for (int i = 0; i < ira_important_classes_num; i++)
316 	{
317 	  enum reg_class cl = ira_important_classes[i];
318 	  int index = full->index[cl];
319 	  if (index >= 0)
320 	    classes->index[cl] = map[index];
321 	}
322       *slot = classes;
323     }
324   return *slot;
325 }
326 
327 /* Setup cost classes for pseudo REGNO whose allocno class is ACLASS.
328    This function is used when we know an initial approximation of
329    allocno class of the pseudo already, e.g. on the second iteration
330    of class cost calculation or after class cost calculation in
331    register-pressure sensitive insn scheduling or register-pressure
332    sensitive loop-invariant motion.  */
333 static void
334 setup_regno_cost_classes_by_aclass (int regno, enum reg_class aclass)
335 {
336   static struct cost_classes classes;
337   cost_classes_t classes_ptr;
338   enum reg_class cl;
339   int i;
340   cost_classes **slot;
341   HARD_REG_SET temp, temp2;
342   bool exclude_p;
343 
344   if ((classes_ptr = cost_classes_aclass_cache[aclass]) == NULL)
345     {
346       COPY_HARD_REG_SET (temp, reg_class_contents[aclass]);
347       AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
348       /* We exclude classes from consideration which are subsets of
349 	 ACLASS only if ACLASS is a uniform class.  */
350       exclude_p = ira_uniform_class_p[aclass];
351       classes.num = 0;
352       for (i = 0; i < ira_important_classes_num; i++)
353 	{
354 	  cl = ira_important_classes[i];
355 	  if (exclude_p)
356 	    {
357 	      /* Exclude non-uniform classes which are subsets of
358 		 ACLASS.  */
359 	      COPY_HARD_REG_SET (temp2, reg_class_contents[cl]);
360 	      AND_COMPL_HARD_REG_SET (temp2, ira_no_alloc_regs);
361 	      if (hard_reg_set_subset_p (temp2, temp) && cl != aclass)
362 		continue;
363 	    }
364 	  classes.classes[classes.num++] = cl;
365 	}
366       slot = cost_classes_htab->find_slot (&classes, INSERT);
367       if (*slot == NULL)
368 	{
369 	  classes_ptr = setup_cost_classes (&classes);
370 	  *slot = classes_ptr;
371 	}
372       classes_ptr = cost_classes_aclass_cache[aclass] = (cost_classes_t) *slot;
373     }
374   if (regno_reg_rtx[regno] != NULL_RTX)
375     {
376       /* Restrict the classes to those that are valid for REGNO's mode
377 	 (which might for example exclude singleton classes if the mode
378 	 requires two registers).  Also restrict the classes to those that
379 	 are valid for subregs of REGNO.  */
380       const HARD_REG_SET *valid_regs = valid_mode_changes_for_regno (regno);
381       if (!valid_regs)
382 	valid_regs = &reg_class_contents[ALL_REGS];
383       classes_ptr = restrict_cost_classes (classes_ptr,
384 					   PSEUDO_REGNO_MODE (regno),
385 					   *valid_regs);
386     }
387   regno_cost_classes[regno] = classes_ptr;
388 }
389 
390 /* Setup cost classes for pseudo REGNO with MODE.  Using MODE can
391    decrease the number of cost classes for the pseudo if hard registers
392    of some important classes cannot hold a value of MODE.  The pseudo
393    then cannot get a hard register of those classes, and cost
394    calculation for such important classes would only waste CPU
395    time.  */
396 static void
397 setup_regno_cost_classes_by_mode (int regno, machine_mode mode)
398 {
399   if (const HARD_REG_SET *valid_regs = valid_mode_changes_for_regno (regno))
400     regno_cost_classes[regno] = restrict_cost_classes (&all_cost_classes,
401 						       mode, *valid_regs);
402   else
403     {
404       if (cost_classes_mode_cache[mode] == NULL)
405 	cost_classes_mode_cache[mode]
406 	  = restrict_cost_classes (&all_cost_classes, mode,
407 				   reg_class_contents[ALL_REGS]);
408       regno_cost_classes[regno] = cost_classes_mode_cache[mode];
409     }
410 }
411 
412 /* Finalize info about the cost classes for each pseudo.  */
413 static void
414 finish_regno_cost_classes (void)
415 {
416   ira_free (regno_cost_classes);
417   delete cost_classes_htab;
418   cost_classes_htab = NULL;
419 }
420 
421 
422 
423 /* Compute the cost of loading X into (if TO_P is TRUE) or from (if
424    TO_P is FALSE) a register of class RCLASS in mode MODE.  X must not
425    be a pseudo register.  */
426 static int
427 copy_cost (rtx x, machine_mode mode, reg_class_t rclass, bool to_p,
428 	   secondary_reload_info *prev_sri)
429 {
430   secondary_reload_info sri;
431   reg_class_t secondary_class = NO_REGS;
432 
433   /* If X is a SCRATCH, there is actually nothing to move since we are
434      assuming optimal allocation.  */
435   if (GET_CODE (x) == SCRATCH)
436     return 0;
437 
438   /* Get the class we will actually use for a reload.  */
439   rclass = targetm.preferred_reload_class (x, rclass);
440 
441   /* If we need a secondary reload for an intermediate, the cost is
442      that to load the input into the intermediate register, then to
443      copy it.  */
444   sri.prev_sri = prev_sri;
445   sri.extra_cost = 0;
446   /* PR 68770: Secondary reload might examine the t_icode field.  */
447   sri.t_icode = CODE_FOR_nothing;
448 
449   secondary_class = targetm.secondary_reload (to_p, x, rclass, mode, &sri);
450 
451   if (secondary_class != NO_REGS)
452     {
453       ira_init_register_move_cost_if_necessary (mode);
454       return (ira_register_move_cost[mode][(int) secondary_class][(int) rclass]
455 	      + sri.extra_cost
456 	      + copy_cost (x, mode, secondary_class, to_p, &sri));
457     }
458 
459   /* For memory, use the memory move cost, for (hard) registers, use
460      the cost to move between the register classes, and use 2 for
461      everything else (constants).  */
462   if (MEM_P (x) || rclass == NO_REGS)
463     return sri.extra_cost
464 	   + ira_memory_move_cost[mode][(int) rclass][to_p != 0];
465   else if (REG_P (x))
466     {
467       reg_class_t x_class = REGNO_REG_CLASS (REGNO (x));
468 
469       ira_init_register_move_cost_if_necessary (mode);
470       return (sri.extra_cost
471 	      + ira_register_move_cost[mode][(int) x_class][(int) rclass]);
472     }
473   else
474     /* If this is a constant, we may eventually want to call rtx_cost
475        here.  */
476     return sri.extra_cost + COSTS_N_INSNS (1);
477 }
478 
479 
480 
481 /* Record the cost of using memory or hard registers of various
482    classes for the operands in INSN.
483 
484    N_ALTS is the number of alternatives.
485    N_OPS is the number of operands.
486    OPS is an array of the operands.
487    MODES are the modes of the operands, in case any are VOIDmode.
488    CONSTRAINTS are the constraints to use for the operands.  This array
489    is modified by this procedure.
490 
491    This procedure works alternative by alternative.  For each
492    alternative we assume that we will be able to allocate all allocnos
493    to their ideal register class and calculate the cost of using that
494    alternative.  Then we compute, for each operand that is a
495    pseudo-register, the cost of having the allocno allocated to each
496    register class and using it in that alternative.  To this cost is
497    added the cost of the alternative.
498 
499    The cost of each class for this insn is its lowest cost among all
500    the alternatives.  */
501 static void
502 record_reg_classes (int n_alts, int n_ops, rtx *ops,
503 		    machine_mode *modes, const char **constraints,
504 		    rtx_insn *insn, enum reg_class *pref)
505 {
506   int alt;
507   int i, j, k;
508   int insn_allows_mem[MAX_RECOG_OPERANDS];
509   move_table *move_in_cost, *move_out_cost;
510   short (*mem_cost)[2];
511 
512   for (i = 0; i < n_ops; i++)
513     insn_allows_mem[i] = 0;
514 
515   /* Process each alternative, each time minimizing an operand's cost
516      with the cost for each operand in that alternative.  */
517   alternative_mask preferred = get_preferred_alternatives (insn);
518   for (alt = 0; alt < n_alts; alt++)
519     {
520       enum reg_class classes[MAX_RECOG_OPERANDS];
521       int allows_mem[MAX_RECOG_OPERANDS];
522       enum reg_class rclass;
523       int alt_fail = 0;
524       int alt_cost = 0, op_cost_add;
525 
526       if (!TEST_BIT (preferred, alt))
527 	{
528 	  for (i = 0; i < recog_data.n_operands; i++)
529 	    constraints[i] = skip_alternative (constraints[i]);
530 
531 	  continue;
532 	}
533 
534       for (i = 0; i < n_ops; i++)
535 	{
536 	  unsigned char c;
537 	  const char *p = constraints[i];
538 	  rtx op = ops[i];
539 	  machine_mode mode = modes[i];
540 	  int allows_addr = 0;
541 	  int win = 0;
542 
543 	  /* Initially show we know nothing about the register class.  */
544 	  classes[i] = NO_REGS;
545 	  allows_mem[i] = 0;
546 
547 	  /* If this operand has no constraints at all, we can
548 	     conclude nothing about it since anything is valid.  */
549 	  if (*p == 0)
550 	    {
551 	      if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
552 		memset (this_op_costs[i], 0, struct_costs_size);
553 	      continue;
554 	    }
555 
556 	  /* If this alternative is only relevant when this operand
557 	     matches a previous operand, we do different things
558 	     depending on whether this operand is an allocno-reg or not.
559 	     We must process any modifiers for the operand before we
560 	     can make this test.  */
561 	  while (*p == '%' || *p == '=' || *p == '+' || *p == '&')
562 	    p++;
563 
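	  /* A digit constraint: operand I must match the earlier operand
	     number P[0] - '0'.  */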
564 	  if (p[0] >= '0' && p[0] <= '0' + i)
565 	    {
566 	      /* Copy class and whether memory is allowed from the
567 		 matching alternative.  Then perform any needed cost
568 		 computations and/or adjustments.  */
569 	      j = p[0] - '0';
570 	      classes[i] = classes[j];
571 	      allows_mem[i] = allows_mem[j];
572 	      if (allows_mem[i])
573 		insn_allows_mem[i] = 1;
574 
575 	      if (! REG_P (op) || REGNO (op) < FIRST_PSEUDO_REGISTER)
576 		{
577 		  /* If this matches the other operand, we have no
578 		     added cost and we win.  */
579 		  if (rtx_equal_p (ops[j], op))
580 		    win = 1;
581 		  /* If we can put the other operand into a register,
582 		     add to the cost of this alternative the cost to
583 		     copy this operand to the register used for the
584 		     other operand.  */
585 		  else if (classes[j] != NO_REGS)
586 		    {
587 		      alt_cost += copy_cost (op, mode, classes[j], 1, NULL);
588 		      win = 1;
589 		    }
590 		}
591 	      else if (! REG_P (ops[j])
592 		       || REGNO (ops[j]) < FIRST_PSEUDO_REGISTER)
593 		{
594 		  /* This op is an allocno but the one it matches is
595 		     not.  */
596 
597 		  /* If we can't put the other operand into a
598 		     register, this alternative can't be used.  */
599 
600 		  if (classes[j] == NO_REGS)
601 		    alt_fail = 1;
602 		  /* Otherwise, add to the cost of this alternative
603 		     the cost to copy the other operand to the hard
604 		     register used for this operand.  */
605 		  else
606 		    alt_cost += copy_cost (ops[j], mode, classes[j], 1, NULL);
607 		}
608 	      else
609 		{
610 		  /* The costs of this operand are not the same as the
611 		     other operand since move costs are not symmetric.
612 		     Moreover, if we cannot tie them, this alternative
613 		     needs to do a copy, which is one insn.  */
614 		  struct costs *pp = this_op_costs[i];
615 		  int *pp_costs = pp->cost;
616 		  cost_classes_t cost_classes_ptr
617 		    = regno_cost_classes[REGNO (op)];
618 		  enum reg_class *cost_classes = cost_classes_ptr->classes;
619 		  bool in_p = recog_data.operand_type[i] != OP_OUT;
620 		  bool out_p = recog_data.operand_type[i] != OP_IN;
621 		  enum reg_class op_class = classes[i];
622 
623 		  ira_init_register_move_cost_if_necessary (mode);
624 		  if (! in_p)
625 		    {
626 		      ira_assert (out_p);
627 		      if (op_class == NO_REGS)
628 			{
629 			  mem_cost = ira_memory_move_cost[mode];
630 			  for (k = cost_classes_ptr->num - 1; k >= 0; k--)
631 			    {
632 			      rclass = cost_classes[k];
633 			      pp_costs[k] = mem_cost[rclass][0] * frequency;
634 			    }
635 			}
636 		      else
637 			{
638 			  move_out_cost = ira_may_move_out_cost[mode];
639 			  for (k = cost_classes_ptr->num - 1; k >= 0; k--)
640 			    {
641 			      rclass = cost_classes[k];
642 			      pp_costs[k]
643 				= move_out_cost[op_class][rclass] * frequency;
644 			    }
645 			}
646 		    }
647 		  else if (! out_p)
648 		    {
649 		      ira_assert (in_p);
650 		      if (op_class == NO_REGS)
651 			{
652 			  mem_cost = ira_memory_move_cost[mode];
653 			  for (k = cost_classes_ptr->num - 1; k >= 0; k--)
654 			    {
655 			      rclass = cost_classes[k];
656 			      pp_costs[k] = mem_cost[rclass][1] * frequency;
657 			    }
658 			}
659 		      else
660 			{
661 			  move_in_cost = ira_may_move_in_cost[mode];
662 			  for (k = cost_classes_ptr->num - 1; k >= 0; k--)
663 			    {
664 			      rclass = cost_classes[k];
665 			      pp_costs[k]
666 				= move_in_cost[rclass][op_class] * frequency;
667 			    }
668 			}
669 		    }
670 		  else
671 		    {
672 		      if (op_class == NO_REGS)
673 			{
674 			  mem_cost = ira_memory_move_cost[mode];
675 			  for (k = cost_classes_ptr->num - 1; k >= 0; k--)
676 			    {
677 			      rclass = cost_classes[k];
678 			      pp_costs[k] = ((mem_cost[rclass][0]
679 					      + mem_cost[rclass][1])
680 					     * frequency);
681 			    }
682 			}
683 		      else
684 			{
685 			  move_in_cost = ira_may_move_in_cost[mode];
686 			  move_out_cost = ira_may_move_out_cost[mode];
687 			  for (k = cost_classes_ptr->num - 1; k >= 0; k--)
688 			    {
689 			      rclass = cost_classes[k];
690 			      pp_costs[k] = ((move_in_cost[rclass][op_class]
691 					      + move_out_cost[op_class][rclass])
692 					     * frequency);
693 			    }
694 			}
695 		    }
696 
697 		  /* If the alternative actually allows memory, make
698 		     things a bit cheaper since we won't need an extra
699 		     insn to load it.  */
700 		  pp->mem_cost
701 		    = ((out_p ? ira_memory_move_cost[mode][op_class][0] : 0)
702 		       + (in_p ? ira_memory_move_cost[mode][op_class][1] : 0)
703 		       - allows_mem[i]) * frequency;
704 
705 		  /* If we have assigned a class to this allocno in
706 		     our first pass, add a cost to this alternative
707 		     corresponding to what we would add if this
708 		     allocno were not in the appropriate class.  */
709 		  if (pref)
710 		    {
711 		      enum reg_class pref_class = pref[COST_INDEX (REGNO (op))];
712 
713 		      if (pref_class == NO_REGS)
714 			alt_cost
715 			  += ((out_p
716 			       ? ira_memory_move_cost[mode][op_class][0] : 0)
717 			      + (in_p
718 				 ? ira_memory_move_cost[mode][op_class][1]
719 				 : 0));
720 		      else if (ira_reg_class_intersect
721 			       [pref_class][op_class] == NO_REGS)
722 			alt_cost
723 			  += ira_register_move_cost[mode][pref_class][op_class];
724 		    }
725 		  if (REGNO (ops[i]) != REGNO (ops[j])
726 		      && ! find_reg_note (insn, REG_DEAD, op))
727 		    alt_cost += 2;
728 
729 		  p++;
730 		}
731 	    }
732 
733 	  /* Scan all the constraint letters.  See if the operand
734 	     matches any of the constraints.  Collect the valid
735 	     register classes and see if this operand accepts
736 	     memory.  */
737 	  while ((c = *p))
738 	    {
739 	      switch (c)
740 		{
741 		case '*':
742 		  /* Ignore the next letter for this pass.  */
743 		  c = *++p;
744 		  break;
745 
746 		case '^':
747 		  alt_cost += 2;
748 		  break;
749 
750 		case '?':
751 		  alt_cost += 2;
752 		  break;
753 
754 		case 'g':
755 		  if (MEM_P (op)
756 		      || (CONSTANT_P (op)
757 			  && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (op))))
758 		    win = 1;
759 		  insn_allows_mem[i] = allows_mem[i] = 1;
760 		  classes[i] = ira_reg_class_subunion[classes[i]][GENERAL_REGS];
761 		  break;
762 
763 		default:
764 		  enum constraint_num cn = lookup_constraint (p);
765 		  enum reg_class cl;
766 		  switch (get_constraint_type (cn))
767 		    {
768 		    case CT_REGISTER:
769 		      cl = reg_class_for_constraint (cn);
770 		      if (cl != NO_REGS)
771 			classes[i] = ira_reg_class_subunion[classes[i]][cl];
772 		      break;
773 
774 		    case CT_CONST_INT:
775 		      if (CONST_INT_P (op)
776 			  && insn_const_int_ok_for_constraint (INTVAL (op), cn))
777 			win = 1;
778 		      break;
779 
780 		    case CT_MEMORY:
781 		      /* Every MEM can be reloaded to fit.  */
782 		      insn_allows_mem[i] = allows_mem[i] = 1;
783 		      if (MEM_P (op))
784 			win = 1;
785 		      break;
786 
787 		    case CT_SPECIAL_MEMORY:
788 		      insn_allows_mem[i] = allows_mem[i] = 1;
789 		      if (MEM_P (op) && constraint_satisfied_p (op, cn))
790 			win = 1;
791 		      break;
792 
793 		    case CT_ADDRESS:
794 		      /* Every address can be reloaded to fit.  */
795 		      allows_addr = 1;
796 		      if (address_operand (op, GET_MODE (op))
797 			  || constraint_satisfied_p (op, cn))
798 			win = 1;
799 		      /* We know this operand is an address, so we
800 			 want it to be allocated to a hard register
801 			 that can be the base of an address,
802 			 i.e. BASE_REG_CLASS.  */
803 		      classes[i]
804 			= ira_reg_class_subunion[classes[i]]
805 			  [base_reg_class (VOIDmode, ADDR_SPACE_GENERIC,
806 					   ADDRESS, SCRATCH)];
807 		      break;
808 
809 		    case CT_FIXED_FORM:
810 		      if (constraint_satisfied_p (op, cn))
811 			win = 1;
812 		      break;
813 		    }
814 		  break;
815 		}
816 	      p += CONSTRAINT_LEN (c, p);
817 	      if (c == ',')
818 		break;
819 	    }
820 
821 	  constraints[i] = p;
822 
823 	  if (alt_fail)
824 	    break;
825 
826 	  /* How we account for this operand now depends on whether it
827 	     is a pseudo register or not.  If it is, we first check if
828 	     any register classes are valid.  If not, we ignore this
829 	     alternative, since we want to assume that all allocnos get
830 	     allocated for register preferencing.  If some register
831 	     class is valid, compute the costs of moving the allocno
832 	     into that class.  */
833 	  if (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)
834 	    {
835 	      if (classes[i] == NO_REGS && ! allows_mem[i])
836 		{
837 		  /* We must always fail if the operand is a REG, but
838 		     we did not find a suitable class and memory is
839 		     not allowed.
840 
841 		     Otherwise we may perform an uninitialized read
842 		     from this_op_costs after the `continue' statement
843 		     below.  */
844 		  alt_fail = 1;
845 		}
846 	      else
847 		{
848 		  unsigned int regno = REGNO (op);
849 		  struct costs *pp = this_op_costs[i];
850 		  int *pp_costs = pp->cost;
851 		  cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
852 		  enum reg_class *cost_classes = cost_classes_ptr->classes;
853 		  bool in_p = recog_data.operand_type[i] != OP_OUT;
854 		  bool out_p = recog_data.operand_type[i] != OP_IN;
855 		  enum reg_class op_class = classes[i];
856 
857 		  ira_init_register_move_cost_if_necessary (mode);
858 		  if (! in_p)
859 		    {
860 		      ira_assert (out_p);
861 		      if (op_class == NO_REGS)
862 			{
863 			  mem_cost = ira_memory_move_cost[mode];
864 			  for (k = cost_classes_ptr->num - 1; k >= 0; k--)
865 			    {
866 			      rclass = cost_classes[k];
867 			      pp_costs[k] = mem_cost[rclass][0] * frequency;
868 			    }
869 			}
870 		      else
871 			{
872 			  move_out_cost = ira_may_move_out_cost[mode];
873 			  for (k = cost_classes_ptr->num - 1; k >= 0; k--)
874 			    {
875 			      rclass = cost_classes[k];
876 			      pp_costs[k]
877 				= move_out_cost[op_class][rclass] * frequency;
878 			    }
879 			}
880 		    }
881 		  else if (! out_p)
882 		    {
883 		      ira_assert (in_p);
884 		      if (op_class == NO_REGS)
885 			{
886 			  mem_cost = ira_memory_move_cost[mode];
887 			  for (k = cost_classes_ptr->num - 1; k >= 0; k--)
888 			    {
889 			      rclass = cost_classes[k];
890 			      pp_costs[k] = mem_cost[rclass][1] * frequency;
891 			    }
892 			}
893 		      else
894 			{
895 			  move_in_cost = ira_may_move_in_cost[mode];
896 			  for (k = cost_classes_ptr->num - 1; k >= 0; k--)
897 			    {
898 			      rclass = cost_classes[k];
899 			      pp_costs[k]
900 				= move_in_cost[rclass][op_class] * frequency;
901 			    }
902 			}
903 		    }
904 		  else
905 		    {
906 		      if (op_class == NO_REGS)
907 			{
908 			  mem_cost = ira_memory_move_cost[mode];
909 			  for (k = cost_classes_ptr->num - 1; k >= 0; k--)
910 			    {
911 			      rclass = cost_classes[k];
912 			      pp_costs[k] = ((mem_cost[rclass][0]
913 					      + mem_cost[rclass][1])
914 					     * frequency);
915 			    }
916 			}
917 		      else
918 			{
919 			  move_in_cost = ira_may_move_in_cost[mode];
920 			  move_out_cost = ira_may_move_out_cost[mode];
921 			  for (k = cost_classes_ptr->num - 1; k >= 0; k--)
922 			    {
923 			      rclass = cost_classes[k];
924 			      pp_costs[k] = ((move_in_cost[rclass][op_class]
925 					      + move_out_cost[op_class][rclass])
926 					     * frequency);
927 			    }
928 			}
929 		    }
930 
931 		  if (op_class == NO_REGS)
932 		    /* Although we don't need an insn to reload from
933 		       memory, accessing memory is usually more
934 		       expensive than a register.  */
935 		    pp->mem_cost = frequency;
936 		  else
937 		    /* If the alternative actually allows memory, make
938 		       things a bit cheaper since we won't need an
939 		       extra insn to load it.  */
940 		    pp->mem_cost
941 		      = ((out_p ? ira_memory_move_cost[mode][op_class][0] : 0)
942 			 + (in_p ? ira_memory_move_cost[mode][op_class][1] : 0)
943 			 - allows_mem[i]) * frequency;
944 		  /* If we have assigned a class to this allocno in
945 		     our first pass, add a cost to this alternative
946 		     corresponding to what we would add if this
947 		     allocno were not in the appropriate class.  */
948 		  if (pref)
949 		    {
950 		      enum reg_class pref_class = pref[COST_INDEX (REGNO (op))];
951 
952 		      if (pref_class == NO_REGS)
953 			{
954 			  if (op_class != NO_REGS)
955 			    alt_cost
956 			      += ((out_p
957 				   ? ira_memory_move_cost[mode][op_class][0]
958 				   : 0)
959 				  + (in_p
960 				     ? ira_memory_move_cost[mode][op_class][1]
961 				     : 0));
962 			}
963 		      else if (op_class == NO_REGS)
964 			alt_cost
965 			  += ((out_p
966 			       ? ira_memory_move_cost[mode][pref_class][1]
967 			       : 0)
968 			      + (in_p
969 				 ? ira_memory_move_cost[mode][pref_class][0]
970 				 : 0));
971 		      else if (ira_reg_class_intersect[pref_class][op_class]
972 			       == NO_REGS)
973 			alt_cost += (ira_register_move_cost
974 				     [mode][pref_class][op_class]);
975 		    }
976 		}
977 	    }
978 
979 	  /* Otherwise, if this alternative wins, either because we
980 	     have already determined that or if we have a hard
981 	     register of the proper class, there is no cost for this
982 	     alternative.  */
983 	  else if (win || (REG_P (op)
984 			   && reg_fits_class_p (op, classes[i],
985 						0, GET_MODE (op))))
986 	    ;
987 
988 	  /* If registers are valid, the cost of this alternative
989 	     includes copying the object to and/or from a
990 	     register.  */
991 	  else if (classes[i] != NO_REGS)
992 	    {
993 	      if (recog_data.operand_type[i] != OP_OUT)
994 		alt_cost += copy_cost (op, mode, classes[i], 1, NULL);
995 
996 	      if (recog_data.operand_type[i] != OP_IN)
997 		alt_cost += copy_cost (op, mode, classes[i], 0, NULL);
998 	    }
999 	  /* The only other way this alternative can be used is if
1000 	     this is a constant that could be placed into memory.  */
1001 	  else if (CONSTANT_P (op) && (allows_addr || allows_mem[i]))
1002 	    alt_cost += ira_memory_move_cost[mode][classes[i]][1];
1003 	  else
1004 	    alt_fail = 1;
1005 
1006 	  if (alt_fail)
1007 	    break;
1008 	}
1009 
1010       if (alt_fail)
1011 	{
1012 	  /* The loop above might have exited early once the failure
1013 	     was seen.  Skip over the constraints for the remaining
1014 	     operands.  */
1015 	  i += 1;
1016 	  for (; i < n_ops; ++i)
1017 	    constraints[i] = skip_alternative (constraints[i]);
1018 	  continue;
1019 	}
1020 
1021       op_cost_add = alt_cost * frequency;
1022       /* Finally, update the costs with the information we've
1023 	 calculated about this alternative.  */
1024       for (i = 0; i < n_ops; i++)
1025 	if (REG_P (ops[i]) && REGNO (ops[i]) >= FIRST_PSEUDO_REGISTER)
1026 	  {
1027 	    struct costs *pp = op_costs[i], *qq = this_op_costs[i];
1028 	    int *pp_costs = pp->cost, *qq_costs = qq->cost;
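	    /* An in-out operand is both read and written, so its cost
	       counts twice.  */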
1029 	    int scale = 1 + (recog_data.operand_type[i] == OP_INOUT);
1030 	    cost_classes_t cost_classes_ptr
1031 	      = regno_cost_classes[REGNO (ops[i])];
1032 
1033 	    pp->mem_cost = MIN (pp->mem_cost,
1034 				(qq->mem_cost + op_cost_add) * scale);
1035 
1036 	    for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1037 	      pp_costs[k]
1038 		= MIN (pp_costs[k], (qq_costs[k] + op_cost_add) * scale);
1039 	  }
1040     }
1041 
1042   if (allocno_p)
1043     for (i = 0; i < n_ops; i++)
1044       {
1045 	ira_allocno_t a;
1046 	rtx op = ops[i];
1047 
1048 	if (! REG_P (op) || REGNO (op) < FIRST_PSEUDO_REGISTER)
1049 	  continue;
1050 	a = ira_curr_regno_allocno_map [REGNO (op)];
1051 	if (! ALLOCNO_BAD_SPILL_P (a) && insn_allows_mem[i] == 0)
1052 	  ALLOCNO_BAD_SPILL_P (a) = true;
1053       }
1054 
1055 }
1056 
1057 
1058 
1059 /* Wrapper around REGNO_OK_FOR_INDEX_P, to allow pseudo registers.  */
1060 static inline bool
1061 ok_for_index_p_nonstrict (rtx reg)
1062 {
1063   unsigned regno = REGNO (reg);
1064 
1065   return regno >= FIRST_PSEUDO_REGISTER || REGNO_OK_FOR_INDEX_P (regno);
1066 }
1067 
1068 /* A version of regno_ok_for_base_p for use here, when all
1069    pseudo-registers should count as OK.  Arguments as for
1070    regno_ok_for_base_p.  */
1071 static inline bool
1072 ok_for_base_p_nonstrict (rtx reg, machine_mode mode, addr_space_t as,
1073 			 enum rtx_code outer_code, enum rtx_code index_code)
1074 {
1075   unsigned regno = REGNO (reg);
1076 
1077   if (regno >= FIRST_PSEUDO_REGISTER)
1078     return true;
1079   return ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
1080 }
1081 
1082 /* Record the pseudo registers we must reload into hard registers in a
1083    subexpression of a memory address, X.
1084 
1085    If CONTEXT is 0, we are looking at the base part of an address,
1086    otherwise we are looking at the index part.
1087 
1088    MODE and AS are the mode and address space of the memory reference;
1089    OUTER_CODE and INDEX_CODE give the context that the rtx appears in.
1090    These four arguments are passed down to base_reg_class.
1091 
1092    SCALE is twice the amount to multiply the cost by (it is twice so
1093    we can represent half-cost adjustments).  */
1094 static void
1095 record_address_regs (machine_mode mode, addr_space_t as, rtx x,
1096 		     int context, enum rtx_code outer_code,
1097 		     enum rtx_code index_code, int scale)
1098 {
1099   enum rtx_code code = GET_CODE (x);
1100   enum reg_class rclass;
1101 
1102   if (context == 1)
1103     rclass = INDEX_REG_CLASS;
1104   else
1105     rclass = base_reg_class (mode, as, outer_code, index_code);
1106 
1107   switch (code)
1108     {
1109     case CONST_INT:
1110     case CONST:
1111     case CC0:
1112     case PC:
1113     case SYMBOL_REF:
1114     case LABEL_REF:
1115       return;
1116 
1117     case PLUS:
1118       /* When we have an address that is a sum, we must determine
1119 	 whether registers are "base" or "index" regs.  If there is a
1120 	 sum of two registers, we must choose one to be the "base".
1121 	 Luckily, we can use the REG_POINTER to make a good choice
1122 	 most of the time.  We only need to do this on machines that
1123 	 can have two registers in an address and where the base and
1124 	 index register classes are different.
1125 
1126 	 ??? This code used to set REGNO_POINTER_FLAG in some cases,
1127 	 but that seems bogus since it should only be set when we are
1128 	 sure the register is being used as a pointer.  */
1129       {
1130 	rtx arg0 = XEXP (x, 0);
1131 	rtx arg1 = XEXP (x, 1);
1132 	enum rtx_code code0 = GET_CODE (arg0);
1133 	enum rtx_code code1 = GET_CODE (arg1);
1134 
1135 	/* Look inside subregs.  */
1136 	if (code0 == SUBREG)
1137 	  arg0 = SUBREG_REG (arg0), code0 = GET_CODE (arg0);
1138 	if (code1 == SUBREG)
1139 	  arg1 = SUBREG_REG (arg1), code1 = GET_CODE (arg1);
1140 
1141 	/* If index registers do not appear, or coincide with base registers,
1142 	   just record registers in any non-constant operands.  We
1143 	   assume here, as well as in the tests below, that all
1144 	   addresses are in canonical form.  */
1145 	if (MAX_REGS_PER_ADDRESS == 1
1146 	    || INDEX_REG_CLASS == base_reg_class (VOIDmode, as, PLUS, SCRATCH))
1147 	  {
1148 	    record_address_regs (mode, as, arg0, context, PLUS, code1, scale);
1149 	    if (! CONSTANT_P (arg1))
1150 	      record_address_regs (mode, as, arg1, context, PLUS, code0, scale);
1151 	  }
1152 
1153 	/* If the second operand is a constant integer, it doesn't
1154 	   change what class the first operand must be.  */
1155 	else if (CONST_SCALAR_INT_P (arg1))
1156 	  record_address_regs (mode, as, arg0, context, PLUS, code1, scale);
1157 	/* If the second operand is a symbolic constant, the first
1158 	   operand must be an index register.  */
1159 	else if (code1 == SYMBOL_REF || code1 == CONST || code1 == LABEL_REF)
1160 	  record_address_regs (mode, as, arg0, 1, PLUS, code1, scale);
1161 	/* If both operands are registers but one is already a hard
1162 	   register of index or reg-base class, give the other the
1163 	   class that the hard register is not.  */
1164 	else if (code0 == REG && code1 == REG
1165 		 && REGNO (arg0) < FIRST_PSEUDO_REGISTER
1166 		 && (ok_for_base_p_nonstrict (arg0, mode, as, PLUS, REG)
1167 		     || ok_for_index_p_nonstrict (arg0)))
1168 	  record_address_regs (mode, as, arg1,
1169 			       ok_for_base_p_nonstrict (arg0, mode, as,
1170 							PLUS, REG) ? 1 : 0,
1171 			       PLUS, REG, scale);
1172 	else if (code0 == REG && code1 == REG
1173 		 && REGNO (arg1) < FIRST_PSEUDO_REGISTER
1174 		 && (ok_for_base_p_nonstrict (arg1, mode, as, PLUS, REG)
1175 		     || ok_for_index_p_nonstrict (arg1)))
1176 	  record_address_regs (mode, as, arg0,
1177 			       ok_for_base_p_nonstrict (arg1, mode, as,
1178 							PLUS, REG) ? 1 : 0,
1179 			       PLUS, REG, scale);
1180 	/* If one operand is known to be a pointer, it must be the
1181 	   base with the other operand the index.  Likewise if the
1182 	   other operand is a MULT.  */
1183 	else if ((code0 == REG && REG_POINTER (arg0)) || code1 == MULT)
1184 	  {
1185 	    record_address_regs (mode, as, arg0, 0, PLUS, code1, scale);
1186 	    record_address_regs (mode, as, arg1, 1, PLUS, code0, scale);
1187 	  }
1188 	else if ((code1 == REG && REG_POINTER (arg1)) || code0 == MULT)
1189 	  {
1190 	    record_address_regs (mode, as, arg0, 1, PLUS, code1, scale);
1191 	    record_address_regs (mode, as, arg1, 0, PLUS, code0, scale);
1192 	  }
1193 	/* Otherwise, count equal chances that each might be a base or
1194 	   index register.  This case should be rare.  */
1195 	else
1196 	  {
1197 	    record_address_regs (mode, as, arg0, 0, PLUS, code1, scale / 2);
1198 	    record_address_regs (mode, as, arg0, 1, PLUS, code1, scale / 2);
1199 	    record_address_regs (mode, as, arg1, 0, PLUS, code0, scale / 2);
1200 	    record_address_regs (mode, as, arg1, 1, PLUS, code0, scale / 2);
1201 	  }
1202       }
1203       break;
1204 
1205       /* Double the importance of an allocno that is incremented or
1206 	 decremented, since it would take two extra insns if it ends
1207 	 up in the wrong place.  */
1208     case POST_MODIFY:
1209     case PRE_MODIFY:
1210       record_address_regs (mode, as, XEXP (x, 0), 0, code,
1211 			   GET_CODE (XEXP (XEXP (x, 1), 1)), 2 * scale);
1212       if (REG_P (XEXP (XEXP (x, 1), 1)))
1213 	record_address_regs (mode, as, XEXP (XEXP (x, 1), 1), 1, code, REG,
1214 			     2 * scale);
1215       break;
1216 
1217     case POST_INC:
1218     case PRE_INC:
1219     case POST_DEC:
1220     case PRE_DEC:
1221       /* Double the importance of an allocno that is incremented or
1222 	 decremented, since it would take two extra insns if it ends
1223 	 up in the wrong place.  */
1224       record_address_regs (mode, as, XEXP (x, 0), 0, code, SCRATCH, 2 * scale);
1225       break;
1226 
1227     case REG:
1228       {
1229 	struct costs *pp;
1230 	int *pp_costs;
1231 	enum reg_class i;
1232 	int k, regno, add_cost;
1233 	cost_classes_t cost_classes_ptr;
1234 	enum reg_class *cost_classes;
1235 	move_table *move_in_cost;
1236 
1237 	if (REGNO (x) < FIRST_PSEUDO_REGISTER)
1238 	  break;
1239 
1240 	regno = REGNO (x);
1241 	if (allocno_p)
1242 	  ALLOCNO_BAD_SPILL_P (ira_curr_regno_allocno_map[regno]) = true;
1243 	pp = COSTS (costs, COST_INDEX (regno));
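	/* SCALE is twice the real scaling factor (see the function
	   comment), hence the division by 2.  */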
1244 	add_cost = (ira_memory_move_cost[Pmode][rclass][1] * scale) / 2;
1245 	if (INT_MAX - add_cost < pp->mem_cost)
1246 	  pp->mem_cost = INT_MAX;
1247 	else
1248 	  pp->mem_cost += add_cost;
1249 	cost_classes_ptr = regno_cost_classes[regno];
1250 	cost_classes = cost_classes_ptr->classes;
1251 	pp_costs = pp->cost;
1252 	ira_init_register_move_cost_if_necessary (Pmode);
1253 	move_in_cost = ira_may_move_in_cost[Pmode];
1254 	for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1255 	  {
1256 	    i = cost_classes[k];
1257 	    add_cost = (move_in_cost[i][rclass] * scale) / 2;
1258 	    if (INT_MAX - add_cost < pp_costs[k])
1259 	      pp_costs[k] = INT_MAX;
1260 	    else
1261 	      pp_costs[k] += add_cost;
1262 	  }
1263       }
1264       break;
1265 
1266     default:
1267       {
1268 	const char *fmt = GET_RTX_FORMAT (code);
1269 	int i;
1270 	for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1271 	  if (fmt[i] == 'e')
1272 	    record_address_regs (mode, as, XEXP (x, i), context, code, SCRATCH,
1273 				 scale);
1274       }
1275     }
1276 }
1277 
1278 
1279 
1280 /* Calculate the costs of insn operands.  */
1281 static void
1282 record_operand_costs (rtx_insn *insn, enum reg_class *pref)
1283 {
1284   const char *constraints[MAX_RECOG_OPERANDS];
1285   machine_mode modes[MAX_RECOG_OPERANDS];
1286   rtx ops[MAX_RECOG_OPERANDS];
1287   rtx set;
1288   int i;
1289 
1290   for (i = 0; i < recog_data.n_operands; i++)
1291     {
1292       constraints[i] = recog_data.constraints[i];
1293       modes[i] = recog_data.operand_mode[i];
1294     }
1295 
1296   /* If we get here, we are set up to record the costs of all the
1297      operands for this insn.  Start by initializing the costs.  Then
1298      handle any address registers.  Finally record the desired classes
1299      for any allocnos, doing it twice if some pair of operands are
1300      commutative.  */
1301   for (i = 0; i < recog_data.n_operands; i++)
1302     {
1303       memcpy (op_costs[i], init_cost, struct_costs_size);
1304 
1305       ops[i] = recog_data.operand[i];
1306       if (GET_CODE (recog_data.operand[i]) == SUBREG)
1307 	recog_data.operand[i] = SUBREG_REG (recog_data.operand[i]);
1308 
1309       if (MEM_P (recog_data.operand[i]))
1310 	record_address_regs (GET_MODE (recog_data.operand[i]),
1311 			     MEM_ADDR_SPACE (recog_data.operand[i]),
1312 			     XEXP (recog_data.operand[i], 0),
1313 			     0, MEM, SCRATCH, frequency * 2);
1314       else if (constraints[i][0] == 'p'
1315 	       || (insn_extra_address_constraint
1316 		   (lookup_constraint (constraints[i]))))
1317 	record_address_regs (VOIDmode, ADDR_SPACE_GENERIC,
1318 			     recog_data.operand[i], 0, ADDRESS, SCRATCH,
1319 			     frequency * 2);
1320     }
1321 
1322   /* Check for commutative in a separate loop so everything will have
1323      been initialized.  We must do this even if one operand is a
1324      constant--see addsi3 in m68k.md.  */
1325   for (i = 0; i < (int) recog_data.n_operands - 1; i++)
1326     if (constraints[i][0] == '%')
1327       {
1328 	const char *xconstraints[MAX_RECOG_OPERANDS];
1329 	int j;
1330 
1331 	/* Handle commutative operands by swapping the constraints.
1332 	   We assume the modes are the same.  */
1333 	for (j = 0; j < recog_data.n_operands; j++)
1334 	  xconstraints[j] = constraints[j];
1335 
1336 	xconstraints[i] = constraints[i+1];
1337 	xconstraints[i+1] = constraints[i];
1338 	record_reg_classes (recog_data.n_alternatives, recog_data.n_operands,
1339 			    recog_data.operand, modes,
1340 			    xconstraints, insn, pref);
1341       }
1342   record_reg_classes (recog_data.n_alternatives, recog_data.n_operands,
1343 		      recog_data.operand, modes,
1344 		      constraints, insn, pref);
1345 
1346   /* If this insn is a single set copying operand 1 to operand 0 and
1347      one operand is an allocno with the other a hard reg or an allocno
1348      that prefers a hard register that is in its own register class
1349      then we may want to adjust the cost of that register class to -1.
1350 
1351      Avoid the adjustment if the source does not die, to avoid
1352      stressing the register allocator by preferencing two colliding
1353      registers into a single class.
1354 
1355      Also avoid the adjustment if a copy between hard registers of the
1356      class is expensive (ten times the cost of a default copy is
1357      considered arbitrarily expensive).  This avoids losing when the
1358      preferred class is very expensive as the source of a copy
1359      instruction.  */
1360   if ((set = single_set (insn)) != NULL_RTX
1361       /* In rare cases the single set insn might have fewer than 2
1362 	 operands, as the source can be a fixed special reg.  */
1363       && recog_data.n_operands > 1
1364       && ops[0] == SET_DEST (set) && ops[1] == SET_SRC (set))
1365     {
1366       int regno, other_regno;
1367       rtx dest = SET_DEST (set);
1368       rtx src = SET_SRC (set);
1369 
1370       if (GET_CODE (dest) == SUBREG
1371 	  && known_eq (GET_MODE_SIZE (GET_MODE (dest)),
1372 		       GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))))
1373 	dest = SUBREG_REG (dest);
1374       if (GET_CODE (src) == SUBREG
1375 	  && known_eq (GET_MODE_SIZE (GET_MODE (src)),
1376 		       GET_MODE_SIZE (GET_MODE (SUBREG_REG (src)))))
1377 	src = SUBREG_REG (src);
1378       if (REG_P (src) && REG_P (dest)
1379 	  && find_regno_note (insn, REG_DEAD, REGNO (src))
1380 	  && (((regno = REGNO (src)) >= FIRST_PSEUDO_REGISTER
1381 	       && (other_regno = REGNO (dest)) < FIRST_PSEUDO_REGISTER)
1382 	      || ((regno = REGNO (dest)) >= FIRST_PSEUDO_REGISTER
1383 		  && (other_regno = REGNO (src)) < FIRST_PSEUDO_REGISTER)))
1384 	{
1385 	  machine_mode mode = GET_MODE (src);
1386 	  cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
1387 	  enum reg_class *cost_classes = cost_classes_ptr->classes;
1388 	  reg_class_t rclass;
1389 	  int k;
1390 
1391 	  i = regno == (int) REGNO (src) ? 1 : 0;
1392 	  for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1393 	    {
1394 	      rclass = cost_classes[k];
1395 	      if (TEST_HARD_REG_BIT (reg_class_contents[rclass], other_regno)
1396 		  && (reg_class_size[(int) rclass]
1397 		      == ira_reg_class_max_nregs [(int) rclass][(int) mode]))
1398 		{
1399 		  if (reg_class_size[rclass] == 1)
1400 		    op_costs[i]->cost[k] = -frequency;
1401 		  else if (in_hard_reg_set_p (reg_class_contents[rclass],
1402 					      mode, other_regno))
1403 		    op_costs[i]->cost[k] = -frequency;
1404 		}
1405 	    }
1406 	}
1407     }
1408 }
1409 
1410 
1411 
1412 /* Process one insn INSN.  Scan it and record each time it would save
1413    code to put a certain allocno in a certain class.  Return the last
1414    insn processed, so that the scan can be continued from there.  */
1415 static rtx_insn *
1416 scan_one_insn (rtx_insn *insn)
1417 {
1418   enum rtx_code pat_code;
1419   rtx set, note;
1420   int i, k;
1421   bool counted_mem;
1422 
1423   if (!NONDEBUG_INSN_P (insn))
1424     return insn;
1425 
1426   pat_code = GET_CODE (PATTERN (insn));
1427   if (pat_code == ASM_INPUT)
1428     return insn;
1429 
1430   /* If INSN is a USE/CLOBBER of a pseudo in a mode M then go ahead
1431      and initialize the register move costs of mode M.
1432 
1433      The pseudo may be related to another pseudo via a copy (implicit or
1434      explicit) and if there are no mode M uses/sets of the original
1435      pseudo, then we may leave the register move costs uninitialized for
1436      mode M. */
1437   if (pat_code == USE || pat_code == CLOBBER)
1438     {
1439       rtx x = XEXP (PATTERN (insn), 0);
1440       if (GET_CODE (x) == REG
1441 	  && REGNO (x) >= FIRST_PSEUDO_REGISTER
1442 	  && have_regs_of_mode[GET_MODE (x)])
1443         ira_init_register_move_cost_if_necessary (GET_MODE (x));
1444       return insn;
1445     }
1446 
1447   counted_mem = false;
1448   set = single_set (insn);
1449   extract_insn (insn);
1450 
1451   /* If this insn loads a parameter from its stack slot, then it
1452      represents a savings, rather than a cost, if the parameter is
1453      stored in memory.  Record this fact.
1454 
1455      Similarly if we're loading other constants from memory (constant
1456      pool, TOC references, small data areas, etc) and this is the only
1457      assignment to the destination pseudo.
1458 
1459      Don't do this if SET_SRC (set) isn't a general operand; if it is
1460      a memory requiring special instructions to load it, decreasing
1461      mem_cost might result in it being loaded using the specialized
1462      instruction into a register, then stored into the stack and loaded
1463      again from the stack.  See PR52208.
1464 
1465      Don't do this if SET_SRC (set) has side effects.  See PR56124.  */
1466   if (set != 0 && REG_P (SET_DEST (set)) && MEM_P (SET_SRC (set))
1467       && (note = find_reg_note (insn, REG_EQUIV, NULL_RTX)) != NULL_RTX
1468       && ((MEM_P (XEXP (note, 0))
1469 	   && !side_effects_p (SET_SRC (set)))
1470 	  || (CONSTANT_P (XEXP (note, 0))
1471 	      && targetm.legitimate_constant_p (GET_MODE (SET_DEST (set)),
1472 						XEXP (note, 0))
1473 	      && REG_N_SETS (REGNO (SET_DEST (set))) == 1))
1474       && general_operand (SET_SRC (set), GET_MODE (SET_SRC (set)))
1475       /* LRA does not use equiv with a symbol for PIC code.  */
1476       && (! ira_use_lra_p || ! pic_offset_table_rtx
1477 	  || ! contains_symbol_ref_p (XEXP (note, 0))))
1478     {
1479       enum reg_class cl = GENERAL_REGS;
1480       rtx reg = SET_DEST (set);
1481       int num = COST_INDEX (REGNO (reg));
1482 
1483       COSTS (costs, num)->mem_cost
1484 	-= ira_memory_move_cost[GET_MODE (reg)][cl][1] * frequency;
1485       record_address_regs (GET_MODE (SET_SRC (set)),
1486 			   MEM_ADDR_SPACE (SET_SRC (set)),
1487 			   XEXP (SET_SRC (set), 0), 0, MEM, SCRATCH,
1488 			   frequency * 2);
1489       counted_mem = true;
1490     }
1491 
1492   record_operand_costs (insn, pref);
1493 
1494   /* Now add the cost for each operand to the total costs for its
1495      allocno.  */
1496   for (i = 0; i < recog_data.n_operands; i++)
1497     if (REG_P (recog_data.operand[i])
1498 	&& REGNO (recog_data.operand[i]) >= FIRST_PSEUDO_REGISTER)
1499       {
1500 	int regno = REGNO (recog_data.operand[i]);
1501 	struct costs *p = COSTS (costs, COST_INDEX (regno));
1502 	struct costs *q = op_costs[i];
1503 	int *p_costs = p->cost, *q_costs = q->cost;
1504 	cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
1505 	int add_cost;
1506 
1507 	/* If we already accounted for the memory "cost" above, don't
1508 	   do so again.  */
1509 	if (!counted_mem)
1510 	  {
1511 	    add_cost = q->mem_cost;
1512 	    if (add_cost > 0 && INT_MAX - add_cost < p->mem_cost)
1513 	      p->mem_cost = INT_MAX;
1514 	    else
1515 	      p->mem_cost += add_cost;
1516 	  }
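	/* Accumulate the operand's cost for each cost class, saturating
	   at INT_MAX to avoid signed overflow.  */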
1517 	for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1518 	  {
1519 	    add_cost = q_costs[k];
1520 	    if (add_cost > 0 && INT_MAX - add_cost < p_costs[k])
1521 	      p_costs[k] = INT_MAX;
1522 	    else
1523 	      p_costs[k] += add_cost;
1524 	  }
1525       }
1526 
1527   return insn;
1528 }
1529 
1530 
1531 
1532 /* Print allocnos costs to file F.  */
1533 static void
1534 print_allocno_costs (FILE *f)
1535 {
1536   int k;
1537   ira_allocno_t a;
1538   ira_allocno_iterator ai;
1539 
1540   ira_assert (allocno_p);
1541   fprintf (f, "\n");
1542   FOR_EACH_ALLOCNO (a, ai)
1543     {
1544       int i, rclass;
1545       basic_block bb;
1546       int regno = ALLOCNO_REGNO (a);
1547       cost_classes_t cost_classes_ptr = regno_cost_classes[regno];
1548       enum reg_class *cost_classes = cost_classes_ptr->classes;
1549 
1550       i = ALLOCNO_NUM (a);
1551       fprintf (f, "  a%d(r%d,", i, regno);
1552       if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
1553 	fprintf (f, "b%d", bb->index);
1554       else
1555 	fprintf (f, "l%d", ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
1556       fprintf (f, ") costs:");
1557       for (k = 0; k < cost_classes_ptr->num; k++)
1558 	{
1559 	  rclass = cost_classes[k];
1560 	  fprintf (f, " %s:%d", reg_class_names[rclass],
1561 		   COSTS (costs, i)->cost[k]);
1562 	  if (flag_ira_region == IRA_REGION_ALL
1563 	      || flag_ira_region == IRA_REGION_MIXED)
1564 	    fprintf (f, ",%d", COSTS (total_allocno_costs, i)->cost[k]);
1565 	}
1566       fprintf (f, " MEM:%i", COSTS (costs, i)->mem_cost);
1567       if (flag_ira_region == IRA_REGION_ALL
1568 	  || flag_ira_region == IRA_REGION_MIXED)
1569 	fprintf (f, ",%d", COSTS (total_allocno_costs, i)->mem_cost);
1570       fprintf (f, "\n");
1571     }
1572 }
1573 
1574 /* Print pseudo costs to file F.  */
1575 static void
1576 print_pseudo_costs (FILE *f)
1577 {
1578   int regno, k;
1579   int rclass;
1580   cost_classes_t cost_classes_ptr;
1581   enum reg_class *cost_classes;
1582 
1583   ira_assert (! allocno_p);
1584   fprintf (f, "\n");
1585   for (regno = max_reg_num () - 1; regno >= FIRST_PSEUDO_REGISTER; regno--)
1586     {
1587       if (REG_N_REFS (regno) <= 0)
1588 	continue;
1589       cost_classes_ptr = regno_cost_classes[regno];
1590       cost_classes = cost_classes_ptr->classes;
1591       fprintf (f, "  r%d costs:", regno);
1592       for (k = 0; k < cost_classes_ptr->num; k++)
1593 	{
1594 	  rclass = cost_classes[k];
1595 	  fprintf (f, " %s:%d", reg_class_names[rclass],
1596 		   COSTS (costs, regno)->cost[k]);
1597 	}
1598       fprintf (f, " MEM:%i\n", COSTS (costs, regno)->mem_cost);
1599     }
1600 }
1601 
1602 /* Process one basic block BB (not a whole loop tree node) to update
1603    the allocno or pseudo costs.  */
1604 static void
1605 process_bb_for_costs (basic_block bb)
1606 {
1607   rtx_insn *insn;
1608 
1609   frequency = REG_FREQ_FROM_BB (bb);
1610   if (frequency == 0)
1611     frequency = 1;
1612   FOR_BB_INSNS (bb, insn)
1613     insn = scan_one_insn (insn);
1614 }
1615 
1616 /* Traverse the BB represented by LOOP_TREE_NODE to update the allocno
1617    costs.  */
1618 static void
1619 process_bb_node_for_costs (ira_loop_tree_node_t loop_tree_node)
1620 {
1621   basic_block bb;
1622 
1623   bb = loop_tree_node->bb;
1624   if (bb != NULL)
1625     process_bb_for_costs (bb);
1626 }
1627 
1628 /* Find costs of register classes and memory for allocnos or pseudos
1629    and their best costs.  Set up preferred, alternative and allocno
1630    classes for pseudos.  */
1631 static void
1632 find_costs_and_classes (FILE *dump_file)
1633 {
1634   int i, k, start, max_cost_classes_num;
1635   int pass;
1636   basic_block bb;
1637   enum reg_class *regno_best_class, new_class;
1638 
1639   init_recog ();
1640   regno_best_class
1641     = (enum reg_class *) ira_allocate (max_reg_num ()
1642 				       * sizeof (enum reg_class));
1643   for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
1644     regno_best_class[i] = NO_REGS;
1645   if (!resize_reg_info () && allocno_p
1646       && pseudo_classes_defined_p && flag_expensive_optimizations)
1647     {
1648       ira_allocno_t a;
1649       ira_allocno_iterator ai;
1650 
1651       pref = pref_buffer;
1652       max_cost_classes_num = 1;
1653       FOR_EACH_ALLOCNO (a, ai)
1654 	{
1655 	  pref[ALLOCNO_NUM (a)] = reg_preferred_class (ALLOCNO_REGNO (a));
1656 	  setup_regno_cost_classes_by_aclass
1657 	    (ALLOCNO_REGNO (a), pref[ALLOCNO_NUM (a)]);
1658 	  max_cost_classes_num
1659 	    = MAX (max_cost_classes_num,
1660 		   regno_cost_classes[ALLOCNO_REGNO (a)]->num);
1661 	}
1662       start = 1;
1663     }
1664   else
1665     {
1666       pref = NULL;
1667       max_cost_classes_num = ira_important_classes_num;
1668       for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
1669 	if (regno_reg_rtx[i] != NULL_RTX)
1670  	  setup_regno_cost_classes_by_mode (i, PSEUDO_REGNO_MODE (i));
1671 	else
1672 	  setup_regno_cost_classes_by_aclass (i, ALL_REGS);
1673       start = 0;
1674     }
1675   if (allocno_p)
1676     /* Clear the flag for the next compiled function.  */
1677     pseudo_classes_defined_p = false;
1678   /* Normally we scan the insns once and determine the best class to
1679      use for each allocno.  However, if -fexpensive-optimizations is
1680      on, we do so twice, the second time using the tentative best
1681      classes to guide the selection.  */
1682   for (pass = start; pass <= flag_expensive_optimizations; pass++)
1683     {
1684       if ((!allocno_p || internal_flag_ira_verbose > 0) && dump_file)
1685 	fprintf (dump_file,
1686 		 "\nPass %i for finding pseudo/allocno costs\n\n", pass);
1687 
1688       if (pass != start)
1689 	{
1690 	  max_cost_classes_num = 1;
1691 	  for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
1692 	    {
1693 	      setup_regno_cost_classes_by_aclass (i, regno_best_class[i]);
1694 	      max_cost_classes_num
1695 		= MAX (max_cost_classes_num, regno_cost_classes[i]->num);
1696 	    }
1697 	}
1698 
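      /* Each cost record holds the memory cost plus one int per cost
	 class considered on this pass.  */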
1699       struct_costs_size
1700 	= sizeof (struct costs) + sizeof (int) * (max_cost_classes_num - 1);
1701       /* Zero out our accumulation of the cost of each class for each
1702 	 allocno.  */
1703       memset (costs, 0, cost_elements_num * struct_costs_size);
1704 
1705       if (allocno_p)
1706 	{
1707 	  /* Scan the instructions and record each time it would save code
1708 	     to put a certain allocno in a certain class.  */
1709 	  ira_traverse_loop_tree (true, ira_loop_tree_root,
1710 				  process_bb_node_for_costs, NULL);
1711 
1712 	  memcpy (total_allocno_costs, costs,
1713 		  max_struct_costs_size * ira_allocnos_num);
1714 	}
1715       else
1716 	{
1717 	  basic_block bb;
1718 
1719 	  FOR_EACH_BB_FN (bb, cfun)
1720 	    process_bb_for_costs (bb);
1721 	}
1722 
1723       if (pass == 0)
1724 	pref = pref_buffer;
1725 
1726       /* Now for each allocno look at how desirable each class is and
1727 	 find which class is preferred.  */
1728       for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
1729 	{
1730 	  ira_allocno_t a, parent_a;
1731 	  int rclass, a_num, parent_a_num, add_cost;
1732 	  ira_loop_tree_node_t parent;
1733 	  int best_cost, allocno_cost;
1734 	  enum reg_class best, alt_class;
1735 	  cost_classes_t cost_classes_ptr = regno_cost_classes[i];
1736 	  enum reg_class *cost_classes;
1737 	  int *i_costs = temp_costs->cost;
1738 	  int i_mem_cost;
1739 	  int equiv_savings = regno_equiv_gains[i];
1740 
1741 	  if (! allocno_p)
1742 	    {
1743 	      if (regno_reg_rtx[i] == NULL_RTX)
1744 		continue;
1745 	      memcpy (temp_costs, COSTS (costs, i), struct_costs_size);
1746 	      i_mem_cost = temp_costs->mem_cost;
1747 	      cost_classes = cost_classes_ptr->classes;
1748 	    }
1749 	  else
1750 	    {
1751 	      if (ira_regno_allocno_map[i] == NULL)
1752 		continue;
1753 	      memset (temp_costs, 0, struct_costs_size);
1754 	      i_mem_cost = 0;
1755 	      cost_classes = cost_classes_ptr->classes;
1756 	      /* Find cost of all allocnos with the same regno.  */
1757 	      for (a = ira_regno_allocno_map[i];
1758 		   a != NULL;
1759 		   a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
1760 		{
1761 		  int *a_costs, *p_costs;
1762 
1763 		  a_num = ALLOCNO_NUM (a);
1764 		  if ((flag_ira_region == IRA_REGION_ALL
1765 		       || flag_ira_region == IRA_REGION_MIXED)
1766 		      && (parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) != NULL
1767 		      && (parent_a = parent->regno_allocno_map[i]) != NULL
1768 		      /* There are no caps yet.  */
1769 		      && bitmap_bit_p (ALLOCNO_LOOP_TREE_NODE
1770 				       (a)->border_allocnos,
1771 				       ALLOCNO_NUM (a)))
1772 		    {
1773 		      /* Propagate costs to upper levels in the region
1774 			 tree.  */
1775 		      parent_a_num = ALLOCNO_NUM (parent_a);
1776 		      a_costs = COSTS (total_allocno_costs, a_num)->cost;
1777 		      p_costs = COSTS (total_allocno_costs, parent_a_num)->cost;
1778 		      for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1779 			{
1780 			  add_cost = a_costs[k];
1781 			  if (add_cost > 0 && INT_MAX - add_cost < p_costs[k])
1782 			    p_costs[k] = INT_MAX;
1783 			  else
1784 			    p_costs[k] += add_cost;
1785 			}
1786 		      add_cost = COSTS (total_allocno_costs, a_num)->mem_cost;
1787 		      if (add_cost > 0
1788 			  && (INT_MAX - add_cost
1789 			      < COSTS (total_allocno_costs,
1790 				       parent_a_num)->mem_cost))
1791 			COSTS (total_allocno_costs, parent_a_num)->mem_cost
1792 			  = INT_MAX;
1793 		      else
1794 			COSTS (total_allocno_costs, parent_a_num)->mem_cost
1795 			  += add_cost;
1796 
1797 		      if (i >= first_moveable_pseudo && i < last_moveable_pseudo)
1798 			COSTS (total_allocno_costs, parent_a_num)->mem_cost = 0;
1799 		    }
1800 		  a_costs = COSTS (costs, a_num)->cost;
1801 		  for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1802 		    {
1803 		      add_cost = a_costs[k];
1804 		      if (add_cost > 0 && INT_MAX - add_cost < i_costs[k])
1805 			i_costs[k] = INT_MAX;
1806 		      else
1807 			i_costs[k] += add_cost;
1808 		    }
1809 		  add_cost = COSTS (costs, a_num)->mem_cost;
1810 		  if (add_cost > 0 && INT_MAX - add_cost < i_mem_cost)
1811 		    i_mem_cost = INT_MAX;
1812 		  else
1813 		    i_mem_cost += add_cost;
1814 		}
1815 	    }
1816 	  if (i >= first_moveable_pseudo && i < last_moveable_pseudo)
1817 	    i_mem_cost = 0;
1818 	  else if (equiv_savings < 0)
1819 	    i_mem_cost = -equiv_savings;
1820 	  else if (equiv_savings > 0)
1821 	    {
1822 	      i_mem_cost = 0;
1823 	      for (k = cost_classes_ptr->num - 1; k >= 0; k--)
1824 		i_costs[k] += equiv_savings;
1825 	    }
1826 
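	  /* Start the search for the best class from a very large
	     initial cost.  */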
1827 	  best_cost = (1 << (HOST_BITS_PER_INT - 2)) - 1;
1828 	  best = ALL_REGS;
1829 	  alt_class = NO_REGS;
1830 	  /* Find best common class for all allocnos with the same
1831 	     regno.  */
1832 	  for (k = 0; k < cost_classes_ptr->num; k++)
1833 	    {
1834 	      rclass = cost_classes[k];
1835 	      if (i_costs[k] < best_cost)
1836 		{
1837 		  best_cost = i_costs[k];
1838 		  best = (enum reg_class) rclass;
1839 		}
1840 	      else if (i_costs[k] == best_cost)
1841 		best = ira_reg_class_subunion[best][rclass];
1842 	      if (pass == flag_expensive_optimizations
1843 		  /* We still prefer registers to memory even at this
1844 		     stage if their costs are the same.  We will make
1845 		     a final decision during assigning hard registers
1846 		     when we have all info including more accurate
1847 		     costs which might be affected by assigning hard
1848 		     registers to other pseudos because the pseudos
1849 		     involved in moves can be coalesced.  */
1850 		  && i_costs[k] <= i_mem_cost
1851 		  && (reg_class_size[reg_class_subunion[alt_class][rclass]]
1852 		      > reg_class_size[alt_class]))
1853 		alt_class = reg_class_subunion[alt_class][rclass];
1854 	    }
1855 	  alt_class = ira_allocno_class_translate[alt_class];
1856 	  if (best_cost > i_mem_cost
1857 	      && ! non_spilled_static_chain_regno_p (i))
1858 	    regno_aclass[i] = NO_REGS;
1859 	  else if (!optimize && !targetm.class_likely_spilled_p (best))
1860 	    /* Registers in the alternative class are likely to need
1861 	       longer or slower sequences than registers in the best class.
1862 	       When optimizing we make some effort to use the best class
1863 	       over the alternative class where possible, but at -O0 we
1864 	       effectively give the alternative class equal weight.
1865 	       We then run the risk of using slower alternative registers
1866 	       when plenty of registers from the best class are still free.
1867 	       This is especially true because live ranges tend to be very
1868 	       short in -O0 code and so register pressure tends to be low.
1869 
1870 	       Avoid that by ignoring the alternative class if the best
1871 	       class has plenty of registers.
1872 
1873 	       The union class arrays give important classes and only
1874 	       some of them are allocno classes.  So translate them into
1875 	       allocno classes.  */
1876 	    regno_aclass[i] = ira_allocno_class_translate[best];
1877 	  else
1878 	    {
1879 	      /* Make the common class the biggest class of best and
1880 		 alt_class.  Translate the common class into an
1881 		 allocno class too.  */
1882 	      regno_aclass[i] = (ira_allocno_class_translate
1883 				 [ira_reg_class_superunion[best][alt_class]]);
1884 	      ira_assert (regno_aclass[i] != NO_REGS
1885 			  && ira_reg_allocno_class_p[regno_aclass[i]]);
1886 	    }
1887 	  if ((new_class
1888 	       = (reg_class) (targetm.ira_change_pseudo_allocno_class
1889 			      (i, regno_aclass[i], best))) != regno_aclass[i])
1890 	    {
1891 	      regno_aclass[i] = new_class;
1892 	      if (hard_reg_set_subset_p (reg_class_contents[new_class],
1893 					 reg_class_contents[best]))
1894 		best = new_class;
1895 	      if (hard_reg_set_subset_p (reg_class_contents[new_class],
1896 					 reg_class_contents[alt_class]))
1897 		alt_class = new_class;
1898 	    }
1899 	  if (pass == flag_expensive_optimizations)
1900 	    {
1901 	      if (best_cost > i_mem_cost
1902 		  /* Do not assign NO_REGS to static chain pointer
1903 		     pseudo when non-local goto is used.  */
1904 		  && ! non_spilled_static_chain_regno_p (i))
1905 		best = alt_class = NO_REGS;
1906 	      else if (best == alt_class)
1907 		alt_class = NO_REGS;
1908 	      setup_reg_classes (i, best, alt_class, regno_aclass[i]);
1909 	      if ((!allocno_p || internal_flag_ira_verbose > 2)
1910 		  && dump_file != NULL)
1911 		fprintf (dump_file,
1912 			 "    r%d: preferred %s, alternative %s, allocno %s\n",
1913 			 i, reg_class_names[best], reg_class_names[alt_class],
1914 			 reg_class_names[regno_aclass[i]]);
1915 	    }
1916 	  regno_best_class[i] = best;
1917 	  if (! allocno_p)
1918 	    {
1919 	      pref[i] = (best_cost > i_mem_cost
1920 			 && ! non_spilled_static_chain_regno_p (i)
1921 			 ? NO_REGS : best);
1922 	      continue;
1923 	    }
1924 	  for (a = ira_regno_allocno_map[i];
1925 	       a != NULL;
1926 	       a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
1927 	    {
1928 	      enum reg_class aclass = regno_aclass[i];
1929 	      int a_num = ALLOCNO_NUM (a);
1930 	      int *total_a_costs = COSTS (total_allocno_costs, a_num)->cost;
1931 	      int *a_costs = COSTS (costs, a_num)->cost;
1932 
1933 	      if (aclass == NO_REGS)
1934 		best = NO_REGS;
1935 	      else
1936 		{
1937 		  /* Find the best class which is a subset of the common
1938 		     class.  */
1939 		  best_cost = (1 << (HOST_BITS_PER_INT - 2)) - 1;
1940 		  allocno_cost = best_cost;
1941 		  best = ALL_REGS;
1942 		  for (k = 0; k < cost_classes_ptr->num; k++)
1943 		    {
1944 		      rclass = cost_classes[k];
1945 		      if (! ira_class_subset_p[rclass][aclass])
1946 			continue;
1947 		      if (total_a_costs[k] < best_cost)
1948 			{
1949 			  best_cost = total_a_costs[k];
1950 			  allocno_cost = a_costs[k];
1951 			  best = (enum reg_class) rclass;
1952 			}
1953 		      else if (total_a_costs[k] == best_cost)
1954 			{
1955 			  best = ira_reg_class_subunion[best][rclass];
1956 			  allocno_cost = MAX (allocno_cost, a_costs[k]);
1957 			}
1958 		    }
1959 		  ALLOCNO_CLASS_COST (a) = allocno_cost;
1960 		}
1961 	      if (internal_flag_ira_verbose > 2 && dump_file != NULL
1962 		  && (pass == 0 || pref[a_num] != best))
1963 		{
1964 		  fprintf (dump_file, "    a%d (r%d,", a_num, i);
1965 		  if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
1966 		    fprintf (dump_file, "b%d", bb->index);
1967 		  else
1968 		    fprintf (dump_file, "l%d",
1969 			     ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
1970 		  fprintf (dump_file, ") best %s, allocno %s\n",
1971 			   reg_class_names[best],
1972 			   reg_class_names[aclass]);
1973 		}
1974 	      pref[a_num] = best;
1975 	      if (pass == flag_expensive_optimizations && best != aclass
1976 		  && ira_class_hard_regs_num[best] > 0
1977 		  && (ira_reg_class_max_nregs[best][ALLOCNO_MODE (a)]
1978 		      >= ira_class_hard_regs_num[best]))
1979 		{
1980 		  int ind = cost_classes_ptr->index[aclass];
1981 
1982 		  ira_assert (ind >= 0);
1983 		  ira_init_register_move_cost_if_necessary (ALLOCNO_MODE (a));
1984 		  ira_add_allocno_pref (a, ira_class_hard_regs[best][0],
1985 					(a_costs[ind] - ALLOCNO_CLASS_COST (a))
1986 					/ (ira_register_move_cost
1987 					   [ALLOCNO_MODE (a)][best][aclass]));
1988 		  for (k = 0; k < cost_classes_ptr->num; k++)
1989 		    if (ira_class_subset_p[cost_classes[k]][best])
1990 		      a_costs[k] = a_costs[ind];
1991 		}
1992 	    }
1993 	}
1994 
1995       if (internal_flag_ira_verbose > 4 && dump_file)
1996 	{
1997 	  if (allocno_p)
1998 	    print_allocno_costs (dump_file);
1999 	  else
2000 	    print_pseudo_costs (dump_file);
2001 	  fprintf (dump_file,"\n");
2002 	}
2003     }
2004   ira_free (regno_best_class);
2005 }
2006 
2007 
2008 
2009 /* Process moves involving hard regs to modify allocno hard register
2010    costs.  We can do this only after determining allocno class.  If a
2011    hard register forms a register class, then moves with the hard
2012    register are already taken into account in class costs for the
2013    allocno.  */
2014 static void
2015 process_bb_node_for_hard_reg_moves (ira_loop_tree_node_t loop_tree_node)
2016 {
2017   int i, freq, src_regno, dst_regno, hard_regno, a_regno;
2018   bool to_p;
2019   ira_allocno_t a, curr_a;
2020   ira_loop_tree_node_t curr_loop_tree_node;
2021   enum reg_class rclass;
2022   basic_block bb;
2023   rtx_insn *insn;
2024   rtx set, src, dst;
2025 
2026   bb = loop_tree_node->bb;
2027   if (bb == NULL)
2028     return;
2029   freq = REG_FREQ_FROM_BB (bb);
2030   if (freq == 0)
2031     freq = 1;
2032   FOR_BB_INSNS (bb, insn)
2033     {
2034       if (!NONDEBUG_INSN_P (insn))
2035 	continue;
2036       set = single_set (insn);
2037       if (set == NULL_RTX)
2038 	continue;
2039       dst = SET_DEST (set);
2040       src = SET_SRC (set);
2041       if (! REG_P (dst) || ! REG_P (src))
2042 	continue;
2043       dst_regno = REGNO (dst);
2044       src_regno = REGNO (src);
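      /* Only moves between a pseudo and a hard register matter here;
	 work out which side is the hard register.  */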
2045       if (dst_regno >= FIRST_PSEUDO_REGISTER
2046 	  && src_regno < FIRST_PSEUDO_REGISTER)
2047 	{
2048 	  hard_regno = src_regno;
2049 	  a = ira_curr_regno_allocno_map[dst_regno];
2050 	  to_p = true;
2051 	}
2052       else if (src_regno >= FIRST_PSEUDO_REGISTER
2053 	       && dst_regno < FIRST_PSEUDO_REGISTER)
2054 	{
2055 	  hard_regno = dst_regno;
2056 	  a = ira_curr_regno_allocno_map[src_regno];
2057 	  to_p = false;
2058 	}
2059       else
2060 	continue;
2061       rclass = ALLOCNO_CLASS (a);
2062       if (! TEST_HARD_REG_BIT (reg_class_contents[rclass], hard_regno))
2063 	continue;
2064       i = ira_class_hard_reg_index[rclass][hard_regno];
2065       if (i < 0)
2066 	continue;
2067       a_regno = ALLOCNO_REGNO (a);
2068       for (curr_loop_tree_node = ALLOCNO_LOOP_TREE_NODE (a);
2069 	   curr_loop_tree_node != NULL;
2070 	   curr_loop_tree_node = curr_loop_tree_node->parent)
2071 	if ((curr_a = curr_loop_tree_node->regno_allocno_map[a_regno]) != NULL)
2072 	  ira_add_allocno_pref (curr_a, hard_regno, freq);
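      /* Make HARD_REGNO itself a cheaper choice for the allocno by
	 subtracting the register move cost from its entries in the
	 hard register cost vectors.  */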
2073       {
2074 	int cost;
2075 	enum reg_class hard_reg_class;
2076 	machine_mode mode;
2077 
2078 	mode = ALLOCNO_MODE (a);
2079 	hard_reg_class = REGNO_REG_CLASS (hard_regno);
2080 	ira_init_register_move_cost_if_necessary (mode);
2081 	cost = (to_p ? ira_register_move_cost[mode][hard_reg_class][rclass]
2082 		: ira_register_move_cost[mode][rclass][hard_reg_class]) * freq;
2083 	ira_allocate_and_set_costs (&ALLOCNO_HARD_REG_COSTS (a), rclass,
2084 				    ALLOCNO_CLASS_COST (a));
2085 	ira_allocate_and_set_costs (&ALLOCNO_CONFLICT_HARD_REG_COSTS (a),
2086 				    rclass, 0);
2087 	ALLOCNO_HARD_REG_COSTS (a)[i] -= cost;
2088 	ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[i] -= cost;
2089 	ALLOCNO_CLASS_COST (a) = MIN (ALLOCNO_CLASS_COST (a),
2090 				      ALLOCNO_HARD_REG_COSTS (a)[i]);
2091       }
2092     }
2093 }
2094 
2095 /* After we find hard register and memory costs for allocnos, define
2096    their classes and modify hard register costs to account for insns
2097    moving allocnos to/from hard registers.  */
2098 static void
2099 setup_allocno_class_and_costs (void)
2100 {
2101   int i, j, n, regno, hard_regno, num;
2102   int *reg_costs;
2103   enum reg_class aclass, rclass;
2104   ira_allocno_t a;
2105   ira_allocno_iterator ai;
2106   cost_classes_t cost_classes_ptr;
2107 
2108   ira_assert (allocno_p);
2109   FOR_EACH_ALLOCNO (a, ai)
2110     {
2111       i = ALLOCNO_NUM (a);
2112       regno = ALLOCNO_REGNO (a);
2113       aclass = regno_aclass[regno];
2114       cost_classes_ptr = regno_cost_classes[regno];
2115       ira_assert (pref[i] == NO_REGS || aclass != NO_REGS);
2116       ALLOCNO_MEMORY_COST (a) = COSTS (costs, i)->mem_cost;
2117       ira_set_allocno_class (a, aclass);
2118       if (aclass == NO_REGS)
2119 	continue;
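      /* If the preferred class differs from the allocno class, set up
	 per-hard-register costs: registers in the preferred class keep
	 the class cost, the others get the cost recorded for their own
	 class.  */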
2120       if (optimize && ALLOCNO_CLASS (a) != pref[i])
2121 	{
2122 	  n = ira_class_hard_regs_num[aclass];
2123 	  ALLOCNO_HARD_REG_COSTS (a)
2124 	    = reg_costs = ira_allocate_cost_vector (aclass);
2125 	  for (j = n - 1; j >= 0; j--)
2126 	    {
2127 	      hard_regno = ira_class_hard_regs[aclass][j];
2128 	      if (TEST_HARD_REG_BIT (reg_class_contents[pref[i]], hard_regno))
2129 		reg_costs[j] = ALLOCNO_CLASS_COST (a);
2130 	      else
2131 		{
2132 		  rclass = REGNO_REG_CLASS (hard_regno);
2133 		  num = cost_classes_ptr->index[rclass];
2134 		  if (num < 0)
2135 		    {
2136 		      num = cost_classes_ptr->hard_regno_index[hard_regno];
2137 		      ira_assert (num >= 0);
2138 		    }
2139 		  reg_costs[j] = COSTS (costs, i)->cost[num];
2140 		}
2141 	    }
2142 	}
2143     }
2144   if (optimize)
2145     ira_traverse_loop_tree (true, ira_loop_tree_root,
2146 			    process_bb_node_for_hard_reg_moves, NULL);
2147 }
2148 
2149 
2150 
2151 /* Function called once during compiler work.  */
2152 void
2153 ira_init_costs_once (void)
2154 {
2155   int i;
2156 
2157   init_cost = NULL;
2158   for (i = 0; i < MAX_RECOG_OPERANDS; i++)
2159     {
2160       op_costs[i] = NULL;
2161       this_op_costs[i] = NULL;
2162     }
2163   temp_costs = NULL;
2164 }
2165 
2166 /* Free allocated temporary cost vectors.  */
2167 void
2168 target_ira_int::free_ira_costs ()
2169 {
2170   int i;
2171 
2172   free (x_init_cost);
2173   x_init_cost = NULL;
2174   for (i = 0; i < MAX_RECOG_OPERANDS; i++)
2175     {
2176       free (x_op_costs[i]);
2177       free (x_this_op_costs[i]);
2178       x_op_costs[i] = x_this_op_costs[i] = NULL;
2179     }
2180   free (x_temp_costs);
2181   x_temp_costs = NULL;
2182 }
2183 
2184 /* This is called each time register related information is
2185    changed.  */
2186 void
2187 ira_init_costs (void)
2188 {
2189   int i;
2190 
2191   this_target_ira_int->free_ira_costs ();
2192   max_struct_costs_size
2193     = sizeof (struct costs) + sizeof (int) * (ira_important_classes_num - 1);
2194   /* Don't use ira_allocate because vectors live through several IRA
2195      calls.  */
2196   init_cost = (struct costs *) xmalloc (max_struct_costs_size);
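  /* Use a very large value as the initial cost for memory and for
     every important class.  */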
2197   init_cost->mem_cost = 1000000;
2198   for (i = 0; i < ira_important_classes_num; i++)
2199     init_cost->cost[i] = 1000000;
2200   for (i = 0; i < MAX_RECOG_OPERANDS; i++)
2201     {
2202       op_costs[i] = (struct costs *) xmalloc (max_struct_costs_size);
2203       this_op_costs[i] = (struct costs *) xmalloc (max_struct_costs_size);
2204     }
2205   temp_costs = (struct costs *) xmalloc (max_struct_costs_size);
2206 }
2207 
2208 
2209 
2210 /* Common initialization function for ira_costs and
2211    ira_set_pseudo_classes.  */
2212 static void
2213 init_costs (void)
2214 {
2215   init_subregs_of_mode ();
2216   costs = (struct costs *) ira_allocate (max_struct_costs_size
2217 					 * cost_elements_num);
2218   pref_buffer = (enum reg_class *) ira_allocate (sizeof (enum reg_class)
2219 						 * cost_elements_num);
2220   regno_aclass = (enum reg_class *) ira_allocate (sizeof (enum reg_class)
2221 						 * max_reg_num ());
2222   regno_equiv_gains = (int *) ira_allocate (sizeof (int) * max_reg_num ());
2223   memset (regno_equiv_gains, 0, sizeof (int) * max_reg_num ());
2224 }
2225 
2226 /* Common finalization function for ira_costs and
2227    ira_set_pseudo_classes.  */
2228 static void
2229 finish_costs (void)
2230 {
2231   finish_subregs_of_mode ();
2232   ira_free (regno_equiv_gains);
2233   ira_free (regno_aclass);
2234   ira_free (pref_buffer);
2235   ira_free (costs);
2236 }
2237 
2238 /* Entry function which defines register class, memory and hard
2239    register costs for each allocno.  */
2240 void
2241 ira_costs (void)
2242 {
2243   allocno_p = true;
2244   cost_elements_num = ira_allocnos_num;
2245   init_costs ();
2246   total_allocno_costs = (struct costs *) ira_allocate (max_struct_costs_size
2247 						       * ira_allocnos_num);
2248   initiate_regno_cost_classes ();
2249   calculate_elim_costs_all_insns ();
2250   find_costs_and_classes (ira_dump_file);
2251   setup_allocno_class_and_costs ();
2252   finish_regno_cost_classes ();
2253   finish_costs ();
2254   ira_free (total_allocno_costs);
2255 }
2256 
2257 /* Entry function which defines classes for pseudos.
2258    Set pseudo_classes_defined_p only if DEFINE_PSEUDO_CLASSES is true.  */
2259 void
2260 ira_set_pseudo_classes (bool define_pseudo_classes, FILE *dump_file)
2261 {
2262   allocno_p = false;
2263   internal_flag_ira_verbose = flag_ira_verbose;
2264   cost_elements_num = max_reg_num ();
2265   init_costs ();
2266   initiate_regno_cost_classes ();
2267   find_costs_and_classes (dump_file);
2268   finish_regno_cost_classes ();
2269   if (define_pseudo_classes)
2270     pseudo_classes_defined_p = true;
2271 
2272   finish_costs ();
2273 }
2274 
2275 
2276 
2277 /* Change hard register costs for allocnos which live through
2278    function calls.  This is called only after we have found all the
2279    calls crossed by allocnos while building their live ranges.  */
2280 void
2281 ira_tune_allocno_costs (void)
2282 {
2283   int j, n, regno;
2284   int cost, min_cost, *reg_costs;
2285   enum reg_class aclass, rclass;
2286   machine_mode mode;
2287   ira_allocno_t a;
2288   ira_allocno_iterator ai;
2289   ira_allocno_object_iterator oi;
2290   ira_object_t obj;
2291   bool skip_p;
2292   HARD_REG_SET *crossed_calls_clobber_regs;
2293 
2294   FOR_EACH_ALLOCNO (a, ai)
2295     {
2296       aclass = ALLOCNO_CLASS (a);
2297       if (aclass == NO_REGS)
2298 	continue;
2299       mode = ALLOCNO_MODE (a);
2300       n = ira_class_hard_regs_num[aclass];
2301       min_cost = INT_MAX;
2302       if (ALLOCNO_CALLS_CROSSED_NUM (a)
2303 	  != ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a))
2304 	{
2305 	  ira_allocate_and_set_costs
2306 	    (&ALLOCNO_HARD_REG_COSTS (a), aclass,
2307 	     ALLOCNO_CLASS_COST (a));
2308 	  reg_costs = ALLOCNO_HARD_REG_COSTS (a);
2309 	  for (j = n - 1; j >= 0; j--)
2310 	    {
2311 	      regno = ira_class_hard_regs[aclass][j];
2312 	      skip_p = false;
2313 	      FOR_EACH_ALLOCNO_OBJECT (a, obj, oi)
2314 		{
2315 		  if (ira_hard_reg_set_intersection_p (regno, mode,
2316 						       OBJECT_CONFLICT_HARD_REGS
2317 						       (obj)))
2318 		    {
2319 		      skip_p = true;
2320 		      break;
2321 		    }
2322 		}
2323 	      if (skip_p)
2324 		continue;
2325 	      rclass = REGNO_REG_CLASS (regno);
2326 	      cost = 0;
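	      /* If the register is clobbered by a call that the allocno
		 lives across, add the cost of saving and restoring it
		 around each such call.  */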
2327 	      crossed_calls_clobber_regs
2328 		= &(ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a));
2329 	      if (ira_hard_reg_set_intersection_p (regno, mode,
2330 						   *crossed_calls_clobber_regs)
2331 		  && (ira_hard_reg_set_intersection_p (regno, mode,
2332 						       call_used_reg_set)
2333 		      || targetm.hard_regno_call_part_clobbered (regno,
2334 								 mode)))
2335 		cost += (ALLOCNO_CALL_FREQ (a)
2336 			 * (ira_memory_move_cost[mode][rclass][0]
2337 			    + ira_memory_move_cost[mode][rclass][1]));
2338 #ifdef IRA_HARD_REGNO_ADD_COST_MULTIPLIER
2339 	      cost += ((ira_memory_move_cost[mode][rclass][0]
2340 			+ ira_memory_move_cost[mode][rclass][1])
2341 		       * ALLOCNO_FREQ (a)
2342 		       * IRA_HARD_REGNO_ADD_COST_MULTIPLIER (regno) / 2);
2343 #endif
2344 	      if (INT_MAX - cost < reg_costs[j])
2345 		reg_costs[j] = INT_MAX;
2346 	      else
2347 		reg_costs[j] += cost;
2348 	      if (min_cost > reg_costs[j])
2349 		min_cost = reg_costs[j];
2350 	    }
2351 	}
2352       if (min_cost != INT_MAX)
2353 	ALLOCNO_CLASS_COST (a) = min_cost;
2354 
2355       /* Some targets allow pseudos to be allocated to unaligned sequences
2356 	 of hard registers.  However, selecting an unaligned sequence can
2357 	 unnecessarily restrict later allocations.  So increase the cost of
2358 	 unaligned hard regs to encourage the use of aligned hard regs.  */
2359       {
2360 	const int nregs = ira_reg_class_max_nregs[aclass][ALLOCNO_MODE (a)];
2361 
2362 	if (nregs > 1)
2363 	  {
2364 	    ira_allocate_and_set_costs
2365 	      (&ALLOCNO_HARD_REG_COSTS (a), aclass, ALLOCNO_CLASS_COST (a));
2366 	    reg_costs = ALLOCNO_HARD_REG_COSTS (a);
2367 	    for (j = n - 1; j >= 0; j--)
2368 	      {
2369 		regno = ira_non_ordered_class_hard_regs[aclass][j];
2370 		if ((regno % nregs) != 0)
2371 		  {
2372 		    int index = ira_class_hard_reg_index[aclass][regno];
2373 		    ira_assert (index != -1);
2374 		    reg_costs[index] += ALLOCNO_FREQ (a);
2375 		  }
2376 	      }
2377 	  }
2378       }
2379     }
2380 }
2381 
2382 /* Add COST to the estimated gain for eliminating REGNO with its
2383    equivalence.  If COST is zero, record that no such elimination is
2384    possible.  */
2385 
2386 void
2387 ira_adjust_equiv_reg_cost (unsigned regno, int cost)
2388 {
2389   if (cost == 0)
2390     regno_equiv_gains[regno] = 0;
2391   else
2392     regno_equiv_gains[regno] += cost;
2393 }
2394 
2395 void
2396 ira_costs_c_finalize (void)
2397 {
2398   this_target_ira_int->free_ira_costs ();
2399 }
2400