/* IRA conflict builder.
   Copyright (C) 2006-2020 Free Software Foundation, Inc.
   Contributed by Vladimir Makarov <vmakarov@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "predict.h"
#include "memmodel.h"
#include "tm_p.h"
#include "insn-config.h"
#include "regs.h"
#include "ira.h"
#include "ira-int.h"
#include "sparseset.h"
#include "addresses.h"

/* This file contains code responsible for allocno conflict creation,
   allocno copy creation and allocno info accumulation on upper level
   regions.  */

/* ira_objects_num arrays of bits, recording whether two allocno
   objects conflict (cannot go in the same hardware register).

   Some of these arrays will be used as conflict bit vectors of the
   corresponding allocnos; see function build_object_conflicts.  */
static IRA_INT_TYPE **conflicts;

/* Macro to test a conflict of C1 and C2 in `conflicts'.  */
#define OBJECTS_CONFLICT_P(C1, C2)					\
  (OBJECT_MIN (C1) <= OBJECT_CONFLICT_ID (C2)				\
   && OBJECT_CONFLICT_ID (C2) <= OBJECT_MAX (C1)			\
   && TEST_MINMAX_SET_BIT (conflicts[OBJECT_CONFLICT_ID (C1)],		\
			   OBJECT_CONFLICT_ID (C2),			\
			   OBJECT_MIN (C1), OBJECT_MAX (C1)))
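
/* The conflict bit vector of C1 only covers the window of conflict
   IDs [OBJECT_MIN (C1), OBJECT_MAX (C1)] that may conflict with C1,
   so an ID outside that window means "no conflict" without touching
   the vector; the minmax set macros (see ira-int.h) index the vector
   relative to OBJECT_MIN (C1).  */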


/* Record a conflict between objects OBJ1 and OBJ2.  If necessary,
   canonicalize the conflict by recording it for lower-order subobjects
   of the corresponding allocnos.  */
static void
record_object_conflict (ira_object_t obj1, ira_object_t obj2)
{
  ira_allocno_t a1 = OBJECT_ALLOCNO (obj1);
  ira_allocno_t a2 = OBJECT_ALLOCNO (obj2);
  int w1 = OBJECT_SUBWORD (obj1);
  int w2 = OBJECT_SUBWORD (obj2);
  int id1, id2;

  /* Canonicalize the conflict.  If two identically-numbered words
     conflict, always record this as a conflict between words 0.  That
     is the only information we need, and it is easier to test for if
     it is collected in each allocno's lowest-order object.  */
  if (w1 == w2 && w1 > 0)
    {
      obj1 = ALLOCNO_OBJECT (a1, 0);
      obj2 = ALLOCNO_OBJECT (a2, 0);
    }
  id1 = OBJECT_CONFLICT_ID (obj1);
  id2 = OBJECT_CONFLICT_ID (obj2);

  SET_MINMAX_SET_BIT (conflicts[id1], id2, OBJECT_MIN (obj1),
		      OBJECT_MAX (obj1));
  SET_MINMAX_SET_BIT (conflicts[id2], id1, OBJECT_MIN (obj2),
		      OBJECT_MAX (obj2));
}

/* Build allocno conflict table by processing allocno live ranges.
   Return true if the table was built.  The table is not built if it
   is too big.  */
static bool
build_conflict_bit_table (void)
{
  int i;
  unsigned int j;
  enum reg_class aclass;
  int object_set_words, allocated_words_num, conflict_bit_vec_words_num;
  live_range_t r;
  ira_allocno_t allocno;
  ira_allocno_iterator ai;
  sparseset objects_live;
  ira_object_t obj;
  ira_allocno_object_iterator aoi;

  allocated_words_num = 0;
  FOR_EACH_ALLOCNO (allocno, ai)
    FOR_EACH_ALLOCNO_OBJECT (allocno, obj, aoi)
      {
	if (OBJECT_MAX (obj) < OBJECT_MIN (obj))
	  continue;
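	/* The vector needs OBJECT_MAX - OBJECT_MIN + 1 bits; round
	   that up to whole IRA_INT_TYPE words.  */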
	conflict_bit_vec_words_num
	  = ((OBJECT_MAX (obj) - OBJECT_MIN (obj) + IRA_INT_BITS)
	     / IRA_INT_BITS);
	allocated_words_num += conflict_bit_vec_words_num;
	if ((uint64_t) allocated_words_num * sizeof (IRA_INT_TYPE)
	    > (uint64_t) param_ira_max_conflict_table_size * 1024 * 1024)
	  {
	    if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
	      fprintf
		(ira_dump_file,
		 "+++Conflict table will be too big(>%dMB) -- don't use it\n",
		 param_ira_max_conflict_table_size);
	    return false;
	  }
      }

  conflicts = (IRA_INT_TYPE **) ira_allocate (sizeof (IRA_INT_TYPE *)
					      * ira_objects_num);
  allocated_words_num = 0;
  FOR_EACH_ALLOCNO (allocno, ai)
    FOR_EACH_ALLOCNO_OBJECT (allocno, obj, aoi)
      {
	int id = OBJECT_CONFLICT_ID (obj);
	if (OBJECT_MAX (obj) < OBJECT_MIN (obj))
	  {
	    conflicts[id] = NULL;
	    continue;
	  }
	conflict_bit_vec_words_num
	  = ((OBJECT_MAX (obj) - OBJECT_MIN (obj) + IRA_INT_BITS)
	     / IRA_INT_BITS);
	allocated_words_num += conflict_bit_vec_words_num;
	conflicts[id]
	  = (IRA_INT_TYPE *) ira_allocate (sizeof (IRA_INT_TYPE)
					   * conflict_bit_vec_words_num);
	memset (conflicts[id], 0,
		sizeof (IRA_INT_TYPE) * conflict_bit_vec_words_num);
      }

  object_set_words = (ira_objects_num + IRA_INT_BITS - 1) / IRA_INT_BITS;
  if (internal_flag_ira_verbose > 0 && ira_dump_file != NULL)
    fprintf
      (ira_dump_file,
       "+++Allocating %ld bytes for conflict table (uncompressed size %ld)\n",
       (long) allocated_words_num * sizeof (IRA_INT_TYPE),
       (long) object_set_words * ira_objects_num * sizeof (IRA_INT_TYPE));

  objects_live = sparseset_alloc (ira_objects_num);
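  /* Sweep all program points in order.  When an object's live range
     starts, a conflict is recorded with each object currently live
     whose allocno class intersects its own (objects of the same
     allocno are skipped); the object is then added to the live set.
     Objects whose ranges finish at this point are removed
     afterwards.  */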
  for (i = 0; i < ira_max_point; i++)
    {
      for (r = ira_start_point_ranges[i]; r != NULL; r = r->start_next)
	{
	  ira_object_t obj = r->object;
	  ira_allocno_t allocno = OBJECT_ALLOCNO (obj);
	  int id = OBJECT_CONFLICT_ID (obj);

	  gcc_assert (id < ira_objects_num);

	  aclass = ALLOCNO_CLASS (allocno);
	  EXECUTE_IF_SET_IN_SPARSESET (objects_live, j)
	    {
	      ira_object_t live_obj = ira_object_id_map[j];
	      ira_allocno_t live_a = OBJECT_ALLOCNO (live_obj);
	      enum reg_class live_aclass = ALLOCNO_CLASS (live_a);

	      if (ira_reg_classes_intersect_p[aclass][live_aclass]
		  /* Don't set up conflict for the allocno with itself.  */
		  && live_a != allocno)
		{
		  record_object_conflict (obj, live_obj);
		}
	    }
	  sparseset_set_bit (objects_live, id);
	}

      for (r = ira_finish_point_ranges[i]; r != NULL; r = r->finish_next)
	sparseset_clear_bit (objects_live, OBJECT_CONFLICT_ID (r->object));
    }
  sparseset_free (objects_live);
  return true;
}

/* Return true iff allocnos A1 and A2 cannot be allocated to the same
   register due to conflicts.  */

static bool
allocnos_conflict_for_copy_p (ira_allocno_t a1, ira_allocno_t a2)
{
  /* Due to the fact that we canonicalize conflicts (see
     record_object_conflict), we only need to test for conflicts of
     the lowest order words.  */
  ira_object_t obj1 = ALLOCNO_OBJECT (a1, 0);
  ira_object_t obj2 = ALLOCNO_OBJECT (a2, 0);

  return OBJECTS_CONFLICT_P (obj1, obj2);
}

/* Return true if X is a REG or a SUBREG of a REG.  */
#define REG_SUBREG_P(x)							\
   (REG_P (x) || (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x))))

/* Return X if X is a REG, otherwise X should be a SUBREG of a REG,
   and the function returns its SUBREG_REG.  *OFFSET will be set to
   0 in the first case or to the regno offset in the second case.  */
static rtx
go_through_subreg (rtx x, int *offset)
{
  rtx reg;

  *offset = 0;
  if (REG_P (x))
    return x;
  ira_assert (GET_CODE (x) == SUBREG);
  reg = SUBREG_REG (x);
  ira_assert (REG_P (reg));
  if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
    *offset = subreg_regno_offset (REGNO (reg), GET_MODE (reg),
				   SUBREG_BYTE (x), GET_MODE (x));
  else if (!can_div_trunc_p (SUBREG_BYTE (x),
			     REGMODE_NATURAL_SIZE (GET_MODE (x)), offset))
    /* Checked by validate_subreg.  We must know at compile time which
       inner hard registers are being accessed.  */
    gcc_unreachable ();
  return reg;
}

/* Process registers REG1 and REG2 in move INSN with execution
   frequency FREQ.  The function also processes the registers in a
   potential move insn (INSN == NULL in this case) with frequency
   FREQ.  The function can modify hard register costs of the
   corresponding allocnos or create a copy involving the corresponding
   allocnos.  The function does nothing if both registers are hard
   registers.  When nothing is changed, the function returns
   FALSE.  */
static bool
process_regs_for_copy (rtx reg1, rtx reg2, bool constraint_p,
		       rtx_insn *insn, int freq)
{
  int allocno_preferenced_hard_regno, cost, index, offset1, offset2;
  bool only_regs_p;
  ira_allocno_t a;
  reg_class_t rclass, aclass;
  machine_mode mode;
  ira_copy_t cp;

  gcc_assert (REG_SUBREG_P (reg1) && REG_SUBREG_P (reg2));
  only_regs_p = REG_P (reg1) && REG_P (reg2);
  reg1 = go_through_subreg (reg1, &offset1);
  reg2 = go_through_subreg (reg2, &offset2);
  /* Set up hard regno preferenced by allocno.  If allocno gets the
     hard regno the copy (or potential move) insn will be removed.  */
  if (HARD_REGISTER_P (reg1))
    {
      if (HARD_REGISTER_P (reg2))
	return false;
      allocno_preferenced_hard_regno = REGNO (reg1) + offset1 - offset2;
      a = ira_curr_regno_allocno_map[REGNO (reg2)];
    }
  else if (HARD_REGISTER_P (reg2))
    {
      allocno_preferenced_hard_regno = REGNO (reg2) + offset2 - offset1;
      a = ira_curr_regno_allocno_map[REGNO (reg1)];
    }
  else
    {
      ira_allocno_t a1 = ira_curr_regno_allocno_map[REGNO (reg1)];
      ira_allocno_t a2 = ira_curr_regno_allocno_map[REGNO (reg2)];

      if (!allocnos_conflict_for_copy_p (a1, a2) && offset1 == offset2)
	{
	  cp = ira_add_allocno_copy (a1, a2, freq, constraint_p, insn,
				     ira_curr_loop_tree_node);
	  bitmap_set_bit (ira_curr_loop_tree_node->local_copies, cp->num);
	  return true;
	}
      else
	return false;
    }

  if (! IN_RANGE (allocno_preferenced_hard_regno,
		  0, FIRST_PSEUDO_REGISTER - 1))
    /* Cannot be tied.  */
    return false;
  rclass = REGNO_REG_CLASS (allocno_preferenced_hard_regno);
  mode = ALLOCNO_MODE (a);
  aclass = ALLOCNO_CLASS (a);
  if (only_regs_p && insn != NULL_RTX
      && reg_class_size[rclass] <= ira_reg_class_max_nregs [rclass][mode])
    /* It is already taken into account in ira-costs.c.  */
    return false;
  index = ira_class_hard_reg_index[aclass][allocno_preferenced_hard_regno];
  if (index < 0)
    /* Cannot be tied.  It is not in the allocno class.  */
    return false;
  ira_init_register_move_cost_if_necessary (mode);
  if (HARD_REGISTER_P (reg1))
    cost = ira_register_move_cost[mode][aclass][rclass] * freq;
  else
    cost = ira_register_move_cost[mode][rclass][aclass] * freq;
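  /* Decrease the cost of the preferred hard register for A and for
     each of its parent or cap allocnos up the loop tree, so the
     preference is also visible when allocating in enclosing
     regions.  */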
  do
    {
      ira_allocate_and_set_costs
	(&ALLOCNO_HARD_REG_COSTS (a), aclass,
	 ALLOCNO_CLASS_COST (a));
      ira_allocate_and_set_costs
	(&ALLOCNO_CONFLICT_HARD_REG_COSTS (a), aclass, 0);
      ALLOCNO_HARD_REG_COSTS (a)[index] -= cost;
      ALLOCNO_CONFLICT_HARD_REG_COSTS (a)[index] -= cost;
      if (ALLOCNO_HARD_REG_COSTS (a)[index] < ALLOCNO_CLASS_COST (a))
	ALLOCNO_CLASS_COST (a) = ALLOCNO_HARD_REG_COSTS (a)[index];
      ira_add_allocno_pref (a, allocno_preferenced_hard_regno, freq);
      a = ira_parent_or_cap_allocno (a);
    }
  while (a != NULL);
  return true;
}

/* Return true if output operand OUTPUT and input operand INPUT of
   INSN can use the same register class for at least one alternative.
   INSN is already described in recog_data and recog_op_alt.  */
static bool
can_use_same_reg_p (rtx_insn *insn, int output, int input)
{
  alternative_mask preferred = get_preferred_alternatives (insn);
  for (int nalt = 0; nalt < recog_data.n_alternatives; nalt++)
    {
      if (!TEST_BIT (preferred, nalt))
	continue;

      const operand_alternative *op_alt
	= &recog_op_alt[nalt * recog_data.n_operands];
      if (op_alt[input].matches == output)
	return true;

      if (ira_reg_class_intersect[op_alt[input].cl][op_alt[output].cl]
	  != NO_REGS)
	return true;
    }
  return false;
}

/* Process all of the output registers of the current insn (INSN) which
   are not bound (BOUND_P) and the input register REG (its operand number
   OP_NUM) which dies in the insn as if there were a move insn between
   them with frequency FREQ.  */
static void
process_reg_shuffles (rtx_insn *insn, rtx reg, int op_num, int freq,
		      bool *bound_p)
{
  int i;
  rtx another_reg;

  gcc_assert (REG_SUBREG_P (reg));
  for (i = 0; i < recog_data.n_operands; i++)
    {
      another_reg = recog_data.operand[i];

      if (!REG_SUBREG_P (another_reg) || op_num == i
	  || recog_data.operand_type[i] != OP_OUT
	  || bound_p[i]
	  || (!can_use_same_reg_p (insn, i, op_num)
	      && (recog_data.constraints[op_num][0] != '%'
		  || !can_use_same_reg_p (insn, i, op_num + 1))
	      && (op_num == 0
		  || recog_data.constraints[op_num - 1][0] != '%'
		  || !can_use_same_reg_p (insn, i, op_num - 1))))
	continue;

      process_regs_for_copy (reg, another_reg, false, NULL, freq);
    }
}

/* Process INSN and create allocno copies if necessary.  For example,
   it might be because INSN is a pseudo-register move or INSN is a
   two-operand insn.  */
static void
add_insn_allocno_copies (rtx_insn *insn)
{
  rtx set, operand, dup;
  bool bound_p[MAX_RECOG_OPERANDS];
  int i, n, freq;
  alternative_mask alts;

  freq = REG_FREQ_FROM_BB (BLOCK_FOR_INSN (insn));
  if (freq == 0)
    freq = 1;
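  /* First handle INSN as a simple register-to-register move whose
     source register dies here; such a move becomes an allocno copy
     attached to INSN itself.  */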
  if ((set = single_set (insn)) != NULL_RTX
      && REG_SUBREG_P (SET_DEST (set)) && REG_SUBREG_P (SET_SRC (set))
      && ! side_effects_p (set)
      && find_reg_note (insn, REG_DEAD,
			REG_P (SET_SRC (set))
			? SET_SRC (set)
			: SUBREG_REG (SET_SRC (set))) != NULL_RTX)
    {
      process_regs_for_copy (SET_SRC (set), SET_DEST (set),
			     false, insn, freq);
      return;
    }
  /* Fast check of possibility of constraint or shuffle copies.  If
     there are no dead registers, there will be no such copies.  */
  if (! find_reg_note (insn, REG_DEAD, NULL_RTX))
    return;
  alts = ira_setup_alts (insn);
  for (i = 0; i < recog_data.n_operands; i++)
    bound_p[i] = false;
  for (i = 0; i < recog_data.n_operands; i++)
    {
      operand = recog_data.operand[i];
      if (! REG_SUBREG_P (operand))
	continue;
      if ((n = ira_get_dup_out_num (i, alts)) >= 0)
	{
	  bound_p[n] = true;
	  dup = recog_data.operand[n];
	  if (REG_SUBREG_P (dup)
	      && find_reg_note (insn, REG_DEAD,
				REG_P (operand)
				? operand
				: SUBREG_REG (operand)) != NULL_RTX)
	    process_regs_for_copy (operand, dup, true, NULL,
				   freq);
	}
    }
  for (i = 0; i < recog_data.n_operands; i++)
    {
      operand = recog_data.operand[i];
      if (REG_SUBREG_P (operand)
	  && find_reg_note (insn, REG_DEAD,
			    REG_P (operand)
			    ? operand : SUBREG_REG (operand)) != NULL_RTX)
	/* If an operand dies, prefer its hard register for the output
	   operands by decreasing the hard register cost or creating
	   the corresponding allocno copies.  The cost will not
	   correspond to a real move insn cost, so make the frequency
	   smaller.  */
	process_reg_shuffles (insn, operand, i, freq < 8 ? 1 : freq / 8,
			      bound_p);
    }
}

/* Add copies originating from the BB given by LOOP_TREE_NODE.  */
static void
add_copies (ira_loop_tree_node_t loop_tree_node)
{
  basic_block bb;
  rtx_insn *insn;

  bb = loop_tree_node->bb;
  if (bb == NULL)
    return;
  FOR_BB_INSNS (bb, insn)
    if (NONDEBUG_INSN_P (insn))
      add_insn_allocno_copies (insn);
}

/* Propagate copies to the corresponding allocnos on the upper loop
   tree level.  */
static void
propagate_copies (void)
{
  ira_copy_t cp;
  ira_copy_iterator ci;
  ira_allocno_t a1, a2, parent_a1, parent_a2;

  FOR_EACH_COPY (cp, ci)
    {
      a1 = cp->first;
      a2 = cp->second;
      if (ALLOCNO_LOOP_TREE_NODE (a1) == ira_loop_tree_root)
	continue;
      ira_assert ((ALLOCNO_LOOP_TREE_NODE (a2) != ira_loop_tree_root));
      parent_a1 = ira_parent_or_cap_allocno (a1);
      parent_a2 = ira_parent_or_cap_allocno (a2);
      ira_assert (parent_a1 != NULL && parent_a2 != NULL);
      if (! allocnos_conflict_for_copy_p (parent_a1, parent_a2))
	ira_add_allocno_copy (parent_a1, parent_a2, cp->freq,
			      cp->constraint_p, cp->insn, cp->loop_tree_node);
    }
}

/* Array used to collect all conflict allocnos for given allocno.  */
static ira_object_t *collected_conflict_objects;

/* Build conflict vectors or bit conflict vectors (whatever is more
   profitable) for object OBJ from the conflict table.  */
static void
build_object_conflicts (ira_object_t obj)
{
  int i, px, parent_num;
  ira_allocno_t parent_a, another_parent_a;
  ira_object_t parent_obj;
  ira_allocno_t a = OBJECT_ALLOCNO (obj);
  IRA_INT_TYPE *object_conflicts;
  minmax_set_iterator asi;
  int parent_min, parent_max ATTRIBUTE_UNUSED;

  object_conflicts = conflicts[OBJECT_CONFLICT_ID (obj)];
  px = 0;
  FOR_EACH_BIT_IN_MINMAX_SET (object_conflicts,
			      OBJECT_MIN (obj), OBJECT_MAX (obj), i, asi)
    {
      ira_object_t another_obj = ira_object_id_map[i];
      ira_allocno_t another_a = OBJECT_ALLOCNO (another_obj);

      ira_assert (ira_reg_classes_intersect_p
		  [ALLOCNO_CLASS (a)][ALLOCNO_CLASS (another_a)]);
      collected_conflict_objects[px++] = another_obj;
    }
  if (ira_conflict_vector_profitable_p (obj, px))
    {
      ira_object_t *vec;
      ira_allocate_conflict_vec (obj, px);
      vec = OBJECT_CONFLICT_VEC (obj);
      memcpy (vec, collected_conflict_objects, sizeof (ira_object_t) * px);
      vec[px] = NULL;
      OBJECT_NUM_CONFLICTS (obj) = px;
    }
  else
    {
      int conflict_bit_vec_words_num;

      OBJECT_CONFLICT_ARRAY (obj) = object_conflicts;
      if (OBJECT_MAX (obj) < OBJECT_MIN (obj))
	conflict_bit_vec_words_num = 0;
      else
	conflict_bit_vec_words_num
	  = ((OBJECT_MAX (obj) - OBJECT_MIN (obj) + IRA_INT_BITS)
	     / IRA_INT_BITS);
      OBJECT_CONFLICT_ARRAY_SIZE (obj)
	= conflict_bit_vec_words_num * sizeof (IRA_INT_TYPE);
    }

  parent_a = ira_parent_or_cap_allocno (a);
  if (parent_a == NULL)
    return;
  ira_assert (ALLOCNO_CLASS (a) == ALLOCNO_CLASS (parent_a));
  ira_assert (ALLOCNO_NUM_OBJECTS (a) == ALLOCNO_NUM_OBJECTS (parent_a));
  parent_obj = ALLOCNO_OBJECT (parent_a, OBJECT_SUBWORD (obj));
  parent_num = OBJECT_CONFLICT_ID (parent_obj);
  parent_min = OBJECT_MIN (parent_obj);
  parent_max = OBJECT_MAX (parent_obj);
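  /* Propagate the conflicts of OBJ to the corresponding subword object
     of the parent (or cap) allocno, mapping each conflicting object to
     the matching subword of its own parent.  */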
  FOR_EACH_BIT_IN_MINMAX_SET (object_conflicts,
			      OBJECT_MIN (obj), OBJECT_MAX (obj), i, asi)
    {
      ira_object_t another_obj = ira_object_id_map[i];
      ira_allocno_t another_a = OBJECT_ALLOCNO (another_obj);
      int another_word = OBJECT_SUBWORD (another_obj);

      ira_assert (ira_reg_classes_intersect_p
		  [ALLOCNO_CLASS (a)][ALLOCNO_CLASS (another_a)]);

      another_parent_a = ira_parent_or_cap_allocno (another_a);
      if (another_parent_a == NULL)
	continue;
      ira_assert (ALLOCNO_NUM (another_parent_a) >= 0);
      ira_assert (ALLOCNO_CLASS (another_a)
		  == ALLOCNO_CLASS (another_parent_a));
      ira_assert (ALLOCNO_NUM_OBJECTS (another_a)
		  == ALLOCNO_NUM_OBJECTS (another_parent_a));
      SET_MINMAX_SET_BIT (conflicts[parent_num],
			  OBJECT_CONFLICT_ID (ALLOCNO_OBJECT (another_parent_a,
							      another_word)),
			  parent_min, parent_max);
    }
}

/* Build conflict vectors or bit conflict vectors (whatever is more
   profitable) of all allocnos from the conflict table.  */
static void
build_conflicts (void)
{
  int i;
  ira_allocno_t a, cap;

  collected_conflict_objects
    = (ira_object_t *) ira_allocate (sizeof (ira_object_t)
				     * ira_objects_num);
  for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
    for (a = ira_regno_allocno_map[i];
	 a != NULL;
	 a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
      {
	int j, nregs = ALLOCNO_NUM_OBJECTS (a);
	for (j = 0; j < nregs; j++)
	  {
	    ira_object_t obj = ALLOCNO_OBJECT (a, j);
	    build_object_conflicts (obj);
	    for (cap = ALLOCNO_CAP (a); cap != NULL; cap = ALLOCNO_CAP (cap))
	      {
		ira_object_t cap_obj = ALLOCNO_OBJECT (cap, j);
		gcc_assert (ALLOCNO_NUM_OBJECTS (cap) == ALLOCNO_NUM_OBJECTS (a));
		build_object_conflicts (cap_obj);
	      }
	  }
      }
  ira_free (collected_conflict_objects);
}



/* Print hard reg set SET with TITLE to FILE.  */
static void
print_hard_reg_set (FILE *file, const char *title, HARD_REG_SET set)
{
  int i, start, end;

  fputs (title, file);
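  /* Members are printed as ranges: a single register as "N", two
     consecutive registers as "N M", and a longer run as "N-M".  */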
  for (start = end = -1, i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      bool reg_included = TEST_HARD_REG_BIT (set, i);

      if (reg_included)
	{
	  if (start == -1)
	    start = i;
	  end = i;
	}
      if (start >= 0 && (!reg_included || i == FIRST_PSEUDO_REGISTER - 1))
	{
	  if (start == end)
	    fprintf (file, " %d", start);
	  else if (start == end - 1)
	    fprintf (file, " %d %d", start, end);
	  else
	    fprintf (file, " %d-%d", start, end);
	  start = -1;
	}
    }
  putc ('\n', file);
}

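/* Print information about conflicts of allocno A to FILE.  If REG_P,
   print only the register numbers involved instead of full allocno
   details.  */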
static void
print_allocno_conflicts (FILE *file, bool reg_p, ira_allocno_t a)
{
  HARD_REG_SET conflicting_hard_regs;
  basic_block bb;
  int n, i;

  if (reg_p)
    fprintf (file, ";; r%d", ALLOCNO_REGNO (a));
  else
    {
      fprintf (file, ";; a%d(r%d,", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
      if ((bb = ALLOCNO_LOOP_TREE_NODE (a)->bb) != NULL)
        fprintf (file, "b%d", bb->index);
      else
        fprintf (file, "l%d", ALLOCNO_LOOP_TREE_NODE (a)->loop_num);
      putc (')', file);
    }

  fputs (" conflicts:", file);
  n = ALLOCNO_NUM_OBJECTS (a);
  for (i = 0; i < n; i++)
    {
      ira_object_t obj = ALLOCNO_OBJECT (a, i);
      ira_object_t conflict_obj;
      ira_object_conflict_iterator oci;

      if (OBJECT_CONFLICT_ARRAY (obj) == NULL)
	{
	  fprintf (file, "\n;;     total conflict hard regs:\n");
	  fprintf (file, ";;     conflict hard regs:\n\n");
	  continue;
	}

      if (n > 1)
	fprintf (file, "\n;;   subobject %d:", i);
      FOR_EACH_OBJECT_CONFLICT (obj, conflict_obj, oci)
	{
	  ira_allocno_t conflict_a = OBJECT_ALLOCNO (conflict_obj);
	  if (reg_p)
	    fprintf (file, " r%d,", ALLOCNO_REGNO (conflict_a));
	  else
	    {
	      fprintf (file, " a%d(r%d", ALLOCNO_NUM (conflict_a),
		       ALLOCNO_REGNO (conflict_a));
	      if (ALLOCNO_NUM_OBJECTS (conflict_a) > 1)
		fprintf (file, ",w%d", OBJECT_SUBWORD (conflict_obj));
	      if ((bb = ALLOCNO_LOOP_TREE_NODE (conflict_a)->bb) != NULL)
		fprintf (file, ",b%d", bb->index);
	      else
		fprintf (file, ",l%d",
			 ALLOCNO_LOOP_TREE_NODE (conflict_a)->loop_num);
	      putc (')', file);
	    }
	}
      conflicting_hard_regs = (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj)
			       & ~ira_no_alloc_regs
			       & reg_class_contents[ALLOCNO_CLASS (a)]);
      print_hard_reg_set (file, "\n;;     total conflict hard regs:",
			  conflicting_hard_regs);

      conflicting_hard_regs = (OBJECT_CONFLICT_HARD_REGS (obj)
			       & ~ira_no_alloc_regs
			       & reg_class_contents[ALLOCNO_CLASS (a)]);
      print_hard_reg_set (file, ";;     conflict hard regs:",
			  conflicting_hard_regs);
      putc ('\n', file);
    }

}

/* Print information about allocno or only regno (if REG_P) conflicts
   to FILE.  */
static void
print_conflicts (FILE *file, bool reg_p)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;

  FOR_EACH_ALLOCNO (a, ai)
    print_allocno_conflicts (file, reg_p, a);
  putc ('\n', file);
}

/* Print information about allocno or only regno (if REG_P) conflicts
   to stderr.  */
void
ira_debug_conflicts (bool reg_p)
{
  print_conflicts (stderr, reg_p);
}



/* Entry function which builds allocno conflicts and allocno copies
   and accumulates some allocno info on upper level regions.  */
void
ira_build_conflicts (void)
{
  enum reg_class base;
  ira_allocno_t a;
  ira_allocno_iterator ai;
  HARD_REG_SET temp_hard_reg_set;

  if (ira_conflicts_p)
    {
      ira_conflicts_p = build_conflict_bit_table ();
      if (ira_conflicts_p)
	{
	  ira_object_t obj;
	  ira_object_iterator oi;

	  build_conflicts ();
	  ira_traverse_loop_tree (true, ira_loop_tree_root, add_copies, NULL);
	  /* We need the finished conflict table for the subsequent call.  */
	  if (flag_ira_region == IRA_REGION_ALL
	      || flag_ira_region == IRA_REGION_MIXED)
	    propagate_copies ();

	  /* Now we can free memory for the conflict table (see function
	     build_object_conflicts for details).  */
	  FOR_EACH_OBJECT (obj, oi)
	    {
	      if (OBJECT_CONFLICT_ARRAY (obj) != conflicts[OBJECT_CONFLICT_ID (obj)])
		ira_free (conflicts[OBJECT_CONFLICT_ID (obj)]);
	    }
	  ira_free (conflicts);
	}
    }
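  /* TEMP_HARD_REG_SET below holds the allocatable registers of the
     base address register class when that class is likely to be
     spilled, and is empty otherwise.  It keeps such registers in the
     conflict sets of call-crossing allocnos even when caller saves
     could otherwise preserve them.  */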
  base = base_reg_class (VOIDmode, ADDR_SPACE_GENERIC, ADDRESS, SCRATCH);
  if (! targetm.class_likely_spilled_p (base))
    CLEAR_HARD_REG_SET (temp_hard_reg_set);
  else
    temp_hard_reg_set = reg_class_contents[base] & ~ira_no_alloc_regs;
  FOR_EACH_ALLOCNO (a, ai)
    {
      int i, n = ALLOCNO_NUM_OBJECTS (a);

      for (i = 0; i < n; i++)
	{
	  ira_object_t obj = ALLOCNO_OBJECT (a, i);
	  rtx allocno_reg = regno_reg_rtx [ALLOCNO_REGNO (a)];

	  /* For debugging purposes don't put user defined variables in
	     callee-clobbered registers.  However, do allow parameters
	     in callee-clobbered registers to improve debugging.  This
	     is a bit of a fragile hack.  */
	  if (optimize == 0
	      && REG_USERVAR_P (allocno_reg)
	      && ! reg_is_parm_p (allocno_reg))
	    {
	      HARD_REG_SET new_conflict_regs = crtl->abi->full_reg_clobbers ();
	      OBJECT_TOTAL_CONFLICT_HARD_REGS (obj) |= new_conflict_regs;
	      OBJECT_CONFLICT_HARD_REGS (obj) |= new_conflict_regs;
	    }

	  if (ALLOCNO_CALLS_CROSSED_NUM (a) != 0)
	    {
	      HARD_REG_SET new_conflict_regs = ira_need_caller_save_regs (a);
	      if (flag_caller_saves)
		new_conflict_regs &= (~savable_regs | temp_hard_reg_set);
	      OBJECT_TOTAL_CONFLICT_HARD_REGS (obj) |= new_conflict_regs;
	      OBJECT_CONFLICT_HARD_REGS (obj) |= new_conflict_regs;
	    }

	  /* Now we deal with paradoxical subreg cases where certain registers
	     cannot be accessed in the widest mode.  */
	  machine_mode outer_mode = ALLOCNO_WMODE (a);
	  machine_mode inner_mode = ALLOCNO_MODE (a);
	  if (paradoxical_subreg_p (outer_mode, inner_mode))
	    {
	      enum reg_class aclass = ALLOCNO_CLASS (a);
	      for (int j = ira_class_hard_regs_num[aclass] - 1; j >= 0; --j)
		{
		   int inner_regno = ira_class_hard_regs[aclass][j];
		   int outer_regno = simplify_subreg_regno (inner_regno,
							    inner_mode, 0,
							    outer_mode);
		   if (outer_regno < 0
		       || !in_hard_reg_set_p (reg_class_contents[aclass],
					      outer_mode, outer_regno))
		     {
		       SET_HARD_REG_BIT (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj),
					 inner_regno);
		       SET_HARD_REG_BIT (OBJECT_CONFLICT_HARD_REGS (obj),
					 inner_regno);
		     }
		}
	    }
	}
    }
  if (optimize && ira_conflicts_p
      && internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
    print_conflicts (ira_dump_file, false);
}