1 /* Analyze RTL for GNU compiler.
2    Copyright (C) 1987-2021 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "rtlanal.h"
28 #include "tree.h"
29 #include "predict.h"
30 #include "df.h"
31 #include "memmodel.h"
32 #include "tm_p.h"
33 #include "insn-config.h"
34 #include "regs.h"
35 #include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
36 #include "recog.h"
37 #include "addresses.h"
38 #include "rtl-iter.h"
39 #include "hard-reg-set.h"
40 #include "function-abi.h"
41 
42 /* Forward declarations */
43 static void set_of_1 (rtx, const_rtx, void *);
44 static bool covers_regno_p (const_rtx, unsigned int);
45 static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
46 static int computed_jump_p_1 (const_rtx);
47 static void parms_set (rtx, const_rtx, void *);
48 
49 static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, scalar_int_mode,
50                                                    const_rtx, machine_mode,
51                                                    unsigned HOST_WIDE_INT);
52 static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, scalar_int_mode,
53 					     const_rtx, machine_mode,
54                                              unsigned HOST_WIDE_INT);
55 static unsigned int cached_num_sign_bit_copies (const_rtx, scalar_int_mode,
56 						const_rtx, machine_mode,
57                                                 unsigned int);
58 static unsigned int num_sign_bit_copies1 (const_rtx, scalar_int_mode,
59 					  const_rtx, machine_mode,
60 					  unsigned int);
61 
62 rtx_subrtx_bound_info rtx_all_subrtx_bounds[NUM_RTX_CODE];
63 rtx_subrtx_bound_info rtx_nonconst_subrtx_bounds[NUM_RTX_CODE];
64 
65 /* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
66    If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
67    SIGN_EXTEND then while narrowing we also have to enforce the
68    representation and sign-extend the value to mode DESTINATION_REP.
69 
70    If the value is already sign-extended to DESTINATION_REP mode we
71    can just switch to DESTINATION mode on it.  For each pair of
72    integral modes SOURCE and DESTINATION, when truncating from SOURCE
73    to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
74    contains the number of high-order bits in SOURCE that have to be
75    copies of the sign-bit so that we can do this mode-switch to
76    DESTINATION.  */
77 
78 static unsigned int
79 num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
80 
81 /* Store X into index I of ARRAY.  ARRAY is known to have at least I
82    elements.  Return the new base of ARRAY.  */
83 
84 template <typename T>
85 typename T::value_type *
86 generic_subrtx_iterator <T>::add_single_to_queue (array_type &array,
87 						  value_type *base,
88 						  size_t i, value_type x)
89 {
90   if (base == array.stack)
91     {
92       if (i < LOCAL_ELEMS)
93 	{
94 	  base[i] = x;
95 	  return base;
96 	}
97       gcc_checking_assert (i == LOCAL_ELEMS);
98       /* A previous iteration might also have moved from the stack to the
99 	 heap, in which case the heap array will already be big enough.  */
100       if (vec_safe_length (array.heap) <= i)
101 	vec_safe_grow (array.heap, i + 1, true);
102       base = array.heap->address ();
103       memcpy (base, array.stack, sizeof (array.stack));
104       base[LOCAL_ELEMS] = x;
105       return base;
106     }
107   unsigned int length = array.heap->length ();
108   if (length > i)
109     {
110       gcc_checking_assert (base == array.heap->address ());
111       base[i] = x;
112       return base;
113     }
114   else
115     {
116       gcc_checking_assert (i == length);
117       vec_safe_push (array.heap, x);
118       return array.heap->address ();
119     }
120 }
121 
122 /* Add the subrtxes of X to worklist ARRAY, starting at END.  Return the
123    number of elements added to the worklist.  */
124 
125 template <typename T>
126 size_t
127 generic_subrtx_iterator <T>::add_subrtxes_to_queue (array_type &array,
128 						    value_type *base,
129 						    size_t end, rtx_type x)
130 {
131   enum rtx_code code = GET_CODE (x);
132   const char *format = GET_RTX_FORMAT (code);
133   size_t orig_end = end;
134   if (__builtin_expect (INSN_P (x), false))
135     {
136       /* Put the pattern at the top of the queue, since that's what
137 	 we're likely to want most.  It also allows for the SEQUENCE
138 	 code below.  */
139       for (int i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; --i)
140 	if (format[i] == 'e')
141 	  {
142 	    value_type subx = T::get_value (x->u.fld[i].rt_rtx);
143 	    if (__builtin_expect (end < LOCAL_ELEMS, true))
144 	      base[end++] = subx;
145 	    else
146 	      base = add_single_to_queue (array, base, end++, subx);
147 	  }
148     }
149   else
150     for (int i = 0; format[i]; ++i)
151       if (format[i] == 'e')
152 	{
153 	  value_type subx = T::get_value (x->u.fld[i].rt_rtx);
154 	  if (__builtin_expect (end < LOCAL_ELEMS, true))
155 	    base[end++] = subx;
156 	  else
157 	    base = add_single_to_queue (array, base, end++, subx);
158 	}
159       else if (format[i] == 'E')
160 	{
161 	  unsigned int length = GET_NUM_ELEM (x->u.fld[i].rt_rtvec);
162 	  rtx *vec = x->u.fld[i].rt_rtvec->elem;
163 	  if (__builtin_expect (end + length <= LOCAL_ELEMS, true))
164 	    for (unsigned int j = 0; j < length; j++)
165 	      base[end++] = T::get_value (vec[j]);
166 	  else
167 	    for (unsigned int j = 0; j < length; j++)
168 	      base = add_single_to_queue (array, base, end++,
169 					  T::get_value (vec[j]));
170 	  if (code == SEQUENCE && end == length)
171 	    /* If the subrtxes of the sequence fill the entire array then
172 	       we know that no other parts of a containing insn are queued.
173 	       The caller is therefore iterating over the sequence as a
174 	       PATTERN (...), so we also want the patterns of the
175 	       subinstructions.  */
176 	    for (unsigned int j = 0; j < length; j++)
177 	      {
178 		typename T::rtx_type x = T::get_rtx (base[j]);
179 		if (INSN_P (x))
180 		  base[j] = T::get_value (PATTERN (x));
181 	      }
182 	}
183   return end - orig_end;
184 }
185 
186 template <typename T>
187 void
188 generic_subrtx_iterator <T>::free_array (array_type &array)
189 {
190   vec_free (array.heap);
191 }
192 
193 template <typename T>
194 const size_t generic_subrtx_iterator <T>::LOCAL_ELEMS;
195 
196 template class generic_subrtx_iterator <const_rtx_accessor>;
197 template class generic_subrtx_iterator <rtx_var_accessor>;
198 template class generic_subrtx_iterator <rtx_ptr_accessor>;
199 
200 /* Return 1 if the value of X is unstable
201    (would be different at a different point in the program).
202    The frame pointer, arg pointer, etc. are considered stable
203    (within one function) and so is anything marked `unchanging'.  */
204 
205 int
206 rtx_unstable_p (const_rtx x)
207 {
208   const RTX_CODE code = GET_CODE (x);
209   int i;
210   const char *fmt;
211 
212   switch (code)
213     {
214     case MEM:
215       return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));
216 
217     case CONST:
218     CASE_CONST_ANY:
219     case SYMBOL_REF:
220     case LABEL_REF:
221       return 0;
222 
223     case REG:
224       /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
225       if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
226 	  /* The arg pointer varies if it is not a fixed register.  */
227 	  || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
228 	return 0;
229       /* ??? When call-clobbered, the value is stable modulo the restore
230 	 that must happen after a call.  This currently screws up local-alloc
231 	 into believing that the restore is not needed.  */
232       if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
233 	return 0;
234       return 1;
235 
236     case ASM_OPERANDS:
237       if (MEM_VOLATILE_P (x))
238 	return 1;
239 
240       /* Fall through.  */
241 
242     default:
243       break;
244     }
245 
246   fmt = GET_RTX_FORMAT (code);
247   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
248     if (fmt[i] == 'e')
249       {
250 	if (rtx_unstable_p (XEXP (x, i)))
251 	  return 1;
252       }
253     else if (fmt[i] == 'E')
254       {
255 	int j;
256 	for (j = 0; j < XVECLEN (x, i); j++)
257 	  if (rtx_unstable_p (XVECEXP (x, i, j)))
258 	    return 1;
259       }
260 
261   return 0;
262 }
263 
264 /* Return 1 if X has a value that can vary even between two
265    executions of the program.  0 means X can be compared reliably
266    against certain constants or near-constants.
267    FOR_ALIAS is nonzero if we are called from alias analysis; if it is
268    zero, we are slightly more conservative.
269    The frame pointer and the arg pointer are considered constant.  */
270 
271 bool
272 rtx_varies_p (const_rtx x, bool for_alias)
273 {
274   RTX_CODE code;
275   int i;
276   const char *fmt;
277 
278   if (!x)
279     return 0;
280 
281   code = GET_CODE (x);
282   switch (code)
283     {
284     case MEM:
285       return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);
286 
287     case CONST:
288     CASE_CONST_ANY:
289     case SYMBOL_REF:
290     case LABEL_REF:
291       return 0;
292 
293     case REG:
294       /* Note that we have to test for the actual rtx used for the frame
295 	 and arg pointers and not just the register number in case we have
296 	 eliminated the frame and/or arg pointer and are using it
297 	 for pseudos.  */
298       if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
299 	  /* The arg pointer varies if it is not a fixed register.  */
300 	  || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
301 	return 0;
302       if (x == pic_offset_table_rtx
303 	  /* ??? When call-clobbered, the value is stable modulo the restore
304 	     that must happen after a call.  This currently screws up
305 	     local-alloc into believing that the restore is not needed, so we
306 	     must return 0 only if we are called from alias analysis.  */
307 	  && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
308 	return 0;
309       return 1;
310 
311     case LO_SUM:
312       /* The operand 0 of a LO_SUM is considered constant
313 	 (in fact it is related specifically to operand 1)
314 	 during alias analysis.  */
315       return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
316 	     || rtx_varies_p (XEXP (x, 1), for_alias);
317 
318     case ASM_OPERANDS:
319       if (MEM_VOLATILE_P (x))
320 	return 1;
321 
322       /* Fall through.  */
323 
324     default:
325       break;
326     }
327 
328   fmt = GET_RTX_FORMAT (code);
329   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
330     if (fmt[i] == 'e')
331       {
332 	if (rtx_varies_p (XEXP (x, i), for_alias))
333 	  return 1;
334       }
335     else if (fmt[i] == 'E')
336       {
337 	int j;
338 	for (j = 0; j < XVECLEN (x, i); j++)
339 	  if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
340 	    return 1;
341       }
342 
343   return 0;
344 }
345 
346 /* Compute an approximation for the offset between the registers
347    FROM and TO for the current function, as it was at the start
348    of the routine.  */
349 
350 static poly_int64
351 get_initial_register_offset (int from, int to)
352 {
353   static const struct elim_table_t
354   {
355     const int from;
356     const int to;
357   } table[] = ELIMINABLE_REGS;
358   poly_int64 offset1, offset2;
359   unsigned int i, j;
360 
361   if (to == from)
362     return 0;
363 
364   /* It is not safe to call INITIAL_ELIMINATION_OFFSET before the epilogue
365      is completed, but we need to give at least an estimate for the stack
366      pointer based on the frame size.  */
367   if (!epilogue_completed)
368     {
369       offset1 = crtl->outgoing_args_size + get_frame_size ();
370 #if !STACK_GROWS_DOWNWARD
371       offset1 = - offset1;
372 #endif
373       if (to == STACK_POINTER_REGNUM)
374 	return offset1;
375       else if (from == STACK_POINTER_REGNUM)
376 	return - offset1;
377       else
378 	return 0;
379      }
380 
381   for (i = 0; i < ARRAY_SIZE (table); i++)
382       if (table[i].from == from)
383 	{
384 	  if (table[i].to == to)
385 	    {
386 	      INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
387 					  offset1);
388 	      return offset1;
389 	    }
390 	  for (j = 0; j < ARRAY_SIZE (table); j++)
391 	    {
392 	      if (table[j].to == to
393 		  && table[j].from == table[i].to)
394 		{
395 		  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
396 					      offset1);
397 		  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
398 					      offset2);
399 		  return offset1 + offset2;
400 		}
401 	      if (table[j].from == to
402 		  && table[j].to == table[i].to)
403 		{
404 		  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
405 					      offset1);
406 		  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
407 					      offset2);
408 		  return offset1 - offset2;
409 		}
410 	    }
411 	}
412       else if (table[i].to == from)
413 	{
414 	  if (table[i].from == to)
415 	    {
416 	      INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
417 					  offset1);
418 	      return - offset1;
419 	    }
420 	  for (j = 0; j < ARRAY_SIZE (table); j++)
421 	    {
422 	      if (table[j].to == to
423 		  && table[j].from == table[i].from)
424 		{
425 		  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
426 					      offset1);
427 		  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
428 					      offset2);
429 		  return - offset1 + offset2;
430 		}
431 	      if (table[j].from == to
432 		  && table[j].to == table[i].from)
433 		{
434 		  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
435 					      offset1);
436 		  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
437 					      offset2);
438 		  return - offset1 - offset2;
439 		}
440 	    }
441 	}
442 
443   /* If the requested register combination was not found,
444      try a simpler combination.  */
445   if (from == ARG_POINTER_REGNUM)
446     return get_initial_register_offset (HARD_FRAME_POINTER_REGNUM, to);
447   else if (to == ARG_POINTER_REGNUM)
448     return get_initial_register_offset (from, HARD_FRAME_POINTER_REGNUM);
449   else if (from == HARD_FRAME_POINTER_REGNUM)
450     return get_initial_register_offset (FRAME_POINTER_REGNUM, to);
451   else if (to == HARD_FRAME_POINTER_REGNUM)
452     return get_initial_register_offset (from, FRAME_POINTER_REGNUM);
453   else
454     return 0;
455 }
456 
457 /* Return nonzero if the use of X+OFFSET as an address in a MEM with SIZE
458    bytes can cause a trap.  MODE is the mode of the MEM (not that of X) and
459    UNALIGNED_MEMS controls whether nonzero is returned for unaligned memory
460    references on strict alignment machines.  */
461 
462 static int
463 rtx_addr_can_trap_p_1 (const_rtx x, poly_int64 offset, poly_int64 size,
464 		       machine_mode mode, bool unaligned_mems)
465 {
466   enum rtx_code code = GET_CODE (x);
467   gcc_checking_assert (mode == BLKmode
468 		       || mode == VOIDmode
469 		       || known_size_p (size));
470   poly_int64 const_x1;
471 
472   /* The offset must be a multiple of the mode size if we are considering
473      unaligned memory references on strict alignment machines.  */
474   if (STRICT_ALIGNMENT
475       && unaligned_mems
476       && mode != BLKmode
477       && mode != VOIDmode)
478     {
479       poly_int64 actual_offset = offset;
480 
481 #ifdef SPARC_STACK_BOUNDARY_HACK
482       /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
483 	     the real alignment of %sp.  However, when it does this, the
484 	     alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY.  */
485       if (SPARC_STACK_BOUNDARY_HACK
486 	  && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
487 	actual_offset -= STACK_POINTER_OFFSET;
488 #endif
489 
490       if (!multiple_p (actual_offset, GET_MODE_SIZE (mode)))
491 	return 1;
492     }
493 
494   switch (code)
495     {
496     case SYMBOL_REF:
497       if (SYMBOL_REF_WEAK (x))
498 	return 1;
499       if (!CONSTANT_POOL_ADDRESS_P (x) && !SYMBOL_REF_FUNCTION_P (x))
500 	{
501 	  tree decl;
502 	  poly_int64 decl_size;
503 
504 	  if (maybe_lt (offset, 0))
505 	    return 1;
506 	  if (!known_size_p (size))
507 	    return maybe_ne (offset, 0);
508 
509 	  /* If the size of the access or of the symbol is unknown,
510 	     assume the worst.  */
511 	  decl = SYMBOL_REF_DECL (x);
512 
513 	  /* Else check that the access is in bounds.  TODO: restructure
514 	     expr_size/tree_expr_size/int_expr_size and just use the latter.  */
515 	  if (!decl)
516 	    decl_size = -1;
517 	  else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
518 	    {
519 	      if (!poly_int_tree_p (DECL_SIZE_UNIT (decl), &decl_size))
520 		decl_size = -1;
521 	    }
522 	  else if (TREE_CODE (decl) == STRING_CST)
523 	    decl_size = TREE_STRING_LENGTH (decl);
524 	  else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
525 	    decl_size = int_size_in_bytes (TREE_TYPE (decl));
526 	  else
527 	    decl_size = -1;
528 
529 	  return (!known_size_p (decl_size) || known_eq (decl_size, 0)
530 		  ? maybe_ne (offset, 0)
531 		  : !known_subrange_p (offset, size, 0, decl_size));
532         }
533 
534       return 0;
535 
536     case LABEL_REF:
537       return 0;
538 
539     case REG:
540       /* Stack references are assumed not to trap, but we need to deal with
541 	 nonsensical offsets.  */
542       if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
543 	 || x == stack_pointer_rtx
544 	 /* The arg pointer varies if it is not a fixed register.  */
545 	 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
546 	{
547 #ifdef RED_ZONE_SIZE
548 	  poly_int64 red_zone_size = RED_ZONE_SIZE;
549 #else
550 	  poly_int64 red_zone_size = 0;
551 #endif
552 	  poly_int64 stack_boundary = PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT;
553 	  poly_int64 low_bound, high_bound;
554 
555 	  if (!known_size_p (size))
556 	    return 1;
557 
558 	  if (x == frame_pointer_rtx)
559 	    {
560 	      if (FRAME_GROWS_DOWNWARD)
561 		{
562 		  high_bound = targetm.starting_frame_offset ();
563 		  low_bound  = high_bound - get_frame_size ();
564 		}
565 	      else
566 		{
567 		  low_bound  = targetm.starting_frame_offset ();
568 		  high_bound = low_bound + get_frame_size ();
569 		}
570 	    }
571 	  else if (x == hard_frame_pointer_rtx)
572 	    {
573 	      poly_int64 sp_offset
574 		= get_initial_register_offset (STACK_POINTER_REGNUM,
575 					       HARD_FRAME_POINTER_REGNUM);
576 	      poly_int64 ap_offset
577 		= get_initial_register_offset (ARG_POINTER_REGNUM,
578 					       HARD_FRAME_POINTER_REGNUM);
579 
580 #if STACK_GROWS_DOWNWARD
581 	      low_bound  = sp_offset - red_zone_size - stack_boundary;
582 	      high_bound = ap_offset
583 			   + FIRST_PARM_OFFSET (current_function_decl)
584 #if !ARGS_GROW_DOWNWARD
585 			   + crtl->args.size
586 #endif
587 			   + stack_boundary;
588 #else
589 	      high_bound = sp_offset + red_zone_size + stack_boundary;
590 	      low_bound  = ap_offset
591 			   + FIRST_PARM_OFFSET (current_function_decl)
592 #if ARGS_GROW_DOWNWARD
593 			   - crtl->args.size
594 #endif
595 			   - stack_boundary;
596 #endif
597 	    }
598 	  else if (x == stack_pointer_rtx)
599 	    {
600 	      poly_int64 ap_offset
601 		= get_initial_register_offset (ARG_POINTER_REGNUM,
602 					       STACK_POINTER_REGNUM);
603 
604 #if STACK_GROWS_DOWNWARD
605 	      low_bound  = - red_zone_size - stack_boundary;
606 	      high_bound = ap_offset
607 			   + FIRST_PARM_OFFSET (current_function_decl)
608 #if !ARGS_GROW_DOWNWARD
609 			   + crtl->args.size
610 #endif
611 			   + stack_boundary;
612 #else
613 	      high_bound = red_zone_size + stack_boundary;
614 	      low_bound  = ap_offset
615 			   + FIRST_PARM_OFFSET (current_function_decl)
616 #if ARGS_GROW_DOWNWARD
617 			   - crtl->args.size
618 #endif
619 			   - stack_boundary;
620 #endif
621 	    }
622 	  else
623 	    {
624 	      /* We assume that accesses are safe to at least the
625 		 next stack boundary.
626 		 Examples are varargs and __builtin_return_address.  */
627 #if ARGS_GROW_DOWNWARD
628 	      high_bound = FIRST_PARM_OFFSET (current_function_decl)
629 			   + stack_boundary;
630 	      low_bound  = FIRST_PARM_OFFSET (current_function_decl)
631 			   - crtl->args.size - stack_boundary;
632 #else
633 	      low_bound  = FIRST_PARM_OFFSET (current_function_decl)
634 			   - stack_boundary;
635 	      high_bound = FIRST_PARM_OFFSET (current_function_decl)
636 			   + crtl->args.size + stack_boundary;
637 #endif
638 	    }
639 
640 	  if (known_ge (offset, low_bound)
641 	      && known_le (offset, high_bound - size))
642 	    return 0;
643 	  return 1;
644 	}
645       /* All of the virtual frame registers are stack references.  */
646       if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
647 	  && REGNO (x) <= LAST_VIRTUAL_REGISTER)
648 	return 0;
649       return 1;
650 
651     case CONST:
652       return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
653 				    mode, unaligned_mems);
654 
655     case PLUS:
656       /* An address is assumed not to trap if:
657 	 - it is the pic register plus a const unspec without offset.  */
658       if (XEXP (x, 0) == pic_offset_table_rtx
659 	  && GET_CODE (XEXP (x, 1)) == CONST
660 	  && GET_CODE (XEXP (XEXP (x, 1), 0)) == UNSPEC
661 	  && known_eq (offset, 0))
662 	return 0;
663 
664       /* - or it is an address that can't trap plus a constant integer.  */
665       if (poly_int_rtx_p (XEXP (x, 1), &const_x1)
666 	  && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + const_x1,
667 				     size, mode, unaligned_mems))
668 	return 0;
669 
670       return 1;
671 
672     case LO_SUM:
673     case PRE_MODIFY:
674       return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
675 				    mode, unaligned_mems);
676 
677     case PRE_DEC:
678     case PRE_INC:
679     case POST_DEC:
680     case POST_INC:
681     case POST_MODIFY:
682       return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
683 				    mode, unaligned_mems);
684 
685     default:
686       break;
687     }
688 
689   /* If it isn't one of the cases above, it can cause a trap.  */
690   return 1;
691 }
692 
693 /* Return nonzero if the use of X as an address in a MEM can cause a trap.  */
694 
695 int
696 rtx_addr_can_trap_p (const_rtx x)
697 {
698   return rtx_addr_can_trap_p_1 (x, 0, -1, BLKmode, false);
699 }
700 
701 /* Return true if X contains a MEM subrtx.  */
702 
703 bool
704 contains_mem_rtx_p (rtx x)
705 {
706   subrtx_iterator::array_type array;
707   FOR_EACH_SUBRTX (iter, array, x, ALL)
708     if (MEM_P (*iter))
709       return true;
710 
711   return false;
712 }
713 
714 /* Return true if X is an address that is known to not be zero.  */
715 
716 bool
717 nonzero_address_p (const_rtx x)
718 {
719   const enum rtx_code code = GET_CODE (x);
720 
721   switch (code)
722     {
723     case SYMBOL_REF:
724       return flag_delete_null_pointer_checks && !SYMBOL_REF_WEAK (x);
725 
726     case LABEL_REF:
727       return true;
728 
729     case REG:
730       /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
731       if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
732 	  || x == stack_pointer_rtx
733 	  || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
734 	return true;
735       /* All of the virtual frame registers are stack references.  */
736       if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
737 	  && REGNO (x) <= LAST_VIRTUAL_REGISTER)
738 	return true;
739       return false;
740 
741     case CONST:
742       return nonzero_address_p (XEXP (x, 0));
743 
744     case PLUS:
745       /* Handle PIC references.  */
746       if (XEXP (x, 0) == pic_offset_table_rtx
747 	       && CONSTANT_P (XEXP (x, 1)))
748 	return true;
749       return false;
750 
751     case PRE_MODIFY:
752       /* Similar to the above; allow positive offsets.  Further, since
753 	 auto-inc is only allowed in memories, the register must be a
754 	 pointer.  */
755       if (CONST_INT_P (XEXP (x, 1))
756 	  && INTVAL (XEXP (x, 1)) > 0)
757 	return true;
758       return nonzero_address_p (XEXP (x, 0));
759 
760     case PRE_INC:
761       /* Similarly.  Further, the offset is always positive.  */
762       return true;
763 
764     case PRE_DEC:
765     case POST_DEC:
766     case POST_INC:
767     case POST_MODIFY:
768       return nonzero_address_p (XEXP (x, 0));
769 
770     case LO_SUM:
771       return nonzero_address_p (XEXP (x, 1));
772 
773     default:
774       break;
775     }
776 
777   /* If it isn't one of the cases above, it might be zero.  */
778   return false;
779 }
780 
781 /* Return 1 if X refers to a memory location whose address
782    cannot be compared reliably with constant addresses,
783    or if X refers to a BLKmode memory object.
784    FOR_ALIAS is nonzero if we are called from alias analysis; if it is
785    zero, we are slightly more conservative.  */
786 
787 bool
788 rtx_addr_varies_p (const_rtx x, bool for_alias)
789 {
790   enum rtx_code code;
791   int i;
792   const char *fmt;
793 
794   if (x == 0)
795     return 0;
796 
797   code = GET_CODE (x);
798   if (code == MEM)
799     return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);
800 
801   fmt = GET_RTX_FORMAT (code);
802   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
803     if (fmt[i] == 'e')
804       {
805 	if (rtx_addr_varies_p (XEXP (x, i), for_alias))
806 	  return 1;
807       }
808     else if (fmt[i] == 'E')
809       {
810 	int j;
811 	for (j = 0; j < XVECLEN (x, i); j++)
812 	  if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
813 	    return 1;
814       }
815   return 0;
816 }
817 
818 /* Return the CALL in the pattern of INSN, if there is one.  */
819 
820 rtx
821 get_call_rtx_from (const rtx_insn *insn)
822 {
823   rtx x = PATTERN (insn);
824   if (GET_CODE (x) == PARALLEL)
825     x = XVECEXP (x, 0, 0);
826   if (GET_CODE (x) == SET)
827     x = SET_SRC (x);
828   if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
829     return x;
830   return NULL_RTX;
831 }
832 
833 /* Get the declaration of the function called by INSN.  */
834 
835 tree
836 get_call_fndecl (const rtx_insn *insn)
837 {
838   rtx note, datum;
839 
840   note = find_reg_note (insn, REG_CALL_DECL, NULL_RTX);
841   if (note == NULL_RTX)
842     return NULL_TREE;
843 
844   datum = XEXP (note, 0);
845   if (datum != NULL_RTX)
846     return SYMBOL_REF_DECL (datum);
847 
848   return NULL_TREE;
849 }
850 
851 /* Return the value of the integer term in X, if one is apparent;
852    otherwise return 0.
853    Only obvious integer terms are detected.
854    This is used in cse.c with the `related_value' field.  */
855 
856 HOST_WIDE_INT
857 get_integer_term (const_rtx x)
858 {
859   if (GET_CODE (x) == CONST)
860     x = XEXP (x, 0);
861 
862   if (GET_CODE (x) == MINUS
863       && CONST_INT_P (XEXP (x, 1)))
864     return - INTVAL (XEXP (x, 1));
865   if (GET_CODE (x) == PLUS
866       && CONST_INT_P (XEXP (x, 1)))
867     return INTVAL (XEXP (x, 1));
868   return 0;
869 }
870 
871 /* If X is a constant, return the value sans apparent integer term;
872    otherwise return 0.
873    Only obvious integer terms are detected.  */
874 
875 rtx
876 get_related_value (const_rtx x)
877 {
878   if (GET_CODE (x) != CONST)
879     return 0;
880   x = XEXP (x, 0);
881   if (GET_CODE (x) == PLUS
882       && CONST_INT_P (XEXP (x, 1)))
883     return XEXP (x, 0);
884   else if (GET_CODE (x) == MINUS
885 	   && CONST_INT_P (XEXP (x, 1)))
886     return XEXP (x, 0);
887   return 0;
888 }
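
/* For example, given X = (const (plus (symbol_ref "foo") (const_int 8))),
   get_integer_term returns 8 and get_related_value returns the inner
   (symbol_ref "foo").  */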
889 
890 /* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
891    to somewhere in the same object or object_block as SYMBOL.  */
892 
893 bool
894 offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
895 {
896   tree decl;
897 
898   if (GET_CODE (symbol) != SYMBOL_REF)
899     return false;
900 
901   if (offset == 0)
902     return true;
903 
904   if (offset > 0)
905     {
906       if (CONSTANT_POOL_ADDRESS_P (symbol)
907 	  && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
908 	return true;
909 
910       decl = SYMBOL_REF_DECL (symbol);
911       if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
912 	return true;
913     }
914 
915   if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
916       && SYMBOL_REF_BLOCK (symbol)
917       && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
918       && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
919 	  < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
920     return true;
921 
922   return false;
923 }
924 
925 /* Split X into a base and a constant offset, storing them in *BASE_OUT
926    and *OFFSET_OUT respectively.  */
927 
928 void
929 split_const (rtx x, rtx *base_out, rtx *offset_out)
930 {
931   if (GET_CODE (x) == CONST)
932     {
933       x = XEXP (x, 0);
934       if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
935 	{
936 	  *base_out = XEXP (x, 0);
937 	  *offset_out = XEXP (x, 1);
938 	  return;
939 	}
940     }
941   *base_out = x;
942   *offset_out = const0_rtx;
943 }
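
/* For example, given X = (const (plus (symbol_ref "x") (const_int 4))),
   split_const stores (symbol_ref "x") in *BASE_OUT and (const_int 4) in
   *OFFSET_OUT; for any other X it stores X itself and const0_rtx.  */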
944 
945 /* Express integer value X as some value Y plus a polynomial offset,
946    where Y is either const0_rtx, X or something within X (as opposed
947    to a new rtx).  Return the Y and store the offset in *OFFSET_OUT.  */
948 
949 rtx
950 strip_offset (rtx x, poly_int64_pod *offset_out)
951 {
952   rtx base = const0_rtx;
953   rtx test = x;
954   if (GET_CODE (test) == CONST)
955     test = XEXP (test, 0);
956   if (GET_CODE (test) == PLUS)
957     {
958       base = XEXP (test, 0);
959       test = XEXP (test, 1);
960     }
961   if (poly_int_rtx_p (test, offset_out))
962     return base;
963   *offset_out = 0;
964   return x;
965 }
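
/* For example, strip_offset on (plus (reg R) (const_int 16)) returns (reg R)
   and sets *OFFSET_OUT to 16, while a bare (const_int 12) yields const0_rtx
   with *OFFSET_OUT set to 12.  */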
966 
967 /* Return the argument size in REG_ARGS_SIZE note X.  */
968 
969 poly_int64
970 get_args_size (const_rtx x)
971 {
972   gcc_checking_assert (REG_NOTE_KIND (x) == REG_ARGS_SIZE);
973   return rtx_to_poly_int64 (XEXP (x, 0));
974 }
975 
976 /* Return the number of places FIND appears within X.  If COUNT_DEST is
977    zero, we do not count occurrences inside the destination of a SET.  */
978 
979 int
980 count_occurrences (const_rtx x, const_rtx find, int count_dest)
981 {
982   int i, j;
983   enum rtx_code code;
984   const char *format_ptr;
985   int count;
986 
987   if (x == find)
988     return 1;
989 
990   code = GET_CODE (x);
991 
992   switch (code)
993     {
994     case REG:
995     CASE_CONST_ANY:
996     case SYMBOL_REF:
997     case CODE_LABEL:
998     case PC:
999       return 0;
1000 
1001     case EXPR_LIST:
1002       count = count_occurrences (XEXP (x, 0), find, count_dest);
1003       if (XEXP (x, 1))
1004 	count += count_occurrences (XEXP (x, 1), find, count_dest);
1005       return count;
1006 
1007     case MEM:
1008       if (MEM_P (find) && rtx_equal_p (x, find))
1009 	return 1;
1010       break;
1011 
1012     case SET:
1013       if (SET_DEST (x) == find && ! count_dest)
1014 	return count_occurrences (SET_SRC (x), find, count_dest);
1015       break;
1016 
1017     default:
1018       break;
1019     }
1020 
1021   format_ptr = GET_RTX_FORMAT (code);
1022   count = 0;
1023 
1024   for (i = 0; i < GET_RTX_LENGTH (code); i++)
1025     {
1026       switch (*format_ptr++)
1027 	{
1028 	case 'e':
1029 	  count += count_occurrences (XEXP (x, i), find, count_dest);
1030 	  break;
1031 
1032 	case 'E':
1033 	  for (j = 0; j < XVECLEN (x, i); j++)
1034 	    count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
1035 	  break;
1036 	}
1037     }
1038   return count;
1039 }
1040 
1041 
1042 /* Return TRUE if OP is a register or subreg of a register that
1043    holds an unsigned quantity.  Otherwise, return FALSE.  */
1044 
1045 bool
1046 unsigned_reg_p (rtx op)
1047 {
1048   if (REG_P (op)
1049       && REG_EXPR (op)
1050       && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
1051     return true;
1052 
1053   if (GET_CODE (op) == SUBREG
1054       && SUBREG_PROMOTED_SIGN (op))
1055     return true;
1056 
1057   return false;
1058 }
1059 
1060 
1061 /* Nonzero if register REG appears somewhere within IN.
1062    Also works if REG is not a register; in this case it checks
1063    for a subexpression of IN that is Lisp "equal" to REG.  */
1064 
1065 int
1066 reg_mentioned_p (const_rtx reg, const_rtx in)
1067 {
1068   const char *fmt;
1069   int i;
1070   enum rtx_code code;
1071 
1072   if (in == 0)
1073     return 0;
1074 
1075   if (reg == in)
1076     return 1;
1077 
1078   if (GET_CODE (in) == LABEL_REF)
1079     return reg == label_ref_label (in);
1080 
1081   code = GET_CODE (in);
1082 
1083   switch (code)
1084     {
1085       /* Compare registers by number.  */
1086     case REG:
1087       return REG_P (reg) && REGNO (in) == REGNO (reg);
1088 
1089       /* These codes have no constituent expressions
1090 	 and are unique.  */
1091     case SCRATCH:
1092     case PC:
1093       return 0;
1094 
1095     CASE_CONST_ANY:
1096       /* These are kept unique for a given value.  */
1097       return 0;
1098 
1099     default:
1100       break;
1101     }
1102 
1103   if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
1104     return 1;
1105 
1106   fmt = GET_RTX_FORMAT (code);
1107 
1108   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1109     {
1110       if (fmt[i] == 'E')
1111 	{
1112 	  int j;
1113 	  for (j = XVECLEN (in, i) - 1; j >= 0; j--)
1114 	    if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
1115 	      return 1;
1116 	}
1117       else if (fmt[i] == 'e'
1118 	       && reg_mentioned_p (reg, XEXP (in, i)))
1119 	return 1;
1120     }
1121   return 0;
1122 }
1123 
1124 /* Return 1 if in between BEG and END, exclusive of BEG and END, there is
1125    no CODE_LABEL insn.  */
1126 
1127 int
1128 no_labels_between_p (const rtx_insn *beg, const rtx_insn *end)
1129 {
1130   rtx_insn *p;
1131   if (beg == end)
1132     return 0;
1133   for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
1134     if (LABEL_P (p))
1135       return 0;
1136   return 1;
1137 }
1138 
1139 /* Nonzero if register REG is used in an insn between
1140    FROM_INSN and TO_INSN (exclusive of those two).  */
1141 
1142 int
1143 reg_used_between_p (const_rtx reg, const rtx_insn *from_insn,
1144 		    const rtx_insn *to_insn)
1145 {
1146   rtx_insn *insn;
1147 
1148   if (from_insn == to_insn)
1149     return 0;
1150 
1151   for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
1152     if (NONDEBUG_INSN_P (insn)
1153 	&& (reg_overlap_mentioned_p (reg, PATTERN (insn))
1154 	   || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
1155       return 1;
1156   return 0;
1157 }
1158 
1159 /* Nonzero if the old value of X, a register, is referenced in BODY.  If X
1160    is entirely replaced by a new value and the only use is as a SET_DEST,
1161    we do not consider it a reference.  */
1162 
1163 int
1164 reg_referenced_p (const_rtx x, const_rtx body)
1165 {
1166   int i;
1167 
1168   switch (GET_CODE (body))
1169     {
1170     case SET:
1171       if (reg_overlap_mentioned_p (x, SET_SRC (body)))
1172 	return 1;
1173 
1174       /* If the destination is anything other than PC, a REG or a SUBREG
1175 	 of a REG that occupies all of the REG, the insn references X if
1176 	 it is mentioned in the destination.  */
1177       if (GET_CODE (SET_DEST (body)) != PC
1178 	  && !REG_P (SET_DEST (body))
1179 	  && ! (GET_CODE (SET_DEST (body)) == SUBREG
1180 		&& REG_P (SUBREG_REG (SET_DEST (body)))
1181 		&& !read_modify_subreg_p (SET_DEST (body)))
1182 	  && reg_overlap_mentioned_p (x, SET_DEST (body)))
1183 	return 1;
1184       return 0;
1185 
1186     case ASM_OPERANDS:
1187       for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
1188 	if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
1189 	  return 1;
1190       return 0;
1191 
1192     case CALL:
1193     case USE:
1194     case IF_THEN_ELSE:
1195       return reg_overlap_mentioned_p (x, body);
1196 
1197     case TRAP_IF:
1198       return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));
1199 
1200     case PREFETCH:
1201       return reg_overlap_mentioned_p (x, XEXP (body, 0));
1202 
1203     case UNSPEC:
1204     case UNSPEC_VOLATILE:
1205       for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1206 	if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
1207 	  return 1;
1208       return 0;
1209 
1210     case PARALLEL:
1211       for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1212 	if (reg_referenced_p (x, XVECEXP (body, 0, i)))
1213 	  return 1;
1214       return 0;
1215 
1216     case CLOBBER:
1217       if (MEM_P (XEXP (body, 0)))
1218 	if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
1219 	  return 1;
1220       return 0;
1221 
1222     case COND_EXEC:
1223       if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
1224 	return 1;
1225       return reg_referenced_p (x, COND_EXEC_CODE (body));
1226 
1227     default:
1228       return 0;
1229     }
1230 }
1231 
1232 /* Nonzero if register REG is set or clobbered in an insn between
1233    FROM_INSN and TO_INSN (exclusive of those two).  */
1234 
1235 int
1236 reg_set_between_p (const_rtx reg, const rtx_insn *from_insn,
1237 		   const rtx_insn *to_insn)
1238 {
1239   const rtx_insn *insn;
1240 
1241   if (from_insn == to_insn)
1242     return 0;
1243 
1244   for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
1245     if (INSN_P (insn) && reg_set_p (reg, insn))
1246       return 1;
1247   return 0;
1248 }
1249 
1250 /* Return true if REG is set or clobbered inside INSN.  */
1251 
1252 int
1253 reg_set_p (const_rtx reg, const_rtx insn)
1254 {
1255   /* After delay slot handling, call and branch insns might be in a
1256      sequence.  Check all the elements there.  */
1257   if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
1258     {
1259       for (int i = 0; i < XVECLEN (PATTERN (insn), 0); ++i)
1260 	if (reg_set_p (reg, XVECEXP (PATTERN (insn), 0, i)))
1261 	  return true;
1262 
1263       return false;
1264     }
1265 
1266   /* We can be passed an insn or part of one.  If we are passed an insn,
1267      check if a side-effect of the insn clobbers REG.  */
1268   if (INSN_P (insn)
1269       && (FIND_REG_INC_NOTE (insn, reg)
1270 	  || (CALL_P (insn)
1271 	      && ((REG_P (reg)
1272 		   && REGNO (reg) < FIRST_PSEUDO_REGISTER
1273 		   && (insn_callee_abi (as_a<const rtx_insn *> (insn))
1274 		       .clobbers_reg_p (GET_MODE (reg), REGNO (reg))))
1275 		  || MEM_P (reg)
1276 		  || find_reg_fusage (insn, CLOBBER, reg)))))
1277     return true;
1278 
1279   /* There are no REG_INC notes for SP autoinc.  */
1280   if (reg == stack_pointer_rtx && INSN_P (insn))
1281     {
1282       subrtx_var_iterator::array_type array;
1283       FOR_EACH_SUBRTX_VAR (iter, array, PATTERN (insn), NONCONST)
1284 	{
1285 	  rtx mem = *iter;
1286 	  if (mem
1287 	      && MEM_P (mem)
1288 	      && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
1289 	    {
1290 	      if (XEXP (XEXP (mem, 0), 0) == stack_pointer_rtx)
1291 		return true;
1292 	      iter.skip_subrtxes ();
1293 	    }
1294 	}
1295     }
1296 
1297   return set_of (reg, insn) != NULL_RTX;
1298 }
1299 
1300 /* Similar to reg_set_between_p, but check all registers in X.  Return 0
1301    only if none of them are modified between START and END.  Return 1 if
1302    X contains a MEM; this routine does use memory aliasing.  */
1303 
1304 int
1305 modified_between_p (const_rtx x, const rtx_insn *start, const rtx_insn *end)
1306 {
1307   const enum rtx_code code = GET_CODE (x);
1308   const char *fmt;
1309   int i, j;
1310   rtx_insn *insn;
1311 
1312   if (start == end)
1313     return 0;
1314 
1315   switch (code)
1316     {
1317     CASE_CONST_ANY:
1318     case CONST:
1319     case SYMBOL_REF:
1320     case LABEL_REF:
1321       return 0;
1322 
1323     case PC:
1324       return 1;
1325 
1326     case MEM:
1327       if (modified_between_p (XEXP (x, 0), start, end))
1328 	return 1;
1329       if (MEM_READONLY_P (x))
1330 	return 0;
1331       for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
1332 	if (memory_modified_in_insn_p (x, insn))
1333 	  return 1;
1334       return 0;
1335 
1336     case REG:
1337       return reg_set_between_p (x, start, end);
1338 
1339     default:
1340       break;
1341     }
1342 
1343   fmt = GET_RTX_FORMAT (code);
1344   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1345     {
1346       if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
1347 	return 1;
1348 
1349       else if (fmt[i] == 'E')
1350 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1351 	  if (modified_between_p (XVECEXP (x, i, j), start, end))
1352 	    return 1;
1353     }
1354 
1355   return 0;
1356 }
1357 
1358 /* Similar to reg_set_p, but check all registers in X.  Return 0 only if none
1359    of them are modified in INSN.  Return 1 if X contains a MEM; this routine
1360    does use memory aliasing.  */
1361 
1362 int
1363 modified_in_p (const_rtx x, const_rtx insn)
1364 {
1365   const enum rtx_code code = GET_CODE (x);
1366   const char *fmt;
1367   int i, j;
1368 
1369   switch (code)
1370     {
1371     CASE_CONST_ANY:
1372     case CONST:
1373     case SYMBOL_REF:
1374     case LABEL_REF:
1375       return 0;
1376 
1377     case PC:
1378       return 1;
1379 
1380     case MEM:
1381       if (modified_in_p (XEXP (x, 0), insn))
1382 	return 1;
1383       if (MEM_READONLY_P (x))
1384 	return 0;
1385       if (memory_modified_in_insn_p (x, insn))
1386 	return 1;
1387       return 0;
1388 
1389     case REG:
1390       return reg_set_p (x, insn);
1391 
1392     default:
1393       break;
1394     }
1395 
1396   fmt = GET_RTX_FORMAT (code);
1397   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1398     {
1399       if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
1400 	return 1;
1401 
1402       else if (fmt[i] == 'E')
1403 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1404 	  if (modified_in_p (XVECEXP (x, i, j), insn))
1405 	    return 1;
1406     }
1407 
1408   return 0;
1409 }
1410 
1411 /* Return true if X is a SUBREG and if storing a value to X would
1412    preserve some of its SUBREG_REG.  For example, on a normal 32-bit
1413    target, using a SUBREG to store to one half of a DImode REG would
1414    preserve the other half.  */
1415 
1416 bool
1417 read_modify_subreg_p (const_rtx x)
1418 {
1419   if (GET_CODE (x) != SUBREG)
1420     return false;
1421   poly_uint64 isize = GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)));
1422   poly_uint64 osize = GET_MODE_SIZE (GET_MODE (x));
1423   poly_uint64 regsize = REGMODE_NATURAL_SIZE (GET_MODE (SUBREG_REG (x)));
1424   /* The inner and outer modes of a subreg must be ordered, so that we
1425      can tell whether they're paradoxical or partial.  */
1426   gcc_checking_assert (ordered_p (isize, osize));
1427   return (maybe_gt (isize, osize) && maybe_gt (isize, regsize));
1428 }
1429 
1430 /* Helper function for set_of.  */
1431 struct set_of_data
1432   {
1433     const_rtx found;
1434     const_rtx pat;
1435   };
1436 
1437 static void
1438 set_of_1 (rtx x, const_rtx pat, void *data1)
1439 {
1440   struct set_of_data *const data = (struct set_of_data *) (data1);
1441   if (rtx_equal_p (x, data->pat)
1442       || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
1443     data->found = pat;
1444 }
1445 
1446 /* Given an INSN, return a SET or CLOBBER expression that modifies PAT
1447    (either directly or via STRICT_LOW_PART and similar modifiers).  */
1448 const_rtx
1449 set_of (const_rtx pat, const_rtx insn)
1450 {
1451   struct set_of_data data;
1452   data.found = NULL_RTX;
1453   data.pat = pat;
1454   note_pattern_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
1455   return data.found;
1456 }
1457 
1458 /* Check whether instruction pattern PAT contains a SET with the following
1459    properties:
1460 
1461    - the SET is executed unconditionally; and
1462    - either:
1463      - the destination of the SET is a REG that contains REGNO; or
1464      - both:
1465        - the destination of the SET is a SUBREG of such a REG; and
1466        - writing to the subreg clobbers all of the SUBREG_REG
1467 	 (in other words, read_modify_subreg_p is false).
1468 
1469    If PAT does have a SET like that, return the set, otherwise return null.
1470 
1471    This is intended to be an alternative to single_set for passes that
1472    can handle patterns with multiple sets.  */
1473 rtx
1474 simple_regno_set (rtx pat, unsigned int regno)
1475 {
1476   if (GET_CODE (pat) == PARALLEL)
1477     {
1478       int last = XVECLEN (pat, 0) - 1;
1479       for (int i = 0; i < last; ++i)
1480 	if (rtx set = simple_regno_set (XVECEXP (pat, 0, i), regno))
1481 	  return set;
1482 
1483       pat = XVECEXP (pat, 0, last);
1484     }
1485 
1486   if (GET_CODE (pat) == SET
1487       && covers_regno_no_parallel_p (SET_DEST (pat), regno))
1488     return pat;
1489 
1490   return nullptr;
1491 }
1492 
1493 /* Add all hard registers in X to *PSET.  */
1494 void
1495 find_all_hard_regs (const_rtx x, HARD_REG_SET *pset)
1496 {
1497   subrtx_iterator::array_type array;
1498   FOR_EACH_SUBRTX (iter, array, x, NONCONST)
1499     {
1500       const_rtx x = *iter;
1501       if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
1502 	add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
1503     }
1504 }
1505 
1506 /* This function, called through note_stores, collects sets and
1507    clobbers of hard registers in a HARD_REG_SET, which is pointed to
1508    by DATA.  */
1509 void
1510 record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
1511 {
1512   HARD_REG_SET *pset = (HARD_REG_SET *)data;
1513   if (REG_P (x) && HARD_REGISTER_P (x))
1514     add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
1515 }
1516 
1517 /* Examine INSN, and compute the set of hard registers written by it.
1518    Store it in *PSET.  Should only be called after reload.
1519 
1520    IMPLICIT is true if we should include registers that are fully-clobbered
1521    by calls.  This should be used with caution, since it doesn't include
1522    partially-clobbered registers.  */
1523 void
1524 find_all_hard_reg_sets (const rtx_insn *insn, HARD_REG_SET *pset, bool implicit)
1525 {
1526   rtx link;
1527 
1528   CLEAR_HARD_REG_SET (*pset);
1529   note_stores (insn, record_hard_reg_sets, pset);
1530   if (CALL_P (insn) && implicit)
1531     *pset |= insn_callee_abi (insn).full_reg_clobbers ();
1532   for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1533     if (REG_NOTE_KIND (link) == REG_INC)
1534       record_hard_reg_sets (XEXP (link, 0), NULL, pset);
1535 }
1536 
1537 /* Like record_hard_reg_sets, but called through note_uses.  */
1538 void
1539 record_hard_reg_uses (rtx *px, void *data)
1540 {
1541   find_all_hard_regs (*px, (HARD_REG_SET *) data);
1542 }
1543 
1544 /* Given an INSN, return a SET expression if this insn has only a single SET.
1545    It may also have CLOBBERs, USEs, or SETs whose outputs
1546    will not be used, which we ignore.  */
1547 
1548 rtx
1549 single_set_2 (const rtx_insn *insn, const_rtx pat)
1550 {
1551   rtx set = NULL;
1552   int set_verified = 1;
1553   int i;
1554 
1555   if (GET_CODE (pat) == PARALLEL)
1556     {
1557       for (i = 0; i < XVECLEN (pat, 0); i++)
1558 	{
1559 	  rtx sub = XVECEXP (pat, 0, i);
1560 	  switch (GET_CODE (sub))
1561 	    {
1562 	    case USE:
1563 	    case CLOBBER:
1564 	      break;
1565 
1566 	    case SET:
1567 	      /* We can consider insns having multiple sets, where all
1568 		 but one are dead, as single set insns.  In the common case
1569 		 only a single set is present in the pattern, so we want
1570 		 to avoid checking for REG_UNUSED notes unless necessary.
1571 
1572 		 When we reach a set for the first time, we just expect it to
1573 		 be the single set we are looking for; only when more sets
1574 		 are found in the insn do we check them.  */
1575 	      if (!set_verified)
1576 		{
1577 		  if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
1578 		      && !side_effects_p (set))
1579 		    set = NULL;
1580 		  else
1581 		    set_verified = 1;
1582 		}
1583 	      if (!set)
1584 		set = sub, set_verified = 0;
1585 	      else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
1586 		       || side_effects_p (sub))
1587 		return NULL_RTX;
1588 	      break;
1589 
1590 	    default:
1591 	      return NULL_RTX;
1592 	    }
1593 	}
1594     }
1595   return set;
1596 }
1597 
1598 /* Given an INSN, return nonzero if it has more than one SET, else return
1599    zero.  */
1600 
1601 int
1602 multiple_sets (const_rtx insn)
1603 {
1604   int found;
1605   int i;
1606 
1607   /* INSN must be an insn.  */
1608   if (! INSN_P (insn))
1609     return 0;
1610 
1611   /* Only a PARALLEL can have multiple SETs.  */
1612   if (GET_CODE (PATTERN (insn)) == PARALLEL)
1613     {
1614       for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1615 	if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
1616 	  {
1617 	    /* If we have already found a SET, then return now.  */
1618 	    if (found)
1619 	      return 1;
1620 	    else
1621 	      found = 1;
1622 	  }
1623     }
1624 
1625   /* Either zero or one SET.  */
1626   return 0;
1627 }
1628 
1629 /* Return nonzero if the destination of SET equals the source
1630    and there are no side effects.  */
1631 
1632 int
1633 set_noop_p (const_rtx set)
1634 {
1635   rtx src = SET_SRC (set);
1636   rtx dst = SET_DEST (set);
1637 
1638   if (dst == pc_rtx && src == pc_rtx)
1639     return 1;
1640 
1641   if (MEM_P (dst) && MEM_P (src))
1642     return rtx_equal_p (dst, src) && !side_effects_p (dst);
1643 
1644   if (GET_CODE (dst) == ZERO_EXTRACT)
1645     return rtx_equal_p (XEXP (dst, 0), src)
1646 	   && !BITS_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
1647 	   && !side_effects_p (src);
1648 
1649   if (GET_CODE (dst) == STRICT_LOW_PART)
1650     dst = XEXP (dst, 0);
1651 
1652   if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
1653     {
1654       if (maybe_ne (SUBREG_BYTE (src), SUBREG_BYTE (dst)))
1655 	return 0;
1656       src = SUBREG_REG (src);
1657       dst = SUBREG_REG (dst);
1658       if (GET_MODE (src) != GET_MODE (dst))
1659 	/* It is hard to tell whether subregs refer to the same bits, so act
1660 	   conservatively and return 0.  */
1661 	return 0;
1662     }
1663 
1664   /* It is a NOOP if destination overlaps with selected src vector
1665      elements.  */
1666   if (GET_CODE (src) == VEC_SELECT
1667       && REG_P (XEXP (src, 0)) && REG_P (dst)
1668       && HARD_REGISTER_P (XEXP (src, 0))
1669       && HARD_REGISTER_P (dst))
1670     {
1671       int i;
1672       rtx par = XEXP (src, 1);
1673       rtx src0 = XEXP (src, 0);
1674       poly_int64 c0;
1675       if (!poly_int_rtx_p (XVECEXP (par, 0, 0), &c0))
1676 	return 0;
1677       poly_int64 offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;
1678 
1679       for (i = 1; i < XVECLEN (par, 0); i++)
1680 	{
1681 	  poly_int64 c0i;
1682 	  if (!poly_int_rtx_p (XVECEXP (par, 0, i), &c0i)
1683 	      || maybe_ne (c0i, c0 + i))
1684 	    return 0;
1685 	}
1686       return
1687 	REG_CAN_CHANGE_MODE_P (REGNO (dst), GET_MODE (src0), GET_MODE (dst))
1688 	&& simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
1689 				  offset, GET_MODE (dst)) == (int) REGNO (dst);
1690     }
1691 
1692   return (REG_P (src) && REG_P (dst)
1693 	  && REGNO (src) == REGNO (dst));
1694 }
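
/* For example, (set (pc) (pc)) is a no-op by the first test above, and
   (set (reg:SI 5) (reg:SI 5)) is a no-op by the final register test.  */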
1695 
1696 /* Return nonzero if an insn consists only of SETs, each of which only sets a
1697    value to itself.  */
1698 
1699 int
1700 noop_move_p (const rtx_insn *insn)
1701 {
1702   rtx pat = PATTERN (insn);
1703 
1704   if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
1705     return 1;
1706 
1707   /* Check the code to be executed for COND_EXEC.  */
1708   if (GET_CODE (pat) == COND_EXEC)
1709     pat = COND_EXEC_CODE (pat);
1710 
1711   if (GET_CODE (pat) == SET && set_noop_p (pat))
1712     return 1;
1713 
1714   if (GET_CODE (pat) == PARALLEL)
1715     {
1716       int i;
1717       /* If nothing but SETs of registers to themselves,
1718 	 this insn can also be deleted.  */
1719       for (i = 0; i < XVECLEN (pat, 0); i++)
1720 	{
1721 	  rtx tem = XVECEXP (pat, 0, i);
1722 
1723 	  if (GET_CODE (tem) == USE || GET_CODE (tem) == CLOBBER)
1724 	    continue;
1725 
1726 	  if (GET_CODE (tem) != SET || ! set_noop_p (tem))
1727 	    return 0;
1728 	}
1729 
1730       return 1;
1731     }
1732   return 0;
1733 }
1734 
1735 
1736 /* Return nonzero if a register in the range [REGNO, ENDREGNO)
1737    appears either explicitly or implicitly in X
1738    other than being stored into.
1739 
1740    References contained within the substructure at LOC do not count.
1741    LOC may be zero, meaning don't ignore anything.  */
1742 
1743 bool
1744 refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
1745 		   rtx *loc)
1746 {
1747   int i;
1748   unsigned int x_regno;
1749   RTX_CODE code;
1750   const char *fmt;
1751 
1752  repeat:
1753   /* The contents of a REG_NONNEG note are always zero, so we must come here
1754      upon repeat in case the last REG_NOTE is a REG_NONNEG note.  */
1755   if (x == 0)
1756     return false;
1757 
1758   code = GET_CODE (x);
1759 
1760   switch (code)
1761     {
1762     case REG:
1763       x_regno = REGNO (x);
1764 
1765       /* If we are modifying the stack, frame, or argument pointer, it will
1766 	 clobber a virtual register.  In fact, we could be more precise,
1767 	 but it isn't worth it.  */
1768       if ((x_regno == STACK_POINTER_REGNUM
1769 	   || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1770 	       && x_regno == ARG_POINTER_REGNUM)
1771 	   || x_regno == FRAME_POINTER_REGNUM)
1772 	  && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
1773 	return true;
1774 
1775       return endregno > x_regno && regno < END_REGNO (x);
1776 
1777     case SUBREG:
1778       /* If this is a SUBREG of a hard reg, we can see exactly which
1779 	 registers are being modified.  Otherwise, handle normally.  */
1780       if (REG_P (SUBREG_REG (x))
1781 	  && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
1782 	{
1783 	  unsigned int inner_regno = subreg_regno (x);
1784 	  unsigned int inner_endregno
1785 	    = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
1786 			     ? subreg_nregs (x) : 1);
1787 
1788 	  return endregno > inner_regno && regno < inner_endregno;
1789 	}
1790       break;
1791 
1792     case CLOBBER:
1793     case SET:
1794       if (&SET_DEST (x) != loc
1795 	  /* Note setting a SUBREG counts as referring to the REG it is in for
1796 	     a pseudo but not for hard registers since we can
1797 	     treat each word individually.  */
1798 	  && ((GET_CODE (SET_DEST (x)) == SUBREG
1799 	       && loc != &SUBREG_REG (SET_DEST (x))
1800 	       && REG_P (SUBREG_REG (SET_DEST (x)))
1801 	       && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
1802 	       && refers_to_regno_p (regno, endregno,
1803 				     SUBREG_REG (SET_DEST (x)), loc))
1804 	      || (!REG_P (SET_DEST (x))
1805 		  && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
1806 	return true;
1807 
1808       if (code == CLOBBER || loc == &SET_SRC (x))
1809 	return false;
1810       x = SET_SRC (x);
1811       goto repeat;
1812 
1813     default:
1814       break;
1815     }
1816 
1817   /* X does not match, so try its subexpressions.  */
1818 
1819   fmt = GET_RTX_FORMAT (code);
1820   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1821     {
1822       if (fmt[i] == 'e' && loc != &XEXP (x, i))
1823 	{
1824 	  if (i == 0)
1825 	    {
1826 	      x = XEXP (x, 0);
1827 	      goto repeat;
1828 	    }
1829 	  else
1830 	    if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
1831 	      return true;
1832 	}
1833       else if (fmt[i] == 'E')
1834 	{
1835 	  int j;
1836 	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1837 	    if (loc != &XVECEXP (x, i, j)
1838 		&& refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
1839 	      return true;
1840 	}
1841     }
1842   return false;
1843 }
1844 
1845 /* Nonzero if modifying X will affect IN.  If X is a register or a SUBREG,
1846    we check if any register number in X conflicts with the relevant register
1847    numbers.  If X is a constant, return 0.  If X is a MEM, return 1 iff IN
1848    contains a MEM (we don't bother checking for memory addresses that can't
1849    conflict because we expect this to be a rare case).  */
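/* For example, if X is (reg:DI 0) and that register occupies hard
   registers 0 and 1, then X overlaps any IN that mentions (reg:SI 1).  */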
1850 
1851 int
1852 reg_overlap_mentioned_p (const_rtx x, const_rtx in)
1853 {
1854   unsigned int regno, endregno;
1855 
1856   /* If either argument is a constant, then modifying X cannot
1857      affect IN.  Here we test only IN; the CONSTANT_P (x) case is
1858      handled by the default case of the switch statement below.  */
1859   if (CONSTANT_P (in))
1860     return 0;
1861 
1862  recurse:
1863   switch (GET_CODE (x))
1864     {
1865     case CLOBBER:
1866     case STRICT_LOW_PART:
1867     case ZERO_EXTRACT:
1868     case SIGN_EXTRACT:
1869       /* Overly conservative.  */
1870       x = XEXP (x, 0);
1871       goto recurse;
1872 
1873     case SUBREG:
1874       regno = REGNO (SUBREG_REG (x));
1875       if (regno < FIRST_PSEUDO_REGISTER)
1876 	regno = subreg_regno (x);
1877       endregno = regno + (regno < FIRST_PSEUDO_REGISTER
1878 			  ? subreg_nregs (x) : 1);
1879       goto do_reg;
1880 
1881     case REG:
1882       regno = REGNO (x);
1883       endregno = END_REGNO (x);
1884     do_reg:
1885       return refers_to_regno_p (regno, endregno, in, (rtx*) 0);
1886 
1887     case MEM:
1888       {
1889 	const char *fmt;
1890 	int i;
1891 
1892 	if (MEM_P (in))
1893 	  return 1;
1894 
1895 	fmt = GET_RTX_FORMAT (GET_CODE (in));
1896 	for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
1897 	  if (fmt[i] == 'e')
1898 	    {
1899 	      if (reg_overlap_mentioned_p (x, XEXP (in, i)))
1900 		return 1;
1901 	    }
1902 	  else if (fmt[i] == 'E')
1903 	    {
1904 	      int j;
1905 	      for (j = XVECLEN (in, i) - 1; j >= 0; --j)
1906 		if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
1907 		  return 1;
1908 	    }
1909 
1910 	return 0;
1911       }
1912 
1913     case SCRATCH:
1914     case PC:
1915       return reg_mentioned_p (x, in);
1916 
1917     case PARALLEL:
1918       {
1919 	int i;
1920 
1921 	/* If any register mentioned in X overlaps IN, return 1.  */
1922 	for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1923 	  if (XEXP (XVECEXP (x, 0, i), 0) != 0
1924 	      && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
1925 	    return 1;
1926 	return 0;
1927       }
1928 
1929     default:
1930       gcc_assert (CONSTANT_P (x));
1931       return 0;
1932     }
1933 }
1934 
1935 /* Call FUN on each register or MEM that is stored into or clobbered by X.
1936    (X would be the pattern of an insn).  DATA is an arbitrary pointer,
1937    ignored by note_stores, but passed to FUN.
1938 
1939    FUN receives three arguments:
1940    1. the REG, MEM or PC being stored in or clobbered,
1941    2. the SET or CLOBBER rtx that does the store,
1942    3. the pointer DATA provided to note_stores.
1943 
1944   If the item being stored in or clobbered is a SUBREG of a hard register,
1945   the SUBREG will be passed.  */
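/* A minimal sketch of a typical callback; record_dest and live_regs are
   hypothetical names used only for illustration:

     static void
     record_dest (rtx dest, const_rtx setter ATTRIBUTE_UNUSED, void *data)
     {
       if (REG_P (dest))
	 bitmap_set_bit ((bitmap) data, REGNO (dest));
     }

     note_pattern_stores (PATTERN (insn), record_dest, live_regs);  */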
1946 
1947 void
1948 note_pattern_stores (const_rtx x,
1949 		     void (*fun) (rtx, const_rtx, void *), void *data)
1950 {
1951   int i;
1952 
1953   if (GET_CODE (x) == COND_EXEC)
1954     x = COND_EXEC_CODE (x);
1955 
1956   if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
1957     {
1958       rtx dest = SET_DEST (x);
1959 
1960       while ((GET_CODE (dest) == SUBREG
1961 	      && (!REG_P (SUBREG_REG (dest))
1962 		  || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
1963 	     || GET_CODE (dest) == ZERO_EXTRACT
1964 	     || GET_CODE (dest) == STRICT_LOW_PART)
1965 	dest = XEXP (dest, 0);
1966 
1967       /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
1968 	 each of whose first operand is a register.  */
1969       if (GET_CODE (dest) == PARALLEL)
1970 	{
1971 	  for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
1972 	    if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
1973 	      (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
1974 	}
1975       else
1976 	(*fun) (dest, x, data);
1977     }
1978 
1979   else if (GET_CODE (x) == PARALLEL)
1980     for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1981       note_pattern_stores (XVECEXP (x, 0, i), fun, data);
1982 }
1983 
1984 /* Same, but for an instruction.  If the instruction is a call, include
1985    any CLOBBERs in its CALL_INSN_FUNCTION_USAGE.  */
1986 
1987 void
1988 note_stores (const rtx_insn *insn,
1989 	     void (*fun) (rtx, const_rtx, void *), void *data)
1990 {
1991   if (CALL_P (insn))
1992     for (rtx link = CALL_INSN_FUNCTION_USAGE (insn);
1993 	 link; link = XEXP (link, 1))
1994       if (GET_CODE (XEXP (link, 0)) == CLOBBER)
1995 	note_pattern_stores (XEXP (link, 0), fun, data);
1996   note_pattern_stores (PATTERN (insn), fun, data);
1997 }
1998 
1999 /* Like note_stores, but call FUN for each expression that is being
2000    referenced in PBODY, a pointer to the PATTERN of an insn.  We only call
2001    FUN for each expression, not any interior subexpressions.  FUN receives a
2002    pointer to the expression and the DATA passed to this function.
2003 
2004    Note that this is not quite the same test as that done in reg_referenced_p
2005    since that considers something as being referenced if it is being
2006    partially set, while we do not.  */
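/* A minimal sketch of a typical callback; check_use and found_volatile are
   hypothetical names used only for illustration:

     static void
     check_use (rtx *loc, void *data)
     {
       if (volatile_refs_p (*loc))
	 *(bool *) data = true;
     }

     note_uses (&PATTERN (insn), check_use, &found_volatile);  */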
2007 
2008 void
2009 note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
2010 {
2011   rtx body = *pbody;
2012   int i;
2013 
2014   switch (GET_CODE (body))
2015     {
2016     case COND_EXEC:
2017       (*fun) (&COND_EXEC_TEST (body), data);
2018       note_uses (&COND_EXEC_CODE (body), fun, data);
2019       return;
2020 
2021     case PARALLEL:
2022       for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
2023 	note_uses (&XVECEXP (body, 0, i), fun, data);
2024       return;
2025 
2026     case SEQUENCE:
2027       for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
2028 	note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
2029       return;
2030 
2031     case USE:
2032       (*fun) (&XEXP (body, 0), data);
2033       return;
2034 
2035     case ASM_OPERANDS:
2036       for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
2037 	(*fun) (&ASM_OPERANDS_INPUT (body, i), data);
2038       return;
2039 
2040     case TRAP_IF:
2041       (*fun) (&TRAP_CONDITION (body), data);
2042       return;
2043 
2044     case PREFETCH:
2045       (*fun) (&XEXP (body, 0), data);
2046       return;
2047 
2048     case UNSPEC:
2049     case UNSPEC_VOLATILE:
2050       for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
2051 	(*fun) (&XVECEXP (body, 0, i), data);
2052       return;
2053 
2054     case CLOBBER:
2055       if (MEM_P (XEXP (body, 0)))
2056 	(*fun) (&XEXP (XEXP (body, 0), 0), data);
2057       return;
2058 
2059     case SET:
2060       {
2061 	rtx dest = SET_DEST (body);
2062 
2063 	/* For a SET we visit the source, the address of a memory destination,
2064 	   and the position and length operands of a ZERO_EXTRACT destination.  */
2065 	(*fun) (&SET_SRC (body), data);
2066 
2067 	if (GET_CODE (dest) == ZERO_EXTRACT)
2068 	  {
2069 	    (*fun) (&XEXP (dest, 1), data);
2070 	    (*fun) (&XEXP (dest, 2), data);
2071 	  }
2072 
2073 	while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
2074 	  dest = XEXP (dest, 0);
2075 
2076 	if (MEM_P (dest))
2077 	  (*fun) (&XEXP (dest, 0), data);
2078       }
2079       return;
2080 
2081     default:
2082       /* All the other possibilities never store.  */
2083       (*fun) (pbody, data);
2084       return;
2085     }
2086 }
2087 
2088 /* Try to add a description of REG X to this object, stopping once
2089    the REF_END limit has been reached.  FLAGS is a bitmask of
2090    rtx_obj_reference flags that describe the context.  */
2091 
2092 void
2093 rtx_properties::try_to_add_reg (const_rtx x, unsigned int flags)
2094 {
2095   if (REG_NREGS (x) != 1)
2096     flags |= rtx_obj_flags::IS_MULTIREG;
2097   machine_mode mode = GET_MODE (x);
2098   unsigned int start_regno = REGNO (x);
2099   unsigned int end_regno = END_REGNO (x);
2100   for (unsigned int regno = start_regno; regno < end_regno; ++regno)
2101     if (ref_iter != ref_end)
2102       *ref_iter++ = rtx_obj_reference (regno, flags, mode,
2103 				       regno - start_regno);
2104 }
2105 
2106 /* Add a description of destination X to this object.  FLAGS is a bitmask
2107    of rtx_obj_reference flags that describe the context.
2108 
2109    This routine accepts all rtxes that can legitimately appear in a
2110    SET_DEST.  */
2111 
2112 void
2113 rtx_properties::try_to_add_dest (const_rtx x, unsigned int flags)
2114 {
2115   /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
2116      each of whose first operand is a register.  */
2117   if (__builtin_expect (GET_CODE (x) == PARALLEL, 0))
2118     {
2119       for (int i = XVECLEN (x, 0) - 1; i >= 0; --i)
2120 	if (rtx dest = XEXP (XVECEXP (x, 0, i), 0))
2121 	  try_to_add_dest (dest, flags);
2122       return;
2123     }
2124 
2125   unsigned int base_flags = flags & rtx_obj_flags::STICKY_FLAGS;
2126   flags |= rtx_obj_flags::IS_WRITE;
2127   for (;;)
2128     if (GET_CODE (x) == ZERO_EXTRACT)
2129       {
2130 	try_to_add_src (XEXP (x, 1), base_flags);
2131 	try_to_add_src (XEXP (x, 2), base_flags);
2132 	flags |= rtx_obj_flags::IS_READ;
2133 	x = XEXP (x, 0);
2134       }
2135     else if (GET_CODE (x) == STRICT_LOW_PART)
2136       {
2137 	flags |= rtx_obj_flags::IS_READ;
2138 	x = XEXP (x, 0);
2139       }
2140     else if (GET_CODE (x) == SUBREG)
2141       {
2142 	flags |= rtx_obj_flags::IN_SUBREG;
2143 	if (read_modify_subreg_p (x))
2144 	  flags |= rtx_obj_flags::IS_READ;
2145 	x = SUBREG_REG (x);
2146       }
2147     else
2148       break;
2149 
2150   if (MEM_P (x))
2151     {
2152       if (ref_iter != ref_end)
2153 	*ref_iter++ = rtx_obj_reference (MEM_REGNO, flags, GET_MODE (x));
2154 
2155       unsigned int addr_flags = base_flags | rtx_obj_flags::IN_MEM_STORE;
2156       if (flags & rtx_obj_flags::IS_READ)
2157 	addr_flags |= rtx_obj_flags::IN_MEM_LOAD;
2158       try_to_add_src (XEXP (x, 0), addr_flags);
2159       return;
2160     }
2161 
2162   if (__builtin_expect (REG_P (x), 1))
2163     {
2164       /* We want to keep sp alive everywhere, so make every write
2165 	 to sp also count as a use of sp.  */
2166       if (REGNO (x) == STACK_POINTER_REGNUM)
2167 	flags |= rtx_obj_flags::IS_READ;
2168       try_to_add_reg (x, flags);
2169       return;
2170     }
2171 }
2172 
2173 /* Try to add a description of source X to this object, stopping once
2174    the REF_END limit has been reached.  FLAGS is a bitmask of
2175    rtx_obj_reference flags that describe the context.
2176 
2177    This routine accepts all rtxes that can legitimately appear in a SET_SRC.  */
2178 
2179 void
2180 rtx_properties::try_to_add_src (const_rtx x, unsigned int flags)
2181 {
2182   unsigned int base_flags = flags & rtx_obj_flags::STICKY_FLAGS;
2183   subrtx_iterator::array_type array;
2184   FOR_EACH_SUBRTX (iter, array, x, NONCONST)
2185     {
2186       const_rtx x = *iter;
2187       rtx_code code = GET_CODE (x);
2188       if (code == REG)
2189 	try_to_add_reg (x, flags | rtx_obj_flags::IS_READ);
2190       else if (code == MEM)
2191 	{
2192 	  if (MEM_VOLATILE_P (x))
2193 	    has_volatile_refs = true;
2194 
2195 	  if (!MEM_READONLY_P (x) && ref_iter != ref_end)
2196 	    {
2197 	      auto mem_flags = flags | rtx_obj_flags::IS_READ;
2198 	      *ref_iter++ = rtx_obj_reference (MEM_REGNO, mem_flags,
2199 					       GET_MODE (x));
2200 	    }
2201 
2202 	  try_to_add_src (XEXP (x, 0),
2203 			  base_flags | rtx_obj_flags::IN_MEM_LOAD);
2204 	  iter.skip_subrtxes ();
2205 	}
2206       else if (code == SUBREG)
2207 	{
2208 	  try_to_add_src (SUBREG_REG (x), flags | rtx_obj_flags::IN_SUBREG);
2209 	  iter.skip_subrtxes ();
2210 	}
2211       else if (code == UNSPEC_VOLATILE)
2212 	has_volatile_refs = true;
2213       else if (code == ASM_INPUT || code == ASM_OPERANDS)
2214 	{
2215 	  has_asm = true;
2216 	  if (MEM_VOLATILE_P (x))
2217 	    has_volatile_refs = true;
2218 	}
2219       else if (code == PRE_INC
2220 	       || code == PRE_DEC
2221 	       || code == POST_INC
2222 	       || code == POST_DEC
2223 	       || code == PRE_MODIFY
2224 	       || code == POST_MODIFY)
2225 	{
2226 	  has_pre_post_modify = true;
2227 
2228 	  unsigned int addr_flags = (base_flags
2229 				     | rtx_obj_flags::IS_PRE_POST_MODIFY
2230 				     | rtx_obj_flags::IS_READ);
2231 	  try_to_add_dest (XEXP (x, 0), addr_flags);
2232 	  if (code == PRE_MODIFY || code == POST_MODIFY)
2233 	    iter.substitute (XEXP (XEXP (x, 1), 1));
2234 	  else
2235 	    iter.skip_subrtxes ();
2236 	}
2237       else if (code == CALL)
2238 	has_call = true;
2239     }
2240 }
2241 
2242 /* Try to add a description of instruction pattern PAT to this object,
2243    stopping once the REF_END limit has been reached.  */
2244 
2245 void
2246 rtx_properties::try_to_add_pattern (const_rtx pat)
2247 {
2248   switch (GET_CODE (pat))
2249     {
2250     case COND_EXEC:
2251       try_to_add_src (COND_EXEC_TEST (pat));
2252       try_to_add_pattern (COND_EXEC_CODE (pat));
2253       break;
2254 
2255     case PARALLEL:
2256       {
2257 	int last = XVECLEN (pat, 0) - 1;
2258 	for (int i = 0; i < last; ++i)
2259 	  try_to_add_pattern (XVECEXP (pat, 0, i));
2260 	try_to_add_pattern (XVECEXP (pat, 0, last));
2261 	break;
2262       }
2263 
2264     case ASM_OPERANDS:
2265       for (int i = 0, len = ASM_OPERANDS_INPUT_LENGTH (pat); i < len; ++i)
2266 	try_to_add_src (ASM_OPERANDS_INPUT (pat, i));
2267       break;
2268 
2269     case CLOBBER:
2270       try_to_add_dest (XEXP (pat, 0), rtx_obj_flags::IS_CLOBBER);
2271       break;
2272 
2273     case SET:
2274       try_to_add_dest (SET_DEST (pat));
2275       try_to_add_src (SET_SRC (pat));
2276       break;
2277 
2278     default:
2279       /* All the other possibilities never store and can use a normal
2280 	 rtx walk.  This includes:
2281 
2282 	 - USE
2283 	 - TRAP_IF
2284 	 - PREFETCH
2285 	 - UNSPEC
2286 	 - UNSPEC_VOLATILE.  */
2287       try_to_add_src (pat);
2288       break;
2289     }
2290 }
2291 
2292 /* Try to add a description of INSN to this object, stopping once
2293    the REF_END limit has been reached.  INCLUDE_NOTES is true if the
2294    description should include REG_EQUAL and REG_EQUIV notes; all such
2295    references will then be marked with rtx_obj_flags::IN_NOTE.
2296 
2297    For calls, this description includes all accesses in
2298    CALL_INSN_FUNCTION_USAGE.  It also includes all implicit accesses
2299    to global registers by the target function.  However, it does not
2300    include clobbers performed by the target function; callers that want
2301    this information should instead use the function_abi interface.  */
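/* A sketch of a typical use via the vec_rtx_properties wrapper declared
   in rtlanal.h; the member names below are assumptions for illustration,
   not guarantees of the interface:

     vec_rtx_properties properties;
     properties.add_insn (insn, false);
     for (rtx_obj_reference ref : properties.refs ())
       if (ref.is_reg () && ref.is_write ())
	 ...record a definition of register ref.regno...  */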
2302 
2303 void
2304 rtx_properties::try_to_add_insn (const rtx_insn *insn, bool include_notes)
2305 {
2306   if (CALL_P (insn))
2307     {
2308       /* Non-const functions can read from global registers.  Impure
2309 	 functions can also set them.
2310 
2311 	 Adding the global registers first removes a situation in which
2312 	 a fixed-form clobber of register R could come before a real set
2313 	 of register R.  */
2314       if (!hard_reg_set_empty_p (global_reg_set)
2315 	  && !RTL_CONST_CALL_P (insn))
2316 	{
2317 	  unsigned int flags = rtx_obj_flags::IS_READ;
2318 	  if (!RTL_PURE_CALL_P (insn))
2319 	    flags |= rtx_obj_flags::IS_WRITE;
2320 	  for (unsigned int regno = 0; regno < FIRST_PSEUDO_REGISTER; ++regno)
2321 	    /* As a special case, the stack pointer is invariant across calls
2322 	       even if it has been marked global; see the corresponding
2323 	       handling in df_get_call_refs.  */
2324 	    if (regno != STACK_POINTER_REGNUM
2325 		&& global_regs[regno]
2326 		&& ref_iter != ref_end)
2327 	      *ref_iter++ = rtx_obj_reference (regno, flags,
2328 					       reg_raw_mode[regno], 0);
2329 	}
2330       /* Untyped calls implicitly set all function value registers.
2331 	 Again, we add them first in case the main pattern contains
2332 	 a fixed-form clobber.  */
2333       if (find_reg_note (insn, REG_UNTYPED_CALL, NULL_RTX))
2334 	for (unsigned int regno = 0; regno < FIRST_PSEUDO_REGISTER; ++regno)
2335 	  if (targetm.calls.function_value_regno_p (regno)
2336 	      && ref_iter != ref_end)
2337 	    *ref_iter++ = rtx_obj_reference (regno, rtx_obj_flags::IS_WRITE,
2338 					     reg_raw_mode[regno], 0);
2339       if (ref_iter != ref_end && !RTL_CONST_CALL_P (insn))
2340 	{
2341 	  auto mem_flags = rtx_obj_flags::IS_READ;
2342 	  if (!RTL_PURE_CALL_P (insn))
2343 	    mem_flags |= rtx_obj_flags::IS_WRITE;
2344 	  *ref_iter++ = rtx_obj_reference (MEM_REGNO, mem_flags, BLKmode);
2345 	}
2346       try_to_add_pattern (PATTERN (insn));
2347       for (rtx link = CALL_INSN_FUNCTION_USAGE (insn); link;
2348 	   link = XEXP (link, 1))
2349 	{
2350 	  rtx x = XEXP (link, 0);
2351 	  if (GET_CODE (x) == CLOBBER)
2352 	    try_to_add_dest (XEXP (x, 0), rtx_obj_flags::IS_CLOBBER);
2353 	  else if (GET_CODE (x) == USE)
2354 	    try_to_add_src (XEXP (x, 0));
2355 	}
2356     }
2357   else
2358     try_to_add_pattern (PATTERN (insn));
2359 
2360   if (include_notes)
2361     for (rtx note = REG_NOTES (insn); note; note = XEXP (note, 1))
2362       if (REG_NOTE_KIND (note) == REG_EQUAL
2363 	  || REG_NOTE_KIND (note) == REG_EQUIV)
2364 	try_to_add_note (XEXP (note, 0));
2365 }
2366 
2367 /* Grow the storage by a bit while keeping the contents of the first
2368    START elements.  */
2369 
2370 void
2371 vec_rtx_properties_base::grow (ptrdiff_t start)
2372 {
2373   /* The same heuristic that vec uses.  */
2374   ptrdiff_t new_elems = (ref_end - ref_begin) * 3 / 2;
2375   if (ref_begin == m_storage)
2376     {
2377       ref_begin = XNEWVEC (rtx_obj_reference, new_elems);
2378       if (start)
2379 	memcpy (ref_begin, m_storage, start * sizeof (rtx_obj_reference));
2380     }
2381   else
2382     ref_begin = reinterpret_cast<rtx_obj_reference *>
2383       (xrealloc (ref_begin, new_elems * sizeof (rtx_obj_reference)));
2384   ref_iter = ref_begin + start;
2385   ref_end = ref_begin + new_elems;
2386 }
2387 
2388 /* Return nonzero if X's old contents don't survive after INSN.
2389    This will be true if X is a register and X dies in INSN or because
2390    INSN entirely sets X.
2391 
2392    "Entirely set" means set directly and not through a SUBREG, or
2393    ZERO_EXTRACT, so no trace of the old contents remains.
2394    Likewise, REG_INC does not count.
2395 
2396    REG may be a hard or pseudo reg.  Renumbering is not taken into account,
2397    but for this use that makes no difference, since regs don't overlap
2398    during their lifetimes.  Therefore, this function may be used
2399    at any time after deaths have been computed.
2400 
2401    If REG is a hard reg that occupies multiple machine registers, this
2402    function will only return 1 if each of those registers will be replaced
2403    by INSN.  */
2404 
2405 int
2406 dead_or_set_p (const rtx_insn *insn, const_rtx x)
2407 {
2408   unsigned int regno, end_regno;
2409   unsigned int i;
2410 
2411   gcc_assert (REG_P (x));
2412 
2413   regno = REGNO (x);
2414   end_regno = END_REGNO (x);
2415   for (i = regno; i < end_regno; i++)
2416     if (! dead_or_set_regno_p (insn, i))
2417       return 0;
2418 
2419   return 1;
2420 }
2421 
2422 /* Return TRUE iff DEST is a register or subreg of a register, is a
2423    complete rather than read-modify-write destination, and contains
2424    register TEST_REGNO.  */
2425 
2426 static bool
2427 covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
2428 {
2429   unsigned int regno, endregno;
2430 
2431   if (GET_CODE (dest) == SUBREG && !read_modify_subreg_p (dest))
2432     dest = SUBREG_REG (dest);
2433 
2434   if (!REG_P (dest))
2435     return false;
2436 
2437   regno = REGNO (dest);
2438   endregno = END_REGNO (dest);
2439   return (test_regno >= regno && test_regno < endregno);
2440 }
2441 
2442 /* Like covers_regno_no_parallel_p, but also handles PARALLELs where
2443    any member matches the covers_regno_no_parallel_p criteria.  */
2444 
2445 static bool
2446 covers_regno_p (const_rtx dest, unsigned int test_regno)
2447 {
2448   if (GET_CODE (dest) == PARALLEL)
2449     {
2450       /* Some targets place small structures in registers for return
2451 	 values of functions, and those registers are wrapped in
2452 	 PARALLELs that we may see as the destination of a SET.  */
2453       int i;
2454 
2455       for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2456 	{
2457 	  rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
2458 	  if (inner != NULL_RTX
2459 	      && covers_regno_no_parallel_p (inner, test_regno))
2460 	    return true;
2461 	}
2462 
2463       return false;
2464     }
2465   else
2466     return covers_regno_no_parallel_p (dest, test_regno);
2467 }
2468 
2469 /* Utility function for dead_or_set_p to check an individual register. */
2470 
2471 int
2472 dead_or_set_regno_p (const rtx_insn *insn, unsigned int test_regno)
2473 {
2474   const_rtx pattern;
2475 
2476   /* See if there is a death note for something that includes TEST_REGNO.  */
2477   if (find_regno_note (insn, REG_DEAD, test_regno))
2478     return 1;
2479 
2480   if (CALL_P (insn)
2481       && find_regno_fusage (insn, CLOBBER, test_regno))
2482     return 1;
2483 
2484   pattern = PATTERN (insn);
2485 
2486   /* If a COND_EXEC is not executed, the value survives.  */
2487   if (GET_CODE (pattern) == COND_EXEC)
2488     return 0;
2489 
2490   if (GET_CODE (pattern) == SET || GET_CODE (pattern) == CLOBBER)
2491     return covers_regno_p (SET_DEST (pattern), test_regno);
2492   else if (GET_CODE (pattern) == PARALLEL)
2493     {
2494       int i;
2495 
2496       for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
2497 	{
2498 	  rtx body = XVECEXP (pattern, 0, i);
2499 
2500 	  if (GET_CODE (body) == COND_EXEC)
2501 	    body = COND_EXEC_CODE (body);
2502 
2503 	  if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
2504 	      && covers_regno_p (SET_DEST (body), test_regno))
2505 	    return 1;
2506 	}
2507     }
2508 
2509   return 0;
2510 }
2511 
2512 /* Return the reg-note of kind KIND in insn INSN, if there is one.
2513    If DATUM is nonzero, look for one whose datum is DATUM.  */
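/* For example, find_reg_note (insn, REG_EQUAL, NULL_RTX) returns INSN's
   REG_EQUAL note if there is one; XEXP (note, 0) is then the equivalent
   expression.  */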
2514 
2515 rtx
2516 find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
2517 {
2518   rtx link;
2519 
2520   gcc_checking_assert (insn);
2521 
2522   /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
2523   if (! INSN_P (insn))
2524     return 0;
2525   if (datum == 0)
2526     {
2527       for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2528 	if (REG_NOTE_KIND (link) == kind)
2529 	  return link;
2530       return 0;
2531     }
2532 
2533   for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2534     if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
2535       return link;
2536   return 0;
2537 }
2538 
2539 /* Return the reg-note of kind KIND in insn INSN which applies to register
2540    number REGNO, if any.  Return 0 if there is no such reg-note.  Note that
2541    the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
2542    it might be the case that the note overlaps REGNO.  */
2543 
2544 rtx
2545 find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
2546 {
2547   rtx link;
2548 
2549   /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
2550   if (! INSN_P (insn))
2551     return 0;
2552 
2553   for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2554     if (REG_NOTE_KIND (link) == kind
2555 	/* Verify that it is a register, so that scratch and MEM won't cause a
2556 	   problem here.  */
2557 	&& REG_P (XEXP (link, 0))
2558 	&& REGNO (XEXP (link, 0)) <= regno
2559 	&& END_REGNO (XEXP (link, 0)) > regno)
2560       return link;
2561   return 0;
2562 }
2563 
2564 /* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
2565    has such a note.  */
2566 
2567 rtx
2568 find_reg_equal_equiv_note (const_rtx insn)
2569 {
2570   rtx link;
2571 
2572   if (!INSN_P (insn))
2573     return 0;
2574 
2575   for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2576     if (REG_NOTE_KIND (link) == REG_EQUAL
2577 	|| REG_NOTE_KIND (link) == REG_EQUIV)
2578       {
2579 	/* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
2580 	   insns that have multiple sets.  Checking single_set to
2581 	   make sure of this is not the proper check, as explained
2582 	   in the comment in set_unique_reg_note.
2583 
2584 	   This should be changed into an assert.  */
2585 	if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
2586 	  return 0;
2587 	return link;
2588       }
2589   return NULL;
2590 }
2591 
2592 /* Check whether INSN is a single_set whose source is known to be
2593    equivalent to a constant.  Return that constant if so, otherwise
2594    return null.  */
2595 
2596 rtx
2597 find_constant_src (const rtx_insn *insn)
2598 {
2599   rtx note, set, x;
2600 
2601   set = single_set (insn);
2602   if (set)
2603     {
2604       x = avoid_constant_pool_reference (SET_SRC (set));
2605       if (CONSTANT_P (x))
2606 	return x;
2607     }
2608 
2609   note = find_reg_equal_equiv_note (insn);
2610   if (note && CONSTANT_P (XEXP (note, 0)))
2611     return XEXP (note, 0);
2612 
2613   return NULL_RTX;
2614 }
2615 
2616 /* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
2617    in the CALL_INSN_FUNCTION_USAGE information of INSN.  */
2618 
2619 int
2620 find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
2621 {
2622   /* If it's not a CALL_INSN, it can't possibly have a
2623      CALL_INSN_FUNCTION_USAGE field, so don't bother checking.  */
2624   if (!CALL_P (insn))
2625     return 0;
2626 
2627   gcc_assert (datum);
2628 
2629   if (!REG_P (datum))
2630     {
2631       rtx link;
2632 
2633       for (link = CALL_INSN_FUNCTION_USAGE (insn);
2634 	   link;
2635 	   link = XEXP (link, 1))
2636 	if (GET_CODE (XEXP (link, 0)) == code
2637 	    && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
2638 	  return 1;
2639     }
2640   else
2641     {
2642       unsigned int regno = REGNO (datum);
2643 
2644       /* CALL_INSN_FUNCTION_USAGE information cannot contain references
2645 	 to pseudo registers, so don't bother checking.  */
2646 
2647       if (regno < FIRST_PSEUDO_REGISTER)
2648 	{
2649 	  unsigned int end_regno = END_REGNO (datum);
2650 	  unsigned int i;
2651 
2652 	  for (i = regno; i < end_regno; i++)
2653 	    if (find_regno_fusage (insn, code, i))
2654 	      return 1;
2655 	}
2656     }
2657 
2658   return 0;
2659 }
2660 
2661 /* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
2662    in the CALL_INSN_FUNCTION_USAGE information of INSN.  */
2663 
2664 int
2665 find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
2666 {
2667   rtx link;
2668 
2669   /* CALL_INSN_FUNCTION_USAGE information cannot contain references
2670      to pseudo registers, so don't bother checking.  */
2671 
2672   if (regno >= FIRST_PSEUDO_REGISTER
2673       || !CALL_P (insn) )
2674     return 0;
2675 
2676   for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2677     {
2678       rtx op, reg;
2679 
2680       if (GET_CODE (op = XEXP (link, 0)) == code
2681 	  && REG_P (reg = XEXP (op, 0))
2682 	  && REGNO (reg) <= regno
2683 	  && END_REGNO (reg) > regno)
2684 	return 1;
2685     }
2686 
2687   return 0;
2688 }
2689 
2690 
2691 /* Return true if KIND is an integer REG_NOTE.  */
2692 
2693 static bool
2694 int_reg_note_p (enum reg_note kind)
2695 {
2696   return kind == REG_BR_PROB;
2697 }
2698 
2699 /* Allocate a register note with kind KIND and datum DATUM.  LIST is
2700    stored as the pointer to the next register note.  */
2701 
2702 rtx
2703 alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
2704 {
2705   rtx note;
2706 
2707   gcc_checking_assert (!int_reg_note_p (kind));
2708   switch (kind)
2709     {
2710     case REG_LABEL_TARGET:
2711     case REG_LABEL_OPERAND:
2712     case REG_TM:
2713       /* These types of register notes use an INSN_LIST rather than an
2714 	 EXPR_LIST, so that copying is done right and dumps look
2715 	 better.  */
2716       note = alloc_INSN_LIST (datum, list);
2717       PUT_REG_NOTE_KIND (note, kind);
2718       break;
2719 
2720     default:
2721       note = alloc_EXPR_LIST (kind, datum, list);
2722       break;
2723     }
2724 
2725   return note;
2726 }
2727 
2728 /* Add register note with kind KIND and datum DATUM to INSN.  */
2729 
2730 void
2731 add_reg_note (rtx insn, enum reg_note kind, rtx datum)
2732 {
2733   REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
2734 }
2735 
2736 /* Add an integer register note with kind KIND and datum DATUM to INSN.  */
2737 
2738 void
2739 add_int_reg_note (rtx_insn *insn, enum reg_note kind, int datum)
2740 {
2741   gcc_checking_assert (int_reg_note_p (kind));
2742   REG_NOTES (insn) = gen_rtx_INT_LIST ((machine_mode) kind,
2743 				       datum, REG_NOTES (insn));
2744 }
2745 
2746 /* Add a REG_ARGS_SIZE note to INSN with value VALUE.  */
2747 
2748 void
2749 add_args_size_note (rtx_insn *insn, poly_int64 value)
2750 {
2751   gcc_checking_assert (!find_reg_note (insn, REG_ARGS_SIZE, NULL_RTX));
2752   add_reg_note (insn, REG_ARGS_SIZE, gen_int_mode (value, Pmode));
2753 }
2754 
2755 /* Add a register note like NOTE to INSN.  */
2756 
2757 void
2758 add_shallow_copy_of_reg_note (rtx_insn *insn, rtx note)
2759 {
2760   if (GET_CODE (note) == INT_LIST)
2761     add_int_reg_note (insn, REG_NOTE_KIND (note), XINT (note, 0));
2762   else
2763     add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
2764 }
2765 
2766 /* Duplicate NOTE and return the copy.  */
2767 rtx
2768 duplicate_reg_note (rtx note)
2769 {
2770   reg_note kind = REG_NOTE_KIND (note);
2771 
2772   if (GET_CODE (note) == INT_LIST)
2773     return gen_rtx_INT_LIST ((machine_mode) kind, XINT (note, 0), NULL_RTX);
2774   else if (GET_CODE (note) == EXPR_LIST)
2775     return alloc_reg_note (kind, copy_insn_1 (XEXP (note, 0)), NULL_RTX);
2776   else
2777     return alloc_reg_note (kind, XEXP (note, 0), NULL_RTX);
2778 }
2779 
2780 /* Remove register note NOTE from the REG_NOTES of INSN.  */
2781 
2782 void
2783 remove_note (rtx_insn *insn, const_rtx note)
2784 {
2785   rtx link;
2786 
2787   if (note == NULL_RTX)
2788     return;
2789 
2790   if (REG_NOTES (insn) == note)
2791     REG_NOTES (insn) = XEXP (note, 1);
2792   else
2793     for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2794       if (XEXP (link, 1) == note)
2795 	{
2796 	  XEXP (link, 1) = XEXP (note, 1);
2797 	  break;
2798 	}
2799 
2800   switch (REG_NOTE_KIND (note))
2801     {
2802     case REG_EQUAL:
2803     case REG_EQUIV:
2804       df_notes_rescan (insn);
2805       break;
2806     default:
2807       break;
2808     }
2809 }
2810 
2811 /* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes.
2812    If NO_RESCAN is false and any notes were removed, call
2813    df_notes_rescan.  Return true if any note has been removed.  */
2814 
2815 bool
2816 remove_reg_equal_equiv_notes (rtx_insn *insn, bool no_rescan)
2817 {
2818   rtx *loc;
2819   bool ret = false;
2820 
2821   loc = &REG_NOTES (insn);
2822   while (*loc)
2823     {
2824       enum reg_note kind = REG_NOTE_KIND (*loc);
2825       if (kind == REG_EQUAL || kind == REG_EQUIV)
2826 	{
2827 	  *loc = XEXP (*loc, 1);
2828 	  ret = true;
2829 	}
2830       else
2831 	loc = &XEXP (*loc, 1);
2832     }
2833   if (ret && !no_rescan)
2834     df_notes_rescan (insn);
2835   return ret;
2836 }
2837 
2838 /* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO.  */
2839 
2840 void
2841 remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
2842 {
2843   df_ref eq_use;
2844 
2845   if (!df)
2846     return;
2847 
2848   /* This loop is a little tricky.  We cannot just go down the chain because
2849      it is being modified by some actions in the loop.  So we just iterate
2850      over the head.  We plan to drain the list anyway.  */
2851   while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
2852     {
2853       rtx_insn *insn = DF_REF_INSN (eq_use);
2854       rtx note = find_reg_equal_equiv_note (insn);
2855 
2856       /* This assert is generally triggered when someone deletes a REG_EQUAL
2857 	 or REG_EQUIV note by hacking the list manually rather than calling
2858 	 remove_note.  */
2859       gcc_assert (note);
2860 
2861       remove_note (insn, note);
2862     }
2863 }
2864 
2865 /* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
2866    return true if it is found.  A simple equality test is used to determine if
2867    NODE matches.  */
2868 
2869 bool
2870 in_insn_list_p (const rtx_insn_list *listp, const rtx_insn *node)
2871 {
2872   const_rtx x;
2873 
2874   for (x = listp; x; x = XEXP (x, 1))
2875     if (node == XEXP (x, 0))
2876       return true;
2877 
2878   return false;
2879 }
2880 
2881 /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
2882    remove that entry from the list if it is found.
2883 
2884    A simple equality test is used to determine if NODE matches.  */
2885 
2886 void
2887 remove_node_from_expr_list (const_rtx node, rtx_expr_list **listp)
2888 {
2889   rtx_expr_list *temp = *listp;
2890   rtx_expr_list *prev = NULL;
2891 
2892   while (temp)
2893     {
2894       if (node == temp->element ())
2895 	{
2896 	  /* Splice the node out of the list.  */
2897 	  if (prev)
2898 	    XEXP (prev, 1) = temp->next ();
2899 	  else
2900 	    *listp = temp->next ();
2901 
2902 	  return;
2903 	}
2904 
2905       prev = temp;
2906       temp = temp->next ();
2907     }
2908 }
2909 
2910 /* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
2911    remove that entry from the list if it is found.
2912 
2913    A simple equality test is used to determine if NODE matches.  */
2914 
2915 void
2916 remove_node_from_insn_list (const rtx_insn *node, rtx_insn_list **listp)
2917 {
2918   rtx_insn_list *temp = *listp;
2919   rtx_insn_list *prev = NULL;
2920 
2921   while (temp)
2922     {
2923       if (node == temp->insn ())
2924 	{
2925 	  /* Splice the node out of the list.  */
2926 	  if (prev)
2927 	    XEXP (prev, 1) = temp->next ();
2928 	  else
2929 	    *listp = temp->next ();
2930 
2931 	  return;
2932 	}
2933 
2934       prev = temp;
2935       temp = temp->next ();
2936     }
2937 }
2938 
2939 /* Nonzero if X contains any volatile instructions.  These are instructions
2940    which may leave the machine state unpredictable, and thus no
2941    instructions or register uses should be moved or combined across them.
2942    This includes only volatile asms and UNSPEC_VOLATILE instructions.  */
2943 
2944 int
2945 volatile_insn_p (const_rtx x)
2946 {
2947   const RTX_CODE code = GET_CODE (x);
2948   switch (code)
2949     {
2950     case LABEL_REF:
2951     case SYMBOL_REF:
2952     case CONST:
2953     CASE_CONST_ANY:
2954     case PC:
2955     case REG:
2956     case SCRATCH:
2957     case CLOBBER:
2958     case ADDR_VEC:
2959     case ADDR_DIFF_VEC:
2960     case CALL:
2961     case MEM:
2962       return 0;
2963 
2964     case UNSPEC_VOLATILE:
2965       return 1;
2966 
2967     case ASM_INPUT:
2968     case ASM_OPERANDS:
2969       if (MEM_VOLATILE_P (x))
2970 	return 1;
2971 
2972     default:
2973       break;
2974     }
2975 
2976   /* Recursively scan the operands of this expression.  */
2977 
2978   {
2979     const char *const fmt = GET_RTX_FORMAT (code);
2980     int i;
2981 
2982     for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2983       {
2984 	if (fmt[i] == 'e')
2985 	  {
2986 	    if (volatile_insn_p (XEXP (x, i)))
2987 	      return 1;
2988 	  }
2989 	else if (fmt[i] == 'E')
2990 	  {
2991 	    int j;
2992 	    for (j = 0; j < XVECLEN (x, i); j++)
2993 	      if (volatile_insn_p (XVECEXP (x, i, j)))
2994 		return 1;
2995 	  }
2996       }
2997   }
2998   return 0;
2999 }
3000 
3001 /* Nonzero if X contains any volatile memory references
3002    UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions.  */
3003 
3004 int
3005 volatile_refs_p (const_rtx x)
3006 {
3007   const RTX_CODE code = GET_CODE (x);
3008   switch (code)
3009     {
3010     case LABEL_REF:
3011     case SYMBOL_REF:
3012     case CONST:
3013     CASE_CONST_ANY:
3014     case PC:
3015     case REG:
3016     case SCRATCH:
3017     case CLOBBER:
3018     case ADDR_VEC:
3019     case ADDR_DIFF_VEC:
3020       return 0;
3021 
3022     case UNSPEC_VOLATILE:
3023       return 1;
3024 
3025     case MEM:
3026     case ASM_INPUT:
3027     case ASM_OPERANDS:
3028       if (MEM_VOLATILE_P (x))
3029 	return 1;
3030 
3031     default:
3032       break;
3033     }
3034 
3035   /* Recursively scan the operands of this expression.  */
3036 
3037   {
3038     const char *const fmt = GET_RTX_FORMAT (code);
3039     int i;
3040 
3041     for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3042       {
3043 	if (fmt[i] == 'e')
3044 	  {
3045 	    if (volatile_refs_p (XEXP (x, i)))
3046 	      return 1;
3047 	  }
3048 	else if (fmt[i] == 'E')
3049 	  {
3050 	    int j;
3051 	    for (j = 0; j < XVECLEN (x, i); j++)
3052 	      if (volatile_refs_p (XVECEXP (x, i, j)))
3053 		return 1;
3054 	  }
3055       }
3056   }
3057   return 0;
3058 }
3059 
3060 /* Similar to above, except that it also rejects register pre- and post-
3061    incrementing.  */
3062 
3063 int
3064 side_effects_p (const_rtx x)
3065 {
3066   const RTX_CODE code = GET_CODE (x);
3067   switch (code)
3068     {
3069     case LABEL_REF:
3070     case SYMBOL_REF:
3071     case CONST:
3072     CASE_CONST_ANY:
3073     case PC:
3074     case REG:
3075     case SCRATCH:
3076     case ADDR_VEC:
3077     case ADDR_DIFF_VEC:
3078     case VAR_LOCATION:
3079       return 0;
3080 
3081     case CLOBBER:
3082       /* Reject CLOBBER with a non-VOID mode.  These are made by combine.c
3083 	 when some combination can't be done.  If we see one, don't think
3084 	 that we can simplify the expression.  */
3085       return (GET_MODE (x) != VOIDmode);
3086 
3087     case PRE_INC:
3088     case PRE_DEC:
3089     case POST_INC:
3090     case POST_DEC:
3091     case PRE_MODIFY:
3092     case POST_MODIFY:
3093     case CALL:
3094     case UNSPEC_VOLATILE:
3095       return 1;
3096 
3097     case MEM:
3098     case ASM_INPUT:
3099     case ASM_OPERANDS:
3100       if (MEM_VOLATILE_P (x))
3101 	return 1;
3102 
3103     default:
3104       break;
3105     }
3106 
3107   /* Recursively scan the operands of this expression.  */
3108 
3109   {
3110     const char *fmt = GET_RTX_FORMAT (code);
3111     int i;
3112 
3113     for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3114       {
3115 	if (fmt[i] == 'e')
3116 	  {
3117 	    if (side_effects_p (XEXP (x, i)))
3118 	      return 1;
3119 	  }
3120 	else if (fmt[i] == 'E')
3121 	  {
3122 	    int j;
3123 	    for (j = 0; j < XVECLEN (x, i); j++)
3124 	      if (side_effects_p (XVECEXP (x, i, j)))
3125 		return 1;
3126 	  }
3127       }
3128   }
3129   return 0;
3130 }
3131 
3132 /* Return nonzero if evaluating rtx X might cause a trap.
3133    FLAGS controls how to consider MEMs.  A nonzero value means the context
3134    of the access may have changed from the original, such that the
3135    address may have become invalid.  */
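/* For example, (div:SI (reg:SI 1) (reg:SI 2)) may trap because the
   divisor is not known to be nonzero, whereas an integer
   (plus:SI (reg:SI 1) (reg:SI 2)) cannot trap.  */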
3136 
3137 int
3138 may_trap_p_1 (const_rtx x, unsigned flags)
3139 {
3140   int i;
3141   enum rtx_code code;
3142   const char *fmt;
3143 
3144   /* We make no distinction currently, but this function is part of
3145      the internal target-hooks ABI so we keep the parameter as
3146      "unsigned flags".  */
3147   bool code_changed = flags != 0;
3148 
3149   if (x == 0)
3150     return 0;
3151   code = GET_CODE (x);
3152   switch (code)
3153     {
3154       /* Handle these cases quickly.  */
3155     CASE_CONST_ANY:
3156     case SYMBOL_REF:
3157     case LABEL_REF:
3158     case CONST:
3159     case PC:
3160     case REG:
3161     case SCRATCH:
3162       return 0;
3163 
3164     case UNSPEC:
3165       return targetm.unspec_may_trap_p (x, flags);
3166 
3167     case UNSPEC_VOLATILE:
3168     case ASM_INPUT:
3169     case TRAP_IF:
3170       return 1;
3171 
3172     case ASM_OPERANDS:
3173       return MEM_VOLATILE_P (x);
3174 
3175       /* Memory ref can trap unless it's a static var or a stack slot.  */
3176     case MEM:
3177       /* Recognize specific pattern of stack checking probes.  */
3178       if (flag_stack_check
3179 	  && MEM_VOLATILE_P (x)
3180 	  && XEXP (x, 0) == stack_pointer_rtx)
3181 	return 1;
3182       if (/* MEM_NOTRAP_P only relates to the actual position of the memory
3183 	     reference; moving it out of context such as when moving code
3184 	     when optimizing, might cause its address to become invalid.  */
3185 	  code_changed
3186 	  || !MEM_NOTRAP_P (x))
3187 	{
3188 	  poly_int64 size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : -1;
3189 	  return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
3190 					GET_MODE (x), code_changed);
3191 	}
3192 
3193       return 0;
3194 
3195       /* Division by a non-constant might trap.  */
3196     case DIV:
3197     case MOD:
3198     case UDIV:
3199     case UMOD:
3200       if (HONOR_SNANS (x))
3201 	return 1;
3202       if (FLOAT_MODE_P (GET_MODE (x)))
3203 	return flag_trapping_math;
3204       if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
3205 	return 1;
3206       if (GET_CODE (XEXP (x, 1)) == CONST_VECTOR)
3207 	{
3208 	  /* For CONST_VECTOR, return 1 if any element is or might be zero.  */
3209 	  unsigned int n_elts;
3210 	  rtx op = XEXP (x, 1);
3211 	  if (!GET_MODE_NUNITS (GET_MODE (op)).is_constant (&n_elts))
3212 	    {
3213 	      if (!CONST_VECTOR_DUPLICATE_P (op))
3214 		return 1;
3215 	      for (unsigned i = 0; i < (unsigned int) XVECLEN (op, 0); i++)
3216 		if (CONST_VECTOR_ENCODED_ELT (op, i) == const0_rtx)
3217 		  return 1;
3218 	    }
3219 	  else
3220 	    for (unsigned i = 0; i < n_elts; i++)
3221 	      if (CONST_VECTOR_ELT (op, i) == const0_rtx)
3222 		return 1;
3223 	}
3224       break;
3225 
3226     case EXPR_LIST:
3227       /* An EXPR_LIST is used to represent a function call.  This
3228 	 certainly may trap.  */
3229       return 1;
3230 
3231     case GE:
3232     case GT:
3233     case LE:
3234     case LT:
3235     case LTGT:
3236     case COMPARE:
3237       /* Some floating point comparisons may trap.  */
3238       if (!flag_trapping_math)
3239 	break;
3240       /* ??? There is no machine independent way to check for tests that trap
3241 	 when COMPARE is used, though many targets do make this distinction.
3242 	 For instance, sparc uses CCFPE for compares which generate exceptions
3243 	 and CCFP for compares which do not generate exceptions.  */
3244       if (HONOR_NANS (x))
3245 	return 1;
3246       /* But often the compare has some CC mode, so check operand
3247 	 modes as well.  */
3248       if (HONOR_NANS (XEXP (x, 0))
3249 	  || HONOR_NANS (XEXP (x, 1)))
3250 	return 1;
3251       break;
3252 
3253     case EQ:
3254     case NE:
3255       if (HONOR_SNANS (x))
3256 	return 1;
3257       /* Often comparison is CC mode, so check operand modes.  */
3258       if (HONOR_SNANS (XEXP (x, 0))
3259 	  || HONOR_SNANS (XEXP (x, 1)))
3260 	return 1;
3261       break;
3262 
3263     case FIX:
3264     case UNSIGNED_FIX:
3265       /* Conversion of floating point might trap.  */
3266       if (flag_trapping_math && HONOR_NANS (XEXP (x, 0)))
3267 	return 1;
3268       break;
3269 
3270     case NEG:
3271     case ABS:
3272     case SUBREG:
3273     case VEC_MERGE:
3274     case VEC_SELECT:
3275     case VEC_CONCAT:
3276     case VEC_DUPLICATE:
3277       /* These operations don't trap even with floating point.  */
3278       break;
3279 
3280     default:
3281       /* Any floating arithmetic may trap.  */
3282       if (FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
3283 	return 1;
3284     }
3285 
3286   fmt = GET_RTX_FORMAT (code);
3287   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3288     {
3289       if (fmt[i] == 'e')
3290 	{
3291 	  if (may_trap_p_1 (XEXP (x, i), flags))
3292 	    return 1;
3293 	}
3294       else if (fmt[i] == 'E')
3295 	{
3296 	  int j;
3297 	  for (j = 0; j < XVECLEN (x, i); j++)
3298 	    if (may_trap_p_1 (XVECEXP (x, i, j), flags))
3299 	      return 1;
3300 	}
3301     }
3302   return 0;
3303 }
3304 
3305 /* Return nonzero if evaluating rtx X might cause a trap.  */
3306 
3307 int
3308 may_trap_p (const_rtx x)
3309 {
3310   return may_trap_p_1 (x, 0);
3311 }
3312 
3313 /* Same as above, but additionally return nonzero if evaluating rtx X might
3314    cause a fault.  We define a fault for the purpose of this function as an
3315    erroneous execution condition that cannot be encountered during the normal
3316    execution of a valid program; the typical example is an unaligned memory
3317    access on a strict alignment machine.  The compiler guarantees that it
3318    doesn't generate code that will fault from a valid program, but this
3319    guarantee doesn't mean anything for individual instructions.  Consider
3320    the following example:
3321 
3322       struct S { int d; union { char *cp; int *ip; }; };
3323 
3324       int foo(struct S *s)
3325       {
3326 	if (s->d == 1)
3327 	  return *s->ip;
3328 	else
3329 	  return *s->cp;
3330       }
3331 
3332    on a strict alignment machine.  In a valid program, foo will never be
3333    invoked on a structure for which d is equal to 1 and the underlying
3334    unique field of the union not aligned on a 4-byte boundary, but the
3335    expression *s->ip might cause a fault if considered individually.
3336 
3337    At the RTL level, potentially problematic expressions will almost always
3338    verify may_trap_p; for example, the above dereference can be emitted as
3339    (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
3340    However, suppose that foo is inlined in a caller that causes s->cp to
3341    point to a local character variable and guarantees that s->d is not set
3342    to 1; foo may have been effectively translated into pseudo-RTL as:
3343 
3344       if ((reg:SI) == 1)
3345 	(set (reg:SI) (mem:SI (%fp - 7)))
3346       else
3347 	(set (reg:QI) (mem:QI (%fp - 7)))
3348 
3349    Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
3350    memory reference to a stack slot, but it will certainly cause a fault
3351    on a strict alignment machine.  */
3352 
3353 int
3354 may_trap_or_fault_p (const_rtx x)
3355 {
3356   return may_trap_p_1 (x, 1);
3357 }
3358 
3359 /* Replace any occurrence of FROM in X with TO.  The function does
3360    not descend into CONST_DOUBLEs when doing the replacement.
3361 
3362    Note that copying is not done so X must not be shared unless all copies
3363    are to be modified.
3364 
3365    ALL_REGS is true if we want to replace all REGs equal to FROM, not just
3366    those pointer-equal ones.  */
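/* For example, to substitute NEW_REG for every pointer-equal occurrence of
   OLD_REG in a pattern without modifying the original insn, one would
   typically write (OLD_REG, NEW_REG and PAT are illustrative names only):

     rtx copy = replace_rtx (copy_rtx (pat), old_reg, new_reg, false);  */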
3367 
3368 rtx
3369 replace_rtx (rtx x, rtx from, rtx to, bool all_regs)
3370 {
3371   int i, j;
3372   const char *fmt;
3373 
3374   if (x == from)
3375     return to;
3376 
3377   /* Allow this function to make replacements in EXPR_LISTs.  */
3378   if (x == 0)
3379     return 0;
3380 
3381   if (all_regs
3382       && REG_P (x)
3383       && REG_P (from)
3384       && REGNO (x) == REGNO (from))
3385     {
3386       gcc_assert (GET_MODE (x) == GET_MODE (from));
3387       return to;
3388     }
3389   else if (GET_CODE (x) == SUBREG)
3390     {
3391       rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to, all_regs);
3392 
3393       if (CONST_INT_P (new_rtx))
3394 	{
3395 	  x = simplify_subreg (GET_MODE (x), new_rtx,
3396 			       GET_MODE (SUBREG_REG (x)),
3397 			       SUBREG_BYTE (x));
3398 	  gcc_assert (x);
3399 	}
3400       else
3401 	SUBREG_REG (x) = new_rtx;
3402 
3403       return x;
3404     }
3405   else if (GET_CODE (x) == ZERO_EXTEND)
3406     {
3407       rtx new_rtx = replace_rtx (XEXP (x, 0), from, to, all_regs);
3408 
3409       if (CONST_INT_P (new_rtx))
3410 	{
3411 	  x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
3412 					new_rtx, GET_MODE (XEXP (x, 0)));
3413 	  gcc_assert (x);
3414 	}
3415       else
3416 	XEXP (x, 0) = new_rtx;
3417 
3418       return x;
3419     }
3420 
3421   fmt = GET_RTX_FORMAT (GET_CODE (x));
3422   for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3423     {
3424       if (fmt[i] == 'e')
3425 	XEXP (x, i) = replace_rtx (XEXP (x, i), from, to, all_regs);
3426       else if (fmt[i] == 'E')
3427 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3428 	  XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j),
3429 					   from, to, all_regs);
3430     }
3431 
3432   return x;
3433 }
3434 
3435 /* Replace occurrences of the OLD_LABEL in *LOC with NEW_LABEL.  Also track
3436    the change in LABEL_NUSES if UPDATE_LABEL_NUSES.  */
3437 
3438 void
3439 replace_label (rtx *loc, rtx old_label, rtx new_label, bool update_label_nuses)
3440 {
3441   /* Handle jump tables specially, since ADDR_{DIFF_,}VECs can be long.  */
3442   rtx x = *loc;
3443   if (JUMP_TABLE_DATA_P (x))
3444     {
3445       x = PATTERN (x);
3446       rtvec vec = XVEC (x, GET_CODE (x) == ADDR_DIFF_VEC);
3447       int len = GET_NUM_ELEM (vec);
3448       for (int i = 0; i < len; ++i)
3449 	{
3450 	  rtx ref = RTVEC_ELT (vec, i);
3451 	  if (XEXP (ref, 0) == old_label)
3452 	    {
3453 	      XEXP (ref, 0) = new_label;
3454 	      if (update_label_nuses)
3455 		{
3456 		  ++LABEL_NUSES (new_label);
3457 		  --LABEL_NUSES (old_label);
3458 		}
3459 	    }
3460 	}
3461       return;
3462     }
3463 
3464   /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
3465      field.  This is not handled by the iterator because it doesn't
3466      handle unprinted ('0') fields.  */
3467   if (JUMP_P (x) && JUMP_LABEL (x) == old_label)
3468     JUMP_LABEL (x) = new_label;
3469 
3470   subrtx_ptr_iterator::array_type array;
3471   FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL)
3472     {
3473       rtx *loc = *iter;
3474       if (rtx x = *loc)
3475 	{
3476 	  if (GET_CODE (x) == SYMBOL_REF
3477 	      && CONSTANT_POOL_ADDRESS_P (x))
3478 	    {
3479 	      rtx c = get_pool_constant (x);
3480 	      if (rtx_referenced_p (old_label, c))
3481 		{
3482 		  /* Create a copy of constant C; replace the label inside
3483 		     but do not update LABEL_NUSES because uses in constant pool
3484 		     are not counted.  */
3485 		  rtx new_c = copy_rtx (c);
3486 		  replace_label (&new_c, old_label, new_label, false);
3487 
3488 		  /* Add the new constant NEW_C to constant pool and replace
3489 		     the old reference to constant by new reference.  */
3490 		  rtx new_mem = force_const_mem (get_pool_mode (x), new_c);
3491 		  *loc = replace_rtx (x, x, XEXP (new_mem, 0));
3492 		}
3493 	    }
3494 
3495 	  if ((GET_CODE (x) == LABEL_REF
3496 	       || GET_CODE (x) == INSN_LIST)
3497 	      && XEXP (x, 0) == old_label)
3498 	    {
3499 	      XEXP (x, 0) = new_label;
3500 	      if (update_label_nuses)
3501 		{
3502 		  ++LABEL_NUSES (new_label);
3503 		  --LABEL_NUSES (old_label);
3504 		}
3505 	    }
3506 	}
3507     }
3508 }
3509 
3510 void
3511 replace_label_in_insn (rtx_insn *insn, rtx_insn *old_label,
3512 		       rtx_insn *new_label, bool update_label_nuses)
3513 {
3514   rtx insn_as_rtx = insn;
3515   replace_label (&insn_as_rtx, old_label, new_label, update_label_nuses);
3516   gcc_checking_assert (insn_as_rtx == insn);
3517 }
3518 
3519 /* Return true if X is referenced in BODY.  */
3520 
3521 bool
3522 rtx_referenced_p (const_rtx x, const_rtx body)
3523 {
3524   subrtx_iterator::array_type array;
3525   FOR_EACH_SUBRTX (iter, array, body, ALL)
3526     if (const_rtx y = *iter)
3527       {
3528 	/* Check if a label_ref Y refers to label X.  */
3529 	if (GET_CODE (y) == LABEL_REF
3530 	    && LABEL_P (x)
3531 	    && label_ref_label (y) == x)
3532 	  return true;
3533 
3534 	if (rtx_equal_p (x, y))
3535 	  return true;
3536 
3537 	/* If Y is a reference to pool constant traverse the constant.  */
3538 	if (GET_CODE (y) == SYMBOL_REF
3539 	    && CONSTANT_POOL_ADDRESS_P (y))
3540 	  iter.substitute (get_pool_constant (y));
3541       }
3542   return false;
3543 }
3544 
3545 /* If INSN is a tablejump, return true and store the label (which precedes
3546    the jump table) in *LABELP and the table in *TABLEP.  LABELP and TABLEP may be NULL.  */
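/* A typical use looks like the following (an illustrative sketch only;
   INSN and the helper handle_target_label are hypothetical):

     rtx_insn *label;
     rtx_jump_table_data *table;
     if (tablejump_p (insn, &label, &table))
       {
         rtvec labels = table->get_labels ();
         for (int i = 0; i < GET_NUM_ELEM (labels); ++i)
           handle_target_label (XEXP (RTVEC_ELT (labels, i), 0));
       }
*/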
3547 
3548 bool
3549 tablejump_p (const rtx_insn *insn, rtx_insn **labelp,
3550 	     rtx_jump_table_data **tablep)
3551 {
3552   if (!JUMP_P (insn))
3553     return false;
3554 
3555   rtx target = JUMP_LABEL (insn);
3556   if (target == NULL_RTX || ANY_RETURN_P (target))
3557     return false;
3558 
3559   rtx_insn *label = as_a<rtx_insn *> (target);
3560   rtx_insn *table = next_insn (label);
3561   if (table == NULL_RTX || !JUMP_TABLE_DATA_P (table))
3562     return false;
3563 
3564   if (labelp)
3565     *labelp = label;
3566   if (tablep)
3567     *tablep = as_a <rtx_jump_table_data *> (table);
3568   return true;
3569 }
3570 
3571 /* For INSN known to satisfy tablejump_p, determine if it actually is a
3572    CASESI.  Return the insn pattern if so, NULL_RTX otherwise.  */
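/* The recognized pattern typically has a shape along these lines (an
   illustrative sketch; the exact form is target-dependent):

     (set (pc)
          (if_then_else (ltu (reg:SI index) (const_int range))
                        (mem (plus (mult (reg:SI index) (const_int 4))
                                   (label_ref table_label)))
                        (label_ref default_label)))

   i.e. a single SET of the PC from an IF_THEN_ELSE whose third operand is
   the LABEL_REF of the default label, which is exactly what the checks
   below test for.  */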
3573 
3574 rtx
3575 tablejump_casesi_pattern (const rtx_insn *insn)
3576 {
3577   rtx tmp;
3578 
3579   if ((tmp = single_set (insn)) != NULL
3580       && SET_DEST (tmp) == pc_rtx
3581       && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3582       && GET_CODE (XEXP (SET_SRC (tmp), 2)) == LABEL_REF)
3583     return tmp;
3584 
3585   return NULL_RTX;
3586 }
3587 
3588 /* A subroutine of computed_jump_p.  Return 1 if X contains a REG or MEM or
3589    constant that is not in the constant pool, ignoring anything that appears
3590    only in the condition of an IF_THEN_ELSE.  */
3591 
3592 static int
3593 computed_jump_p_1 (const_rtx x)
3594 {
3595   const enum rtx_code code = GET_CODE (x);
3596   int i, j;
3597   const char *fmt;
3598 
3599   switch (code)
3600     {
3601     case LABEL_REF:
3602     case PC:
3603       return 0;
3604 
3605     case CONST:
3606     CASE_CONST_ANY:
3607     case SYMBOL_REF:
3608     case REG:
3609       return 1;
3610 
3611     case MEM:
3612       return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
3613 		&& CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));
3614 
3615     case IF_THEN_ELSE:
3616       return (computed_jump_p_1 (XEXP (x, 1))
3617 	      || computed_jump_p_1 (XEXP (x, 2)));
3618 
3619     default:
3620       break;
3621     }
3622 
3623   fmt = GET_RTX_FORMAT (code);
3624   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3625     {
3626       if (fmt[i] == 'e'
3627 	  && computed_jump_p_1 (XEXP (x, i)))
3628 	return 1;
3629 
3630       else if (fmt[i] == 'E')
3631 	for (j = 0; j < XVECLEN (x, i); j++)
3632 	  if (computed_jump_p_1 (XVECEXP (x, i, j)))
3633 	    return 1;
3634     }
3635 
3636   return 0;
3637 }
3638 
3639 /* Return nonzero if INSN is an indirect jump (aka computed jump).
3640 
3641    Tablejumps and casesi insns are not considered indirect jumps;
3642    we can recognize them by a (use (label_ref)).  */
3643 
3644 int
3645 computed_jump_p (const rtx_insn *insn)
3646 {
3647   int i;
3648   if (JUMP_P (insn))
3649     {
3650       rtx pat = PATTERN (insn);
3651 
3652       /* If we have a JUMP_LABEL set, we're not a computed jump.  */
3653       if (JUMP_LABEL (insn) != NULL)
3654 	return 0;
3655 
3656       if (GET_CODE (pat) == PARALLEL)
3657 	{
3658 	  int len = XVECLEN (pat, 0);
3659 	  int has_use_labelref = 0;
3660 
3661 	  for (i = len - 1; i >= 0; i--)
3662 	    if (GET_CODE (XVECEXP (pat, 0, i)) == USE
3663 		&& (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
3664 		    == LABEL_REF))
3665 	      {
3666 	        has_use_labelref = 1;
3667 	        break;
3668 	      }
3669 
3670 	  if (! has_use_labelref)
3671 	    for (i = len - 1; i >= 0; i--)
3672 	      if (GET_CODE (XVECEXP (pat, 0, i)) == SET
3673 		  && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
3674 		  && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
3675 		return 1;
3676 	}
3677       else if (GET_CODE (pat) == SET
3678 	       && SET_DEST (pat) == pc_rtx
3679 	       && computed_jump_p_1 (SET_SRC (pat)))
3680 	return 1;
3681     }
3682   return 0;
3683 }
3684 
3685 
3686 
3687 /* MEM has a PRE/POST-INC/DEC/MODIFY address X.  Extract the operands of
3688    the equivalent add insn and pass the result to FN, using DATA as the
3689    final argument.  */
3690 
3691 static int
3692 for_each_inc_dec_find_inc_dec (rtx mem, for_each_inc_dec_fn fn, void *data)
3693 {
3694   rtx x = XEXP (mem, 0);
3695   switch (GET_CODE (x))
3696     {
3697     case PRE_INC:
3698     case POST_INC:
3699       {
3700 	poly_int64 size = GET_MODE_SIZE (GET_MODE (mem));
3701 	rtx r1 = XEXP (x, 0);
3702 	rtx c = gen_int_mode (size, GET_MODE (r1));
3703 	return fn (mem, x, r1, r1, c, data);
3704       }
3705 
3706     case PRE_DEC:
3707     case POST_DEC:
3708       {
3709 	poly_int64 size = GET_MODE_SIZE (GET_MODE (mem));
3710 	rtx r1 = XEXP (x, 0);
3711 	rtx c = gen_int_mode (-size, GET_MODE (r1));
3712 	return fn (mem, x, r1, r1, c, data);
3713       }
3714 
3715     case PRE_MODIFY:
3716     case POST_MODIFY:
3717       {
3718 	rtx r1 = XEXP (x, 0);
3719 	rtx add = XEXP (x, 1);
3720 	return fn (mem, x, r1, add, NULL, data);
3721       }
3722 
3723     default:
3724       gcc_unreachable ();
3725     }
3726 }
3727 
3728 /* Traverse X looking for MEMs that have autoinc addresses.
3729    For each such autoinc operation found, call FN, passing it
3730    the innermost enclosing MEM, the operation itself, the RTX modified
3731    by the operation, two RTXs (the second may be NULL) that, once
3732    added, represent the value to be held by the modified RTX
3733    afterwards, and DATA.  FN is to return 0 to continue the
3734    traversal or any other value to have it returned to the caller of
3735    for_each_inc_dec.  */
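/* For instance, a caller that merely counts the autoinc operations in an
   insn might use a callback like this (an illustrative sketch; the names
   are hypothetical, and the callback signature is assumed to match the way
   FN is invoked by for_each_inc_dec_find_inc_dec above):

     static int
     count_autoinc (rtx mem ATTRIBUTE_UNUSED, rtx op ATTRIBUTE_UNUSED,
                    rtx dest ATTRIBUTE_UNUSED, rtx src ATTRIBUTE_UNUSED,
                    rtx srcoff ATTRIBUTE_UNUSED, void *data)
     {
       ++*(int *) data;
       return 0;
     }

     int count = 0;
     for_each_inc_dec (PATTERN (insn), count_autoinc, &count);  */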
3736 
3737 int
3738 for_each_inc_dec (rtx x,
3739 		  for_each_inc_dec_fn fn,
3740 		  void *data)
3741 {
3742   subrtx_var_iterator::array_type array;
3743   FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
3744     {
3745       rtx mem = *iter;
3746       if (mem
3747 	  && MEM_P (mem)
3748 	  && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
3749 	{
3750 	  int res = for_each_inc_dec_find_inc_dec (mem, fn, data);
3751 	  if (res != 0)
3752 	    return res;
3753 	  iter.skip_subrtxes ();
3754 	}
3755     }
3756   return 0;
3757 }
3758 
3759 
3760 /* Search X for any reference to REGNO.  Return the rtx of the reference
3761    found if any, otherwise NULL_RTX.  */
3762 
3763 rtx
3764 regno_use_in (unsigned int regno, rtx x)
3765 {
3766   const char *fmt;
3767   int i, j;
3768   rtx tem;
3769 
3770   if (REG_P (x) && REGNO (x) == regno)
3771     return x;
3772 
3773   fmt = GET_RTX_FORMAT (GET_CODE (x));
3774   for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3775     {
3776       if (fmt[i] == 'e')
3777 	{
3778 	  if ((tem = regno_use_in (regno, XEXP (x, i))))
3779 	    return tem;
3780 	}
3781       else if (fmt[i] == 'E')
3782 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3783 	  if ((tem = regno_use_in (regno , XVECEXP (x, i, j))))
3784 	    return tem;
3785     }
3786 
3787   return NULL_RTX;
3788 }
3789 
3790 /* Return a value indicating whether OP, an operand of a commutative
3791    operation, is preferred as the first or second operand.  The more
3792    positive the value, the stronger the preference for being the first
3793    operand.  */
3794 
3795 int
3796 commutative_operand_precedence (rtx op)
3797 {
3798   enum rtx_code code = GET_CODE (op);
3799 
3800   /* Constants always become the second operand.  Prefer "nice" constants.  */
3801   if (code == CONST_INT)
3802     return -10;
3803   if (code == CONST_WIDE_INT)
3804     return -9;
3805   if (code == CONST_POLY_INT)
3806     return -8;
3807   if (code == CONST_DOUBLE)
3808     return -8;
3809   if (code == CONST_FIXED)
3810     return -8;
3811   op = avoid_constant_pool_reference (op);
3812   code = GET_CODE (op);
3813 
3814   switch (GET_RTX_CLASS (code))
3815     {
3816     case RTX_CONST_OBJ:
3817       if (code == CONST_INT)
3818 	return -7;
3819       if (code == CONST_WIDE_INT)
3820 	return -6;
3821       if (code == CONST_POLY_INT)
3822 	return -5;
3823       if (code == CONST_DOUBLE)
3824 	return -5;
3825       if (code == CONST_FIXED)
3826 	return -5;
3827       return -4;
3828 
3829     case RTX_EXTRA:
3830       /* SUBREGs of objects should come second.  */
3831       if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
3832         return -3;
3833       return 0;
3834 
3835     case RTX_OBJ:
3836       /* Complex expressions should come first, so decrease the priority
3837          of objects.  Prefer pointer objects over non-pointer objects.  */
3838       if ((REG_P (op) && REG_POINTER (op))
3839 	  || (MEM_P (op) && MEM_POINTER (op)))
3840 	return -1;
3841       return -2;
3842 
3843     case RTX_COMM_ARITH:
3844       /* Prefer operands that are themselves commutative to be first.
3845          This helps to make things linear.  In particular,
3846          (and (and (reg) (reg)) (not (reg))) is canonical.  */
3847       return 4;
3848 
3849     case RTX_BIN_ARITH:
3850       /* If only one operand is a binary expression, it will be the first
3851          operand.  In particular,  (plus (minus (reg) (reg)) (neg (reg)))
3852          is canonical, although it will usually be further simplified.  */
3853       return 2;
3854 
3855     case RTX_UNARY:
3856       /* Then prefer NEG and NOT.  */
3857       if (code == NEG || code == NOT)
3858         return 1;
3859       /* FALLTHRU */
3860 
3861     default:
3862       return 0;
3863     }
3864 }
3865 
3866 /* Return 1 iff it is necessary to swap the operands of a commutative
3867    operation in order to canonicalize the expression.  */
3868 
3869 bool
3870 swap_commutative_operands_p (rtx x, rtx y)
3871 {
3872   return (commutative_operand_precedence (x)
3873 	  < commutative_operand_precedence (y));
3874 }
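/* For example, with X = (const_int 4) and Y = (reg:SI R), the precedences
   computed above are -10 and -2 (or -1 if R is known to be a pointer), so
   the function returns true and the canonical form of the sum is
   (plus:SI (reg:SI R) (const_int 4)) rather than the other way around.  */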
3875 
3876 /* Return 1 if X is an autoincrement side effect and the register is
3877    not the stack pointer.  */
3878 int
3879 auto_inc_p (const_rtx x)
3880 {
3881   switch (GET_CODE (x))
3882     {
3883     case PRE_INC:
3884     case POST_INC:
3885     case PRE_DEC:
3886     case POST_DEC:
3887     case PRE_MODIFY:
3888     case POST_MODIFY:
3889       /* There are no REG_INC notes for SP.  */
3890       if (XEXP (x, 0) != stack_pointer_rtx)
3891 	return 1;
3892     default:
3893       break;
3894     }
3895   return 0;
3896 }
3897 
3898 /* Return nonzero if IN contains a piece of rtl that has the address LOC.  */
3899 int
3900 loc_mentioned_in_p (rtx *loc, const_rtx in)
3901 {
3902   enum rtx_code code;
3903   const char *fmt;
3904   int i, j;
3905 
3906   if (!in)
3907     return 0;
3908 
3909   code = GET_CODE (in);
3910   fmt = GET_RTX_FORMAT (code);
3911   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3912     {
3913       if (fmt[i] == 'e')
3914 	{
3915 	  if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
3916 	    return 1;
3917 	}
3918       else if (fmt[i] == 'E')
3919 	for (j = XVECLEN (in, i) - 1; j >= 0; j--)
3920 	  if (loc == &XVECEXP (in, i, j)
3921 	      || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
3922 	    return 1;
3923     }
3924   return 0;
3925 }
3926 
3927 /* Reinterpret a subreg as a bit extraction from an integer and return
3928    the position of the least significant bit of the extracted value.
3929    In other words, if the extraction were performed as a shift right
3930    and mask, return the number of bits to shift right.
3931 
3932    The outer value of the subreg has OUTER_BYTES bytes and starts at
3933    byte offset SUBREG_BYTE within an inner value of INNER_BYTES bytes.  */
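/* For example, consider (subreg:SI (reg:DI R) 4) on a target where SImode
   is 4 bytes and DImode is 8, so OUTER_BYTES = 4, INNER_BYTES = 8 and
   SUBREG_BYTE = 4.  On a little-endian target the subreg starts at bit
   4 * BITS_PER_UNIT = 32, i.e. it is the high half of R; on a target where
   both bytes and words are big-endian the trailing byte count is
   8 - (4 + 4) = 0, so the same subreg starts at bit 0 and is the low half.  */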
3934 
3935 poly_uint64
3936 subreg_size_lsb (poly_uint64 outer_bytes,
3937 		 poly_uint64 inner_bytes,
3938 		 poly_uint64 subreg_byte)
3939 {
3940   poly_uint64 subreg_end, trailing_bytes, byte_pos;
3941 
3942   /* A paradoxical subreg begins at bit position 0.  */
3943   gcc_checking_assert (ordered_p (outer_bytes, inner_bytes));
3944   if (maybe_gt (outer_bytes, inner_bytes))
3945     {
3946       gcc_checking_assert (known_eq (subreg_byte, 0U));
3947       return 0;
3948     }
3949 
3950   subreg_end = subreg_byte + outer_bytes;
3951   trailing_bytes = inner_bytes - subreg_end;
3952   if (WORDS_BIG_ENDIAN && BYTES_BIG_ENDIAN)
3953     byte_pos = trailing_bytes;
3954   else if (!WORDS_BIG_ENDIAN && !BYTES_BIG_ENDIAN)
3955     byte_pos = subreg_byte;
3956   else
3957     {
3958       /* When bytes and words have opposite endianness, we must be able
3959 	 to split offsets into words and bytes at compile time.  */
3960       poly_uint64 leading_word_part
3961 	= force_align_down (subreg_byte, UNITS_PER_WORD);
3962       poly_uint64 trailing_word_part
3963 	= force_align_down (trailing_bytes, UNITS_PER_WORD);
3964       /* If the subreg crosses a word boundary ensure that
3965 	 it also begins and ends on a word boundary.  */
3966       gcc_assert (known_le (subreg_end - leading_word_part,
3967 			    (unsigned int) UNITS_PER_WORD)
3968 		  || (known_eq (leading_word_part, subreg_byte)
3969 		      && known_eq (trailing_word_part, trailing_bytes)));
3970       if (WORDS_BIG_ENDIAN)
3971 	byte_pos = trailing_word_part + (subreg_byte - leading_word_part);
3972       else
3973 	byte_pos = leading_word_part + (trailing_bytes - trailing_word_part);
3974     }
3975 
3976   return byte_pos * BITS_PER_UNIT;
3977 }
3978 
3979 /* Given a subreg X, return the bit offset where the subreg begins
3980    (counting from the least significant bit of the reg).  */
3981 
3982 poly_uint64
3983 subreg_lsb (const_rtx x)
3984 {
3985   return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
3986 		       SUBREG_BYTE (x));
3987 }
3988 
3989 /* Return the subreg byte offset for a subreg whose outer value has
3990    OUTER_BYTES bytes, whose inner value has INNER_BYTES bytes, and where
3991    there are LSB_SHIFT *bits* between the lsb of the outer value and the
3992    lsb of the inner value.  This is the inverse of the calculation
3993    performed by subreg_lsb_1 (which converts byte offsets to bit shifts).  */
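/* Reusing the example given for subreg_size_lsb above (4-byte outer value
   inside an 8-byte inner value): with LSB_SHIFT = 32 bits, LOWER_BYTES is 4,
   so on a little-endian target the function returns byte offset 4, while on
   a fully big-endian target UPPER_BYTES is 8 - (4 + 4) = 0 and the returned
   offset is 0, since the high half of a big-endian value lives at offset 0.  */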
3994 
3995 poly_uint64
3996 subreg_size_offset_from_lsb (poly_uint64 outer_bytes, poly_uint64 inner_bytes,
3997 			     poly_uint64 lsb_shift)
3998 {
3999   /* A paradoxical subreg begins at bit position 0.  */
4000   gcc_checking_assert (ordered_p (outer_bytes, inner_bytes));
4001   if (maybe_gt (outer_bytes, inner_bytes))
4002     {
4003       gcc_checking_assert (known_eq (lsb_shift, 0U));
4004       return 0;
4005     }
4006 
4007   poly_uint64 lower_bytes = exact_div (lsb_shift, BITS_PER_UNIT);
4008   poly_uint64 upper_bytes = inner_bytes - (lower_bytes + outer_bytes);
4009   if (WORDS_BIG_ENDIAN && BYTES_BIG_ENDIAN)
4010     return upper_bytes;
4011   else if (!WORDS_BIG_ENDIAN && !BYTES_BIG_ENDIAN)
4012     return lower_bytes;
4013   else
4014     {
4015       /* When bytes and words have opposite endianness, we must be able
4016 	 to split offsets into words and bytes at compile time.  */
4017       poly_uint64 lower_word_part = force_align_down (lower_bytes,
4018 						      UNITS_PER_WORD);
4019       poly_uint64 upper_word_part = force_align_down (upper_bytes,
4020 						      UNITS_PER_WORD);
4021       if (WORDS_BIG_ENDIAN)
4022 	return upper_word_part + (lower_bytes - lower_word_part);
4023       else
4024 	return lower_word_part + (upper_bytes - upper_word_part);
4025     }
4026 }
4027 
4028 /* Fill in information about a subreg of a hard register.
4029    xregno - A regno of an inner hard subreg_reg (or what will become one).
4030    xmode  - The mode of xregno.
4031    offset - The byte offset.
4032    ymode  - The mode of a top level SUBREG (or what may become one).
4033    info   - Pointer to structure to fill in.
4034 
4035    Rather than considering one particular inner register (and thus one
4036    particular "outer" register) in isolation, this function really uses
4037    XREGNO as a model for a sequence of isomorphic hard registers.  Thus the
4038    function does not check whether adding INFO->offset to XREGNO gives
4039    a valid hard register; even if INFO->offset + XREGNO is out of range,
4040    there might be another register of the same type that is in range.
4041    Likewise it doesn't check whether targetm.hard_regno_mode_ok accepts
4042    the new register, since that can depend on things like whether the final
4043    register number is even or odd.  Callers that want to check whether
4044    this particular subreg can be replaced by a simple (reg ...) should
4045    use simplify_subreg_regno.  */
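/* As an illustration, assume a target with 4-byte words on which DImode
   occupies two consecutive hard registers and REG_WORDS_BIG_ENDIAN is false.
   Then

     subreg_get_info (R, DImode, 4, SImode, &info);

   describes the high word and yields info.representable_p = true,
   info.offset = 1 and info.nregs = 1, i.e. the subreg corresponds in
   principle to hard register R + 1 (subject to the extra checks performed
   by simplify_subreg_regno).  */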
4046 
4047 void
4048 subreg_get_info (unsigned int xregno, machine_mode xmode,
4049 		 poly_uint64 offset, machine_mode ymode,
4050 		 struct subreg_info *info)
4051 {
4052   unsigned int nregs_xmode, nregs_ymode;
4053 
4054   gcc_assert (xregno < FIRST_PSEUDO_REGISTER);
4055 
4056   poly_uint64 xsize = GET_MODE_SIZE (xmode);
4057   poly_uint64 ysize = GET_MODE_SIZE (ymode);
4058 
4059   bool rknown = false;
4060 
4061   /* If the register representation of a non-scalar mode has holes in it,
4062      we expect the scalar units to be concatenated together, with the holes
4063      distributed evenly among the scalar units.  Each scalar unit must occupy
4064      at least one register.  */
4065   if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
4066     {
4067       /* As a consequence, we must be dealing with a constant number of
4068 	 scalars, and thus a constant offset and number of units.  */
4069       HOST_WIDE_INT coffset = offset.to_constant ();
4070       HOST_WIDE_INT cysize = ysize.to_constant ();
4071       nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
4072       unsigned int nunits = GET_MODE_NUNITS (xmode).to_constant ();
4073       scalar_mode xmode_unit = GET_MODE_INNER (xmode);
4074       gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
4075       gcc_assert (nregs_xmode
4076 		  == (nunits
4077 		      * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
4078       gcc_assert (hard_regno_nregs (xregno, xmode)
4079 		  == hard_regno_nregs (xregno, xmode_unit) * nunits);
4080 
4081       /* You can only ask for a SUBREG of a value with holes in the middle
4082 	 if you don't cross the holes.  (Such a SUBREG should be done by
4083 	 picking a different register class, or doing it in memory if
4084 	 necessary.)  An example of a value with holes is XCmode on 32-bit
4085 	 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
4086 	 3 for each part, but in memory it's two 128-bit parts.
4087 	 Padding is assumed to be at the end (not necessarily the 'high part')
4088 	 of each unit.  */
4089       if ((coffset / GET_MODE_SIZE (xmode_unit) + 1 < nunits)
4090 	  && (coffset / GET_MODE_SIZE (xmode_unit)
4091 	      != ((coffset + cysize - 1) / GET_MODE_SIZE (xmode_unit))))
4092 	{
4093 	  info->representable_p = false;
4094 	  rknown = true;
4095 	}
4096     }
4097   else
4098     nregs_xmode = hard_regno_nregs (xregno, xmode);
4099 
4100   nregs_ymode = hard_regno_nregs (xregno, ymode);
4101 
4102   /* Subreg sizes must be ordered, so that we can tell whether they are
4103      partial, paradoxical or complete.  */
4104   gcc_checking_assert (ordered_p (xsize, ysize));
4105 
4106   /* Paradoxical subregs are otherwise valid.  */
4107   if (!rknown && known_eq (offset, 0U) && maybe_gt (ysize, xsize))
4108     {
4109       info->representable_p = true;
4110       /* If this is a big endian paradoxical subreg, which uses more
4111 	 actual hard registers than the original register, we must
4112 	 return a negative offset so that we find the proper highpart
4113 	 of the register.
4114 
4115 	 We assume that the ordering of registers within a multi-register
4116 	 value has a consistent endianness: if bytes and register words
4117 	 have different endianness, the hard registers that make up a
4118 	 multi-register value must be at least word-sized.  */
4119       if (REG_WORDS_BIG_ENDIAN)
4120 	info->offset = (int) nregs_xmode - (int) nregs_ymode;
4121       else
4122 	info->offset = 0;
4123       info->nregs = nregs_ymode;
4124       return;
4125     }
4126 
4127   /* If registers store different numbers of bits in the different
4128      modes, we cannot generally form this subreg.  */
4129   poly_uint64 regsize_xmode, regsize_ymode;
4130   if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
4131       && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
4132       && multiple_p (xsize, nregs_xmode, &regsize_xmode)
4133       && multiple_p (ysize, nregs_ymode, &regsize_ymode))
4134     {
4135       if (!rknown
4136 	  && ((nregs_ymode > 1 && maybe_gt (regsize_xmode, regsize_ymode))
4137 	      || (nregs_xmode > 1 && maybe_gt (regsize_ymode, regsize_xmode))))
4138 	{
4139 	  info->representable_p = false;
4140 	  if (!can_div_away_from_zero_p (ysize, regsize_xmode, &info->nregs)
4141 	      || !can_div_trunc_p (offset, regsize_xmode, &info->offset))
4142 	    /* Checked by validate_subreg.  We must know at compile time
4143 	       which inner registers are being accessed.  */
4144 	    gcc_unreachable ();
4145 	  return;
4146 	}
4147       /* It's not valid to extract a subreg of mode YMODE at OFFSET that
4148 	 would go outside of XMODE.  */
4149       if (!rknown && maybe_gt (ysize + offset, xsize))
4150 	{
4151 	  info->representable_p = false;
4152 	  info->nregs = nregs_ymode;
4153 	  if (!can_div_trunc_p (offset, regsize_xmode, &info->offset))
4154 	    /* Checked by validate_subreg.  We must know at compile time
4155 	       which inner registers are being accessed.  */
4156 	    gcc_unreachable ();
4157 	  return;
4158 	}
4159       /* Quick exit for the simple and common case of extracting whole
4160 	 subregisters from a multiregister value.  */
4161       /* ??? It would be better to integrate this into the code below,
4162 	 if we can generalize the concept enough and figure out how
4163 	 odd-sized modes can coexist with the other weird cases we support.  */
4164       HOST_WIDE_INT count;
4165       if (!rknown
4166 	  && WORDS_BIG_ENDIAN == REG_WORDS_BIG_ENDIAN
4167 	  && known_eq (regsize_xmode, regsize_ymode)
4168 	  && constant_multiple_p (offset, regsize_ymode, &count))
4169 	{
4170 	  info->representable_p = true;
4171 	  info->nregs = nregs_ymode;
4172 	  info->offset = count;
4173 	  gcc_assert (info->offset + info->nregs <= (int) nregs_xmode);
4174 	  return;
4175 	}
4176     }
4177 
4178   /* Lowpart subregs are otherwise valid.  */
4179   if (!rknown && known_eq (offset, subreg_lowpart_offset (ymode, xmode)))
4180     {
4181       info->representable_p = true;
4182       rknown = true;
4183 
4184       if (known_eq (offset, 0U) || nregs_xmode == nregs_ymode)
4185 	{
4186 	  info->offset = 0;
4187 	  info->nregs = nregs_ymode;
4188 	  return;
4189 	}
4190     }
4191 
4192   /* Set NUM_BLOCKS to the number of independently-representable YMODE
4193      values there are in (reg:XMODE XREGNO).  We can view the register
4194      as consisting of this number of independent "blocks", where each
4195      block occupies NREGS_YMODE registers and contains exactly one
4196      representable YMODE value.  */
4197   gcc_assert ((nregs_xmode % nregs_ymode) == 0);
4198   unsigned int num_blocks = nregs_xmode / nregs_ymode;
4199 
4200   /* Calculate the number of bytes in each block.  This must always
4201      be exact, otherwise we don't know how to verify the constraint.
4202      These conditions may be relaxed but subreg_regno_offset would
4203      need to be redesigned.  */
4204   poly_uint64 bytes_per_block = exact_div (xsize, num_blocks);
4205 
4206   /* Get the number of the first block that contains the subreg and the byte
4207      offset of the subreg from the start of that block.  */
4208   unsigned int block_number;
4209   poly_uint64 subblock_offset;
4210   if (!can_div_trunc_p (offset, bytes_per_block, &block_number,
4211 			&subblock_offset))
4212     /* Checked by validate_subreg.  We must know at compile time which
4213        inner registers are being accessed.  */
4214     gcc_unreachable ();
4215 
4216   if (!rknown)
4217     {
4218       /* Only the lowpart of each block is representable.  */
4219       info->representable_p
4220 	= known_eq (subblock_offset,
4221 		    subreg_size_lowpart_offset (ysize, bytes_per_block));
4222       rknown = true;
4223     }
4224 
4225   /* We assume that the ordering of registers within a multi-register
4226      value has a consistent endianness: if bytes and register words
4227      have different endianness, the hard registers that make up a
4228      multi-register value must be at least word-sized.  */
4229   if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN)
4230     /* The block number we calculated above followed memory endianness.
4231        Convert it to register endianness by counting back from the end.
4232        (Note that, because of the assumption above, each block must be
4233        at least word-sized.)  */
4234     info->offset = (num_blocks - block_number - 1) * nregs_ymode;
4235   else
4236     info->offset = block_number * nregs_ymode;
4237   info->nregs = nregs_ymode;
4238 }
4239 
4240 /* This function returns the regno offset of a subreg expression.
4241    xregno - A regno of an inner hard subreg_reg (or what will become one).
4242    xmode  - The mode of xregno.
4243    offset - The byte offset.
4244    ymode  - The mode of a top level SUBREG (or what may become one).
4245    RETURN - The regno offset which would be used.  */
4246 unsigned int
4247 subreg_regno_offset (unsigned int xregno, machine_mode xmode,
4248 		     poly_uint64 offset, machine_mode ymode)
4249 {
4250   struct subreg_info info;
4251   subreg_get_info (xregno, xmode, offset, ymode, &info);
4252   return info.offset;
4253 }
4254 
4255 /* This function returns true when the offset is representable via
4256    subreg_offset in the given regno.
4257    xregno - A regno of an inner hard subreg_reg (or what will become one).
4258    xmode  - The mode of xregno.
4259    offset - The byte offset.
4260    ymode  - The mode of a top level SUBREG (or what may become one).
4261    RETURN - Whether the offset is representable.  */
4262 bool
4263 subreg_offset_representable_p (unsigned int xregno, machine_mode xmode,
4264 			       poly_uint64 offset, machine_mode ymode)
4265 {
4266   struct subreg_info info;
4267   subreg_get_info (xregno, xmode, offset, ymode, &info);
4268   return info.representable_p;
4269 }
4270 
4271 /* Return the number of a YMODE register to which
4272 
4273        (subreg:YMODE (reg:XMODE XREGNO) OFFSET)
4274 
4275    can be simplified.  Return -1 if the subreg can't be simplified.
4276 
4277    XREGNO is a hard register number.  */
4278 
4279 int
4280 simplify_subreg_regno (unsigned int xregno, machine_mode xmode,
4281 		       poly_uint64 offset, machine_mode ymode)
4282 {
4283   struct subreg_info info;
4284   unsigned int yregno;
4285 
4286   /* Give the backend a chance to disallow the mode change.  */
4287   if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
4288       && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
4289       && !REG_CAN_CHANGE_MODE_P (xregno, xmode, ymode))
4290     return -1;
4291 
4292   /* We shouldn't simplify stack-related registers.  */
4293   if ((!reload_completed || frame_pointer_needed)
4294       && xregno == FRAME_POINTER_REGNUM)
4295     return -1;
4296 
4297   if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4298       && xregno == ARG_POINTER_REGNUM)
4299     return -1;
4300 
4301   if (xregno == STACK_POINTER_REGNUM
4302       /* We should convert the hard stack register in LRA when that is
4303 	 possible.  */
4304       && ! lra_in_progress)
4305     return -1;
4306 
4307   /* Try to get the register offset.  */
4308   subreg_get_info (xregno, xmode, offset, ymode, &info);
4309   if (!info.representable_p)
4310     return -1;
4311 
4312   /* Make sure that the offsetted register value is in range.  */
4313   yregno = xregno + info.offset;
4314   if (!HARD_REGISTER_NUM_P (yregno))
4315     return -1;
4316 
4317   /* See whether (reg:YMODE YREGNO) is valid.
4318 
4319      ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
4320      This is a kludge to work around how complex FP arguments are passed
4321      on IA-64 and should be fixed.  See PR target/49226.  */
4322   if (!targetm.hard_regno_mode_ok (yregno, ymode)
4323       && targetm.hard_regno_mode_ok (xregno, xmode))
4324     return -1;
4325 
4326   return (int) yregno;
4327 }
4328 
4329 /* A wrapper around simplify_subreg_regno that uses subreg_lowpart_offset
4330    (xmode, ymode) as the offset.  */
4331 
4332 int
4333 lowpart_subreg_regno (unsigned int regno, machine_mode xmode,
4334 		      machine_mode ymode)
4335 {
4336   poly_uint64 offset = subreg_lowpart_offset (xmode, ymode);
4337   return simplify_subreg_regno (regno, xmode, offset, ymode);
4338 }
4339 
4340 /* Return the final regno that a subreg expression refers to.  */
4341 unsigned int
4342 subreg_regno (const_rtx x)
4343 {
4344   unsigned int ret;
4345   rtx subreg = SUBREG_REG (x);
4346   int regno = REGNO (subreg);
4347 
4348   ret = regno + subreg_regno_offset (regno,
4349 				     GET_MODE (subreg),
4350 				     SUBREG_BYTE (x),
4351 				     GET_MODE (x));
4352   return ret;
4353 
4354 }
4355 
4356 /* Return the number of registers that a subreg expression refers
4357    to.  */
4358 unsigned int
4359 subreg_nregs (const_rtx x)
4360 {
4361   return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
4362 }
4363 
4364 /* Return the number of registers that the subreg expression X, whose inner
4365    register has number REGNO, refers to.  This is a copy of subreg_nregs
4366    above, changed so that the regno can be passed in.  */
4367 
4368 unsigned int
4369 subreg_nregs_with_regno (unsigned int regno, const_rtx x)
4370 {
4371   struct subreg_info info;
4372   rtx subreg = SUBREG_REG (x);
4373 
4374   subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
4375 		   &info);
4376   return info.nregs;
4377 }
4378 
4379 struct parms_set_data
4380 {
4381   int nregs;
4382   HARD_REG_SET regs;
4383 };
4384 
4385 /* Helper function for noticing stores to parameter registers.  */
4386 static void
4387 parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
4388 {
4389   struct parms_set_data *const d = (struct parms_set_data *) data;
4390   if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
4391       && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
4392     {
4393       CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
4394       d->nregs--;
4395     }
4396 }
4397 
4398 /* Look backward from CALL_INSN for the first instruction that loads a
4399    parameter register.  Note that not all parameter loads will necessarily
4400    be found if CSE has eliminated some of them (e.g. an argument to the
4401    enclosing function is passed straight through as a parameter).
4402    Do not search past BOUNDARY.  */
4403 rtx_insn *
4404 find_first_parameter_load (rtx_insn *call_insn, rtx_insn *boundary)
4405 {
4406   struct parms_set_data parm;
4407   rtx p;
4408   rtx_insn *before, *first_set;
4409 
4410   /* Since different machines initialize their parameter registers
4411      in different orders, assume nothing.  Collect the set of all
4412      parameter registers.  */
4413   CLEAR_HARD_REG_SET (parm.regs);
4414   parm.nregs = 0;
4415   for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
4416     if (GET_CODE (XEXP (p, 0)) == USE
4417 	&& REG_P (XEXP (XEXP (p, 0), 0))
4418 	&& !STATIC_CHAIN_REG_P (XEXP (XEXP (p, 0), 0)))
4419       {
4420 	gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);
4421 
4422 	/* We only care about registers which can hold function
4423 	   arguments.  */
4424 	if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
4425 	  continue;
4426 
4427 	SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
4428 	parm.nregs++;
4429       }
4430   before = call_insn;
4431   first_set = call_insn;
4432 
4433   /* Search backward for the first set of a register in this set.  */
4434   while (parm.nregs && before != boundary)
4435     {
4436       before = PREV_INSN (before);
4437 
4438       /* It is possible that some loads got CSEed from one call to
4439          another.  Stop in that case.  */
4440       if (CALL_P (before))
4441 	break;
4442 
4443       /* Our caller must either ensure that we will find all sets
4444          (in case the code has not been optimized yet), or guard against
4445          intervening labels by setting BOUNDARY to the preceding
4446          CODE_LABEL.  */
4447       if (LABEL_P (before))
4448 	{
4449 	  gcc_assert (before == boundary);
4450 	  break;
4451 	}
4452 
4453       if (INSN_P (before))
4454 	{
4455 	  int nregs_old = parm.nregs;
4456 	  note_stores (before, parms_set, &parm);
4457 	  /* If we found something that did not set a parameter reg,
4458 	     we're done.  Do not keep going, as that might result
4459 	     in hoisting an insn before the setting of a pseudo
4460 	     that is used by the hoisted insn. */
4461 	  if (nregs_old != parm.nregs)
4462 	    first_set = before;
4463 	  else
4464 	    break;
4465 	}
4466     }
4467   return first_set;
4468 }
4469 
4470 /* Return true if we should avoid inserting code between INSN and preceding
4471    call instruction.  */
4472 
4473 bool
4474 keep_with_call_p (const rtx_insn *insn)
4475 {
4476   rtx set;
4477 
4478   if (INSN_P (insn) && (set = single_set (insn)) != NULL)
4479     {
4480       if (REG_P (SET_DEST (set))
4481 	  && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
4482 	  && fixed_regs[REGNO (SET_DEST (set))]
4483 	  && general_operand (SET_SRC (set), VOIDmode))
4484 	return true;
4485       if (REG_P (SET_SRC (set))
4486 	  && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
4487 	  && REG_P (SET_DEST (set))
4488 	  && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
4489 	return true;
4490       /* There may be a stack pop just after the call and before the store
4491 	 of the return register.  Search for the actual store when deciding
4492 	 if we can break or not.  */
4493       if (SET_DEST (set) == stack_pointer_rtx)
4494 	{
4495 	  /* This CONST_CAST is okay because next_nonnote_insn just
4496 	     returns its argument and we assign it to a const_rtx
4497 	     variable.  */
4498 	  const rtx_insn *i2
4499 	    = next_nonnote_insn (const_cast<rtx_insn *> (insn));
4500 	  if (i2 && keep_with_call_p (i2))
4501 	    return true;
4502 	}
4503     }
4504   return false;
4505 }
4506 
4507 /* Return true if LABEL is a target of JUMP_INSN.  This applies only
4508    to non-complex jumps.  That is, direct unconditional, conditional,
4509    and tablejumps, but not computed jumps or returns.  It also does
4510    not apply to the fallthru case of a conditional jump.  */
4511 
4512 bool
4513 label_is_jump_target_p (const_rtx label, const rtx_insn *jump_insn)
4514 {
4515   rtx tmp = JUMP_LABEL (jump_insn);
4516   rtx_jump_table_data *table;
4517 
4518   if (label == tmp)
4519     return true;
4520 
4521   if (tablejump_p (jump_insn, NULL, &table))
4522     {
4523       rtvec vec = table->get_labels ();
4524       int i, veclen = GET_NUM_ELEM (vec);
4525 
4526       for (i = 0; i < veclen; ++i)
4527 	if (XEXP (RTVEC_ELT (vec, i), 0) == label)
4528 	  return true;
4529     }
4530 
4531   if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
4532     return true;
4533 
4534   return false;
4535 }
4536 
4537 
4538 /* Return an estimate of the cost of computing rtx X.
4539    One use is in cse, to decide which expression to keep in the hash table.
4540    Another is in rtl generation, to pick the cheapest way to multiply.
4541    Other uses like the latter are expected in the future.
4542 
4543    X appears as operand OPNO in an expression with code OUTER_CODE.
4544    SPEED specifies whether costs optimized for speed or size should
4545    be returned.  */
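/* For instance, code deciding whether a multiplication by 9 is worth
   replacing might query (an illustrative sketch; REG and SPEED_P are
   placeholders):

     int mult_cost = rtx_cost (gen_rtx_MULT (SImode, reg, GEN_INT (9)),
                               SImode, SET, 1, speed_p);

   and compare the result against the cost of the candidate replacement.  */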
4546 
4547 int
4548 rtx_cost (rtx x, machine_mode mode, enum rtx_code outer_code,
4549 	  int opno, bool speed)
4550 {
4551   int i, j;
4552   enum rtx_code code;
4553   const char *fmt;
4554   int total;
4555   int factor;
4556   unsigned mode_size;
4557 
4558   if (x == 0)
4559     return 0;
4560 
4561   if (GET_CODE (x) == SET)
4562     /* A SET doesn't have a mode, so let's look at the SET_DEST to get
4563        the mode for the factor.  */
4564     mode = GET_MODE (SET_DEST (x));
4565   else if (GET_MODE (x) != VOIDmode)
4566     mode = GET_MODE (x);
4567 
4568   mode_size = estimated_poly_value (GET_MODE_SIZE (mode));
4569 
4570   /* A size N times larger than UNITS_PER_WORD likely needs N times as
4571      many insns, taking N times as long.  */
4572   factor = mode_size > UNITS_PER_WORD ? mode_size / UNITS_PER_WORD : 1;
4573 
4574   /* Compute the default costs of certain things.
4575      Note that targetm.rtx_costs can override the defaults.  */
4576 
4577   code = GET_CODE (x);
4578   switch (code)
4579     {
4580     case MULT:
4581       /* Multiplication has time-complexity O(N*N), where N is the
4582 	 number of units (translated from digits) when using
4583 	 schoolbook long multiplication.  */
4584       total = factor * factor * COSTS_N_INSNS (5);
4585       break;
4586     case DIV:
4587     case UDIV:
4588     case MOD:
4589     case UMOD:
4590       /* Similarly, complexity for schoolbook long division.  */
4591       total = factor * factor * COSTS_N_INSNS (7);
4592       break;
4593     case USE:
4594       /* Used in combine.c as a marker.  */
4595       total = 0;
4596       break;
4597     default:
4598       total = factor * COSTS_N_INSNS (1);
4599     }
4600 
4601   switch (code)
4602     {
4603     case REG:
4604       return 0;
4605 
4606     case SUBREG:
4607       total = 0;
4608       /* If we can't tie these modes, make this expensive.  The larger
4609 	 the mode, the more expensive it is.  */
4610       if (!targetm.modes_tieable_p (mode, GET_MODE (SUBREG_REG (x))))
4611 	return COSTS_N_INSNS (2 + factor);
4612       break;
4613 
4614     case TRUNCATE:
4615       if (targetm.modes_tieable_p (mode, GET_MODE (XEXP (x, 0))))
4616 	{
4617 	  total = 0;
4618 	  break;
4619 	}
4620       /* FALLTHRU */
4621     default:
4622       if (targetm.rtx_costs (x, mode, outer_code, opno, &total, speed))
4623 	return total;
4624       break;
4625     }
4626 
4627   /* Sum the costs of the sub-rtx's, plus cost of this operation,
4628      which is already in total.  */
4629 
4630   fmt = GET_RTX_FORMAT (code);
4631   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4632     if (fmt[i] == 'e')
4633       total += rtx_cost (XEXP (x, i), mode, code, i, speed);
4634     else if (fmt[i] == 'E')
4635       for (j = 0; j < XVECLEN (x, i); j++)
4636 	total += rtx_cost (XVECEXP (x, i, j), mode, code, i, speed);
4637 
4638   return total;
4639 }
4640 
4641 /* Fill in the structure C with information about both speed and size rtx
4642    costs for X, which is operand OPNO in an expression with code OUTER.  */
4643 
4644 void
4645 get_full_rtx_cost (rtx x, machine_mode mode, enum rtx_code outer, int opno,
4646 		   struct full_rtx_costs *c)
4647 {
4648   c->speed = rtx_cost (x, mode, outer, opno, true);
4649   c->size = rtx_cost (x, mode, outer, opno, false);
4650 }
4651 
4652 
4653 /* Return the cost of address expression X.
4654    X is expected to be a properly formed address reference.
4655 
4656    The SPEED parameter specifies whether costs optimized for speed or for
4657    size should be returned.  */
4658 
4659 int
4660 address_cost (rtx x, machine_mode mode, addr_space_t as, bool speed)
4661 {
4662   /* We may be asked for the cost of various unusual addresses, such as the
4663      operands of a push instruction.  It is not worthwhile to complicate
4664      the target hook with such cases.  */
4665 
4666   if (!memory_address_addr_space_p (mode, x, as))
4667     return 1000;
4668 
4669   return targetm.address_cost (x, mode, as, speed);
4670 }
4671 
4672 /* If the target doesn't override, compute the cost as with arithmetic.  */
4673 
4674 int
4675 default_address_cost (rtx x, machine_mode, addr_space_t, bool speed)
4676 {
4677   return rtx_cost (x, Pmode, MEM, 0, speed);
4678 }
4679 
4680 
4681 unsigned HOST_WIDE_INT
4682 nonzero_bits (const_rtx x, machine_mode mode)
4683 {
4684   if (mode == VOIDmode)
4685     mode = GET_MODE (x);
4686   scalar_int_mode int_mode;
4687   if (!is_a <scalar_int_mode> (mode, &int_mode))
4688     return GET_MODE_MASK (mode);
4689   return cached_nonzero_bits (x, int_mode, NULL_RTX, VOIDmode, 0);
4690 }
4691 
4692 unsigned int
4693 num_sign_bit_copies (const_rtx x, machine_mode mode)
4694 {
4695   if (mode == VOIDmode)
4696     mode = GET_MODE (x);
4697   scalar_int_mode int_mode;
4698   if (!is_a <scalar_int_mode> (mode, &int_mode))
4699     return 1;
4700   return cached_num_sign_bit_copies (x, int_mode, NULL_RTX, VOIDmode, 0);
4701 }
4702 
4703 /* Return true if nonzero_bits1 might recurse into both operands
4704    of X.  */
4705 
4706 static inline bool
4707 nonzero_bits_binary_arith_p (const_rtx x)
4708 {
4709   if (!ARITHMETIC_P (x))
4710     return false;
4711   switch (GET_CODE (x))
4712     {
4713     case AND:
4714     case XOR:
4715     case IOR:
4716     case UMIN:
4717     case UMAX:
4718     case SMIN:
4719     case SMAX:
4720     case PLUS:
4721     case MINUS:
4722     case MULT:
4723     case DIV:
4724     case UDIV:
4725     case MOD:
4726     case UMOD:
4727       return true;
4728     default:
4729       return false;
4730     }
4731 }
4732 
4733 /* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
4734    It avoids exponential behavior in nonzero_bits1 when X has
4735    identical subexpressions on the first or the second level.  */
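/* For example, for X = (plus:SI (reg:SI R) (reg:SI R)) both operands are
   the same rtx, so the first-level check below computes the nonzero bits
   of R once and hands them to nonzero_bits1 as KNOWN_X/KNOWN_RET rather
   than recursing into each operand separately.  */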
4736 
4737 static unsigned HOST_WIDE_INT
4738 cached_nonzero_bits (const_rtx x, scalar_int_mode mode, const_rtx known_x,
4739 		     machine_mode known_mode,
4740 		     unsigned HOST_WIDE_INT known_ret)
4741 {
4742   if (x == known_x && mode == known_mode)
4743     return known_ret;
4744 
4745   /* Try to find identical subexpressions.  If found call
4746      nonzero_bits1 on X with the subexpressions as KNOWN_X and the
4747      precomputed value for the subexpression as KNOWN_RET.  */
4748 
4749   if (nonzero_bits_binary_arith_p (x))
4750     {
4751       rtx x0 = XEXP (x, 0);
4752       rtx x1 = XEXP (x, 1);
4753 
4754       /* Check the first level.  */
4755       if (x0 == x1)
4756 	return nonzero_bits1 (x, mode, x0, mode,
4757 			      cached_nonzero_bits (x0, mode, known_x,
4758 						   known_mode, known_ret));
4759 
4760       /* Check the second level.  */
4761       if (nonzero_bits_binary_arith_p (x0)
4762 	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4763 	return nonzero_bits1 (x, mode, x1, mode,
4764 			      cached_nonzero_bits (x1, mode, known_x,
4765 						   known_mode, known_ret));
4766 
4767       if (nonzero_bits_binary_arith_p (x1)
4768 	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4769 	return nonzero_bits1 (x, mode, x0, mode,
4770 			      cached_nonzero_bits (x0, mode, known_x,
4771 						   known_mode, known_ret));
4772     }
4773 
4774   return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
4775 }
4776 
4777 /* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
4778    We don't let nonzero_bits recur into num_sign_bit_copies, because that
4779    is less useful.  We can't allow both, because that results in exponential
4780    run time recursion.  There is a nullstone testcase that triggered
4781    this.  This macro avoids accidental uses of num_sign_bit_copies.  */
4782 #define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
4783 
4784 /* Given an expression, X, compute which bits in X can be nonzero.
4785    We don't care about bits outside of those defined in MODE.
4786 
4787    For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X is
4788    an arithmetic operation, we can do better.  */
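/* Two small examples, assuming a 32-bit SImode: for
   (and:SI (reg:SI R) (const_int 0x3f)) the AND case below intersects the
   operands' masks and yields at most 0x3f, while for
   (lshiftrt:SI (reg:SI R) (const_int 28)) the shift case yields at most
   0xf, because the logical right shift clears the 28 high-order bits.  */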
4789 
4790 static unsigned HOST_WIDE_INT
4791 nonzero_bits1 (const_rtx x, scalar_int_mode mode, const_rtx known_x,
4792 	       machine_mode known_mode,
4793 	       unsigned HOST_WIDE_INT known_ret)
4794 {
4795   unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
4796   unsigned HOST_WIDE_INT inner_nz;
4797   enum rtx_code code = GET_CODE (x);
4798   machine_mode inner_mode;
4799   unsigned int inner_width;
4800   scalar_int_mode xmode;
4801 
4802   unsigned int mode_width = GET_MODE_PRECISION (mode);
4803 
4804   if (CONST_INT_P (x))
4805     {
4806       if (SHORT_IMMEDIATES_SIGN_EXTEND
4807 	  && INTVAL (x) > 0
4808 	  && mode_width < BITS_PER_WORD
4809 	  && (UINTVAL (x) & (HOST_WIDE_INT_1U << (mode_width - 1))) != 0)
4810 	return UINTVAL (x) | (HOST_WIDE_INT_M1U << mode_width);
4811 
4812       return UINTVAL (x);
4813     }
4814 
4815   if (!is_a <scalar_int_mode> (GET_MODE (x), &xmode))
4816     return nonzero;
4817   unsigned int xmode_width = GET_MODE_PRECISION (xmode);
4818 
4819   /* If X is wider than MODE, use its mode instead.  */
4820   if (xmode_width > mode_width)
4821     {
4822       mode = xmode;
4823       nonzero = GET_MODE_MASK (mode);
4824       mode_width = xmode_width;
4825     }
4826 
4827   if (mode_width > HOST_BITS_PER_WIDE_INT)
4828     /* Our only callers in this case look for single bit values.  So
4829        just return the mode mask.  Those tests will then be false.  */
4830     return nonzero;
4831 
4832   /* If MODE is wider than X, but both are a single word for both the host
4833      and target machines, we can compute this from which bits of the object
4834      might be nonzero in its own mode, taking into account the fact that, on
4835      CISC machines, accessing an object in a wider mode generally causes the
4836      high-order bits to become undefined, so they are not known to be zero.
4837      We extend this reasoning to RISC machines for operations that might not
4838      operate on the full registers.  */
4839   if (mode_width > xmode_width
4840       && xmode_width <= BITS_PER_WORD
4841       && xmode_width <= HOST_BITS_PER_WIDE_INT
4842       && !(WORD_REGISTER_OPERATIONS && word_register_operation_p (x)))
4843     {
4844       nonzero &= cached_nonzero_bits (x, xmode,
4845 				      known_x, known_mode, known_ret);
4846       nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (xmode);
4847       return nonzero;
4848     }
4849 
4850   /* Please keep nonzero_bits_binary_arith_p above in sync with
4851      the code in the switch below.  */
4852   switch (code)
4853     {
4854     case REG:
4855 #if defined(POINTERS_EXTEND_UNSIGNED)
4856       /* If pointers extend unsigned and this is a pointer in Pmode, say that
4857 	 all the bits above ptr_mode are known to be zero.  */
4858       /* As we do not know which address space the pointer is referring to,
4859 	 we can do this only if the target does not support different pointer
4860 	 or address modes depending on the address space.  */
4861       if (target_default_pointer_address_modes_p ()
4862 	  && POINTERS_EXTEND_UNSIGNED
4863 	  && xmode == Pmode
4864 	  && REG_POINTER (x)
4865 	  && !targetm.have_ptr_extend ())
4866 	nonzero &= GET_MODE_MASK (ptr_mode);
4867 #endif
4868 
4869       /* Include declared information about alignment of pointers.  */
4870       /* ??? We don't properly preserve REG_POINTER changes across
4871 	 pointer-to-integer casts, so we can't trust it except for
4872 	 things that we know must be pointers.  See execute/960116-1.c.  */
4873       if ((x == stack_pointer_rtx
4874 	   || x == frame_pointer_rtx
4875 	   || x == arg_pointer_rtx)
4876 	  && REGNO_POINTER_ALIGN (REGNO (x)))
4877 	{
4878 	  unsigned HOST_WIDE_INT alignment
4879 	    = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;
4880 
4881 #ifdef PUSH_ROUNDING
4882 	  /* If PUSH_ROUNDING is defined, it is possible for the
4883 	     stack to be momentarily aligned only to that amount,
4884 	     so we pick the least alignment.  */
4885 	  if (x == stack_pointer_rtx && targetm.calls.push_argument (0))
4886 	    {
4887 	      poly_uint64 rounded_1 = PUSH_ROUNDING (poly_int64 (1));
4888 	      alignment = MIN (known_alignment (rounded_1), alignment);
4889 	    }
4890 #endif
4891 
4892 	  nonzero &= ~(alignment - 1);
4893 	}
4894 
4895       {
4896 	unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
4897 	rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, xmode, mode,
4898 						  &nonzero_for_hook);
4899 
4900 	if (new_rtx)
4901 	  nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
4902 						   known_mode, known_ret);
4903 
4904 	return nonzero_for_hook;
4905       }
4906 
4907     case MEM:
4908       /* In many, if not most, RISC machines, reading a byte from memory
4909 	 zeros the rest of the register.  Noticing that fact saves a lot
4910 	 of extra zero-extends.  */
4911       if (load_extend_op (xmode) == ZERO_EXTEND)
4912 	nonzero &= GET_MODE_MASK (xmode);
4913       break;
4914 
4915     case EQ:  case NE:
4916     case UNEQ:  case LTGT:
4917     case GT:  case GTU:  case UNGT:
4918     case LT:  case LTU:  case UNLT:
4919     case GE:  case GEU:  case UNGE:
4920     case LE:  case LEU:  case UNLE:
4921     case UNORDERED: case ORDERED:
4922       /* If this produces an integer result, we know which bits are set.
4923 	 Code here used to clear bits outside the mode of X, but that is
4924 	 now done above.  */
4925       /* Mind that MODE is the mode the caller wants to look at this
4926 	 operation in, and not the actual operation mode.  We can wind
4927 	 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
4928 	 that describes the results of a vector compare.  */
4929       if (GET_MODE_CLASS (xmode) == MODE_INT
4930 	  && mode_width <= HOST_BITS_PER_WIDE_INT)
4931 	nonzero = STORE_FLAG_VALUE;
4932       break;
4933 
4934     case NEG:
4935 #if 0
4936       /* Disabled to avoid exponential mutual recursion between nonzero_bits
4937 	 and num_sign_bit_copies.  */
4938       if (num_sign_bit_copies (XEXP (x, 0), xmode) == xmode_width)
4939 	nonzero = 1;
4940 #endif
4941 
4942       if (xmode_width < mode_width)
4943 	nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (xmode));
4944       break;
4945 
4946     case ABS:
4947 #if 0
4948       /* Disabled to avoid exponential mutual recursion between nonzero_bits
4949 	 and num_sign_bit_copies.  */
4950       if (num_sign_bit_copies (XEXP (x, 0), xmode) == xmode_width)
4951 	nonzero = 1;
4952 #endif
4953       break;
4954 
4955     case TRUNCATE:
4956       nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
4957 				       known_x, known_mode, known_ret)
4958 		  & GET_MODE_MASK (mode));
4959       break;
4960 
4961     case ZERO_EXTEND:
4962       nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4963 				      known_x, known_mode, known_ret);
4964       if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4965 	nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4966       break;
4967 
4968     case SIGN_EXTEND:
4969       /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
4970 	 Otherwise, show all the bits in the outer mode but not the inner
4971 	 may be nonzero.  */
4972       inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
4973 				      known_x, known_mode, known_ret);
4974       if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4975 	{
4976 	  inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4977 	  if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
4978 	    inner_nz |= (GET_MODE_MASK (mode)
4979 			 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
4980 	}
4981 
4982       nonzero &= inner_nz;
4983       break;
4984 
4985     case AND:
4986       nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4987 				       known_x, known_mode, known_ret)
4988       		 & cached_nonzero_bits (XEXP (x, 1), mode,
4989 					known_x, known_mode, known_ret);
4990       break;
4991 
4992     case XOR:   case IOR:
4993     case UMIN:  case UMAX:  case SMIN:  case SMAX:
4994       {
4995 	unsigned HOST_WIDE_INT nonzero0
4996 	   = cached_nonzero_bits (XEXP (x, 0), mode,
4997 				  known_x, known_mode, known_ret);
4998 
4999 	/* Don't call nonzero_bits for the second time if it cannot change
5000 	   anything.  */
5001 	if ((nonzero & nonzero0) != nonzero)
5002 	  nonzero &= nonzero0
5003       		     | cached_nonzero_bits (XEXP (x, 1), mode,
5004 					    known_x, known_mode, known_ret);
5005       }
5006       break;
5007 
5008     case PLUS:  case MINUS:
5009     case MULT:
5010     case DIV:   case UDIV:
5011     case MOD:   case UMOD:
5012       /* We can apply the rules of arithmetic to compute the number of
5013 	 high- and low-order zero bits of these operations.  We start by
5014 	 computing the width (position of the highest-order nonzero bit)
5015 	 and the number of low-order zero bits for each value.  */
5016       {
5017 	unsigned HOST_WIDE_INT nz0
5018 	  = cached_nonzero_bits (XEXP (x, 0), mode,
5019 				 known_x, known_mode, known_ret);
5020 	unsigned HOST_WIDE_INT nz1
5021 	  = cached_nonzero_bits (XEXP (x, 1), mode,
5022 				 known_x, known_mode, known_ret);
5023 	int sign_index = xmode_width - 1;
5024 	int width0 = floor_log2 (nz0) + 1;
5025 	int width1 = floor_log2 (nz1) + 1;
5026 	int low0 = ctz_or_zero (nz0);
5027 	int low1 = ctz_or_zero (nz1);
5028 	unsigned HOST_WIDE_INT op0_maybe_minusp
5029 	  = nz0 & (HOST_WIDE_INT_1U << sign_index);
5030 	unsigned HOST_WIDE_INT op1_maybe_minusp
5031 	  = nz1 & (HOST_WIDE_INT_1U << sign_index);
5032 	unsigned int result_width = mode_width;
5033 	int result_low = 0;
5034 
5035 	switch (code)
5036 	  {
5037 	  case PLUS:
5038 	    result_width = MAX (width0, width1) + 1;
5039 	    result_low = MIN (low0, low1);
5040 	    break;
5041 	  case MINUS:
5042 	    result_low = MIN (low0, low1);
5043 	    break;
5044 	  case MULT:
5045 	    result_width = width0 + width1;
5046 	    result_low = low0 + low1;
5047 	    break;
5048 	  case DIV:
5049 	    if (width1 == 0)
5050 	      break;
5051 	    if (!op0_maybe_minusp && !op1_maybe_minusp)
5052 	      result_width = width0;
5053 	    break;
5054 	  case UDIV:
5055 	    if (width1 == 0)
5056 	      break;
5057 	    result_width = width0;
5058 	    break;
5059 	  case MOD:
5060 	    if (width1 == 0)
5061 	      break;
5062 	    if (!op0_maybe_minusp && !op1_maybe_minusp)
5063 	      result_width = MIN (width0, width1);
5064 	    result_low = MIN (low0, low1);
5065 	    break;
5066 	  case UMOD:
5067 	    if (width1 == 0)
5068 	      break;
5069 	    result_width = MIN (width0, width1);
5070 	    result_low = MIN (low0, low1);
5071 	    break;
5072 	  default:
5073 	    gcc_unreachable ();
5074 	  }
5075 
5076 	/* Note that mode_width <= HOST_BITS_PER_WIDE_INT, see above.  */
5077 	if (result_width < mode_width)
5078 	  nonzero &= (HOST_WIDE_INT_1U << result_width) - 1;
5079 
5080 	if (result_low > 0)
5081 	  {
5082 	    if (result_low < HOST_BITS_PER_WIDE_INT)
5083 	      nonzero &= ~((HOST_WIDE_INT_1U << result_low) - 1);
5084 	    else
5085 	      nonzero = 0;
5086 	  }
5087       }
5088       break;
5089 
5090     case ZERO_EXTRACT:
5091       if (CONST_INT_P (XEXP (x, 1))
5092 	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
5093 	nonzero &= (HOST_WIDE_INT_1U << INTVAL (XEXP (x, 1))) - 1;
5094       break;
5095 
5096     case SUBREG:
5097       /* If this is a SUBREG formed for a promoted variable that has
5098 	 been zero-extended, we know that at least the high-order bits
5099 	 are zero, though others might be too.  */
5100       if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x))
5101 	nonzero = GET_MODE_MASK (xmode)
5102 		  & cached_nonzero_bits (SUBREG_REG (x), xmode,
5103 					 known_x, known_mode, known_ret);
5104 
5105       /* If the inner mode is a single word for both the host and target
5106 	 machines, we can compute this from which bits of the inner
5107 	 object might be nonzero.  */
5108       inner_mode = GET_MODE (SUBREG_REG (x));
5109       if (GET_MODE_PRECISION (inner_mode).is_constant (&inner_width)
5110 	  && inner_width <= BITS_PER_WORD
5111 	  && inner_width <= HOST_BITS_PER_WIDE_INT)
5112 	{
5113 	  nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
5114 					  known_x, known_mode, known_ret);
5115 
5116           /* On a typical CISC machine, accessing an object in a wider mode
5117 	     causes the high-order bits to become undefined.  So they are
5118 	     not known to be zero.
5119 
5120 	     On a typical RISC machine, we only have to worry about the way
5121 	     loads are extended.  Otherwise, if we get a reload for the inner
5122 	     part, it may be loaded from the stack, and then we may lose all
5123 	     the zero bits that existed before the store to the stack.  */
5124 	  rtx_code extend_op;
5125 	  if ((!WORD_REGISTER_OPERATIONS
5126 	       || ((extend_op = load_extend_op (inner_mode)) == SIGN_EXTEND
5127 		   ? val_signbit_known_set_p (inner_mode, nonzero)
5128 		   : extend_op != ZERO_EXTEND)
5129 	       || !MEM_P (SUBREG_REG (x)))
5130 	      && xmode_width > inner_width)
5131 	    nonzero
5132 	      |= (GET_MODE_MASK (GET_MODE (x)) & ~GET_MODE_MASK (inner_mode));
5133 	}
5134       break;
5135 
5136     case ASHIFT:
5137     case ASHIFTRT:
5138     case LSHIFTRT:
5139     case ROTATE:
5140     case ROTATERT:
5141       /* The nonzero bits are in two classes: any bits within MODE
5142 	 that aren't in xmode are always significant.  The rest of the
5143 	 nonzero bits are those that are significant in the operand of
5144 	 the shift when shifted the appropriate number of bits.  This
5145 	 shows that high-order bits are cleared by the right shift and
5146 	 low-order bits by left shifts.  */
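      /* For example, (lshiftrt:SI X (const_int 8)) can have a nonzero
	 bit only where X had a possibly nonzero bit eight positions
	 higher, so its eight high-order bits are known to be zero.  */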
5147       if (CONST_INT_P (XEXP (x, 1))
5148 	  && INTVAL (XEXP (x, 1)) >= 0
5149 	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
5150 	  && INTVAL (XEXP (x, 1)) < xmode_width)
5151 	{
5152 	  int count = INTVAL (XEXP (x, 1));
5153 	  unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (xmode);
5154 	  unsigned HOST_WIDE_INT op_nonzero
5155 	    = cached_nonzero_bits (XEXP (x, 0), mode,
5156 				   known_x, known_mode, known_ret);
5157 	  unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
5158 	  unsigned HOST_WIDE_INT outer = 0;
5159 
5160 	  if (mode_width > xmode_width)
5161 	    outer = (op_nonzero & nonzero & ~mode_mask);
5162 
5163 	  switch (code)
5164 	    {
5165 	    case ASHIFT:
5166 	      inner <<= count;
5167 	      break;
5168 
5169 	    case LSHIFTRT:
5170 	      inner >>= count;
5171 	      break;
5172 
5173 	    case ASHIFTRT:
5174 	      inner >>= count;
5175 
5176 	      /* If the sign bit may have been nonzero before the shift, we
5177 		 need to mark all the places it could have been copied to
5178 		 by the shift as possibly nonzero.  */
5179 	      if (inner & (HOST_WIDE_INT_1U << (xmode_width - 1 - count)))
5180 		inner |= (((HOST_WIDE_INT_1U << count) - 1)
5181 			  << (xmode_width - count));
5182 	      break;
5183 
5184 	    case ROTATE:
5185 	      inner = (inner << (count % xmode_width)
5186 		       | (inner >> (xmode_width - (count % xmode_width))))
5187 		      & mode_mask;
5188 	      break;
5189 
5190 	    case ROTATERT:
5191 	      inner = (inner >> (count % xmode_width)
5192 		       | (inner << (xmode_width - (count % xmode_width))))
5193 		      & mode_mask;
5194 	      break;
5195 
5196 	    default:
5197 	      gcc_unreachable ();
5198 	    }
5199 
5200 	  nonzero &= (outer | inner);
5201 	}
5202       break;
5203 
5204     case FFS:
5205     case POPCOUNT:
5206       /* This is at most the number of bits in the mode.  */
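      /* For example, for SImode (mode_width == 32) this computes
	 (2 << 5) - 1 == 0x3f, which is enough to represent any count
	 from 0 to 32.  */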
5207       nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
5208       break;
5209 
5210     case CLZ:
5211       /* If CLZ has a known value at zero, then the nonzero bits are
5212 	 that value, plus the number of bits in the mode minus one.  */
5213       if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
5214 	nonzero
5215 	  |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
5216       else
5217 	nonzero = -1;
5218       break;
5219 
5220     case CTZ:
5221       /* If CTZ has a known value at zero, then the nonzero bits are
5222 	 that value, plus the number of bits in the mode minus one.  */
5223       if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
5224 	nonzero
5225 	  |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
5226       else
5227 	nonzero = -1;
5228       break;
5229 
5230     case CLRSB:
5231       /* This is at most the number of bits in the mode minus 1.  */
5232       nonzero = (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
5233       break;
5234 
5235     case PARITY:
5236       nonzero = 1;
5237       break;
5238 
5239     case IF_THEN_ELSE:
5240       {
5241 	unsigned HOST_WIDE_INT nonzero_true
5242 	  = cached_nonzero_bits (XEXP (x, 1), mode,
5243 				 known_x, known_mode, known_ret);
5244 
5245 	/* Don't call nonzero_bits a second time if it cannot change
5246 	   anything.  */
5247 	if ((nonzero & nonzero_true) != nonzero)
5248 	  nonzero &= nonzero_true
5249       		     | cached_nonzero_bits (XEXP (x, 2), mode,
5250 					    known_x, known_mode, known_ret);
5251       }
5252       break;
5253 
5254     default:
5255       break;
5256     }
5257 
5258   return nonzero;
5259 }
5260 
5261 /* See the macro definition above.  */
5262 #undef cached_num_sign_bit_copies
5263 
5264 
5265 /* Return true if num_sign_bit_copies1 might recurse into both operands
5266    of X.  */
5267 
5268 static inline bool
5269 num_sign_bit_copies_binary_arith_p (const_rtx x)
5270 {
5271   if (!ARITHMETIC_P (x))
5272     return false;
5273   switch (GET_CODE (x))
5274     {
5275     case IOR:
5276     case AND:
5277     case XOR:
5278     case SMIN:
5279     case SMAX:
5280     case UMIN:
5281     case UMAX:
5282     case PLUS:
5283     case MINUS:
5284     case MULT:
5285       return true;
5286     default:
5287       return false;
5288     }
5289 }
5290 
5291 /* The function cached_num_sign_bit_copies is a wrapper around
5292    num_sign_bit_copies1.  It avoids exponential behavior in
5293    num_sign_bit_copies1 when X has identical subexpressions on the
5294    first or the second level.  */
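
/* For example, for (plus (reg X) (reg X)) the count for (reg X) is
   computed once and then passed down as KNOWN_X / KNOWN_RET, so the
   recursion does not walk the shared operand twice.  */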
5295 
5296 static unsigned int
5297 cached_num_sign_bit_copies (const_rtx x, scalar_int_mode mode,
5298 			    const_rtx known_x, machine_mode known_mode,
5299 			    unsigned int known_ret)
5300 {
5301   if (x == known_x && mode == known_mode)
5302     return known_ret;
5303 
5304   /* Try to find identical subexpressions.  If found call
5305      num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
5306      the precomputed value for the subexpression as KNOWN_RET.  */
5307 
5308   if (num_sign_bit_copies_binary_arith_p (x))
5309     {
5310       rtx x0 = XEXP (x, 0);
5311       rtx x1 = XEXP (x, 1);
5312 
5313       /* Check the first level.  */
5314       if (x0 == x1)
5315 	return
5316 	  num_sign_bit_copies1 (x, mode, x0, mode,
5317 				cached_num_sign_bit_copies (x0, mode, known_x,
5318 							    known_mode,
5319 							    known_ret));
5320 
5321       /* Check the second level.  */
5322       if (num_sign_bit_copies_binary_arith_p (x0)
5323 	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
5324 	return
5325 	  num_sign_bit_copies1 (x, mode, x1, mode,
5326 				cached_num_sign_bit_copies (x1, mode, known_x,
5327 							    known_mode,
5328 							    known_ret));
5329 
5330       if (num_sign_bit_copies_binary_arith_p (x1)
5331 	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
5332 	return
5333 	  num_sign_bit_copies1 (x, mode, x0, mode,
5334 				cached_num_sign_bit_copies (x0, mode, known_x,
5335 							    known_mode,
5336 							    known_ret));
5337     }
5338 
5339   return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
5340 }
5341 
5342 /* Return the number of bits at the high-order end of X that are known to
5343    be equal to the sign bit.  X will be used in mode MODE.  The returned
5344    value will always be between 1 and the number of bits in MODE.  */
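
/* For example, the SImode constant -128 (0xffffff80) has bits 31..7
   all equal to the sign bit, so the function returns 25 for it in
   SImode.  */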
5345 
5346 static unsigned int
5347 num_sign_bit_copies1 (const_rtx x, scalar_int_mode mode, const_rtx known_x,
5348 		      machine_mode known_mode,
5349 		      unsigned int known_ret)
5350 {
5351   enum rtx_code code = GET_CODE (x);
5352   unsigned int bitwidth = GET_MODE_PRECISION (mode);
5353   int num0, num1, result;
5354   unsigned HOST_WIDE_INT nonzero;
5355 
5356   if (CONST_INT_P (x))
5357     {
5358       /* If the constant is negative, take its 1's complement and remask.
5359 	 Then see how many zero bits we have.  */
5360       nonzero = UINTVAL (x) & GET_MODE_MASK (mode);
5361       if (bitwidth <= HOST_BITS_PER_WIDE_INT
5362 	  && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5363 	nonzero = (~nonzero) & GET_MODE_MASK (mode);
5364 
5365       return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
5366     }
5367 
5368   scalar_int_mode xmode, inner_mode;
5369   if (!is_a <scalar_int_mode> (GET_MODE (x), &xmode))
5370     return 1;
5371 
5372   unsigned int xmode_width = GET_MODE_PRECISION (xmode);
5373 
5374   /* For a smaller mode, just ignore the high bits.  */
5375   if (bitwidth < xmode_width)
5376     {
5377       num0 = cached_num_sign_bit_copies (x, xmode,
5378 					 known_x, known_mode, known_ret);
5379       return MAX (1, num0 - (int) (xmode_width - bitwidth));
5380     }
5381 
5382   if (bitwidth > xmode_width)
5383     {
5384       /* If this machine does not do all register operations on the entire
5385 	 register and MODE is wider than the mode of X, we can say nothing
5386 	 at all about the high-order bits.  We extend this reasoning to RISC
5387 	 machines for operations that might not operate on full registers.  */
5388       if (!(WORD_REGISTER_OPERATIONS && word_register_operation_p (x)))
5389 	return 1;
5390 
5391       /* Likewise on machines that do, if the mode of the object is smaller
5392 	 than a word and loads of that size don't sign extend, we can say
5393 	 nothing about the high order bits.  */
5394       if (xmode_width < BITS_PER_WORD
5395 	  && load_extend_op (xmode) != SIGN_EXTEND)
5396 	return 1;
5397     }
5398 
5399   /* Please keep num_sign_bit_copies_binary_arith_p above in sync with
5400      the code in the switch below.  */
5401   switch (code)
5402     {
5403     case REG:
5404 
5405 #if defined(POINTERS_EXTEND_UNSIGNED)
5406       /* If pointers extend signed and this is a pointer in Pmode, say that
5407 	 all the bits above ptr_mode are known to be sign bit copies.  */
5408       /* As we do not know which address space the pointer is referring to,
5409 	 we can do this only if the target does not support different pointer
5410 	 or address modes depending on the address space.  */
5411       if (target_default_pointer_address_modes_p ()
5412 	  && ! POINTERS_EXTEND_UNSIGNED && xmode == Pmode
5413 	  && mode == Pmode && REG_POINTER (x)
5414 	  && !targetm.have_ptr_extend ())
5415 	return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1;
5416 #endif
5417 
5418       {
5419 	unsigned int copies_for_hook = 1, copies = 1;
5420 	rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, xmode, mode,
5421 							 &copies_for_hook);
5422 
5423 	if (new_rtx)
5424 	  copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
5425 					       known_mode, known_ret);
5426 
5427 	if (copies > 1 || copies_for_hook > 1)
5428 	  return MAX (copies, copies_for_hook);
5429 
5430 	/* Else, use nonzero_bits to guess num_sign_bit_copies (see below).  */
5431       }
5432       break;
5433 
5434     case MEM:
5435       /* Some RISC machines sign-extend all loads of smaller than a word.  */
5436       if (load_extend_op (xmode) == SIGN_EXTEND)
5437 	return MAX (1, ((int) bitwidth - (int) xmode_width + 1));
5438       break;
5439 
5440     case SUBREG:
5441       /* If this is a SUBREG for a promoted object that is sign-extended
5442 	 and we are looking at it in a wider mode, we know that at least the
5443 	 high-order bits are known to be sign bit copies.  */
5444 
5445       if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_SIGNED_P (x))
5446 	{
5447 	  num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
5448 					     known_x, known_mode, known_ret);
5449 	  return MAX ((int) bitwidth - (int) xmode_width + 1, num0);
5450 	}
5451 
5452       if (is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (x)), &inner_mode))
5453 	{
5454 	  /* For a smaller object, just ignore the high bits.  */
5455 	  if (bitwidth <= GET_MODE_PRECISION (inner_mode))
5456 	    {
5457 	      num0 = cached_num_sign_bit_copies (SUBREG_REG (x), inner_mode,
5458 						 known_x, known_mode,
5459 						 known_ret);
5460 	      return MAX (1, num0 - (int) (GET_MODE_PRECISION (inner_mode)
5461 					   - bitwidth));
5462 	    }
5463 
5464 	  /* For paradoxical SUBREGs on machines where all register operations
5465 	     affect the entire register, just look inside.  Note that we are
5466 	     passing MODE to the recursive call, so the number of sign bit
5467 	     copies will remain relative to that mode, not the inner mode.
5468 
5469 	     This works only if loads sign extend.  Otherwise, if we get a
5470 	     reload for the inner part, it may be loaded from the stack, and
5471 	     then we lose all sign bit copies that existed before the store
5472 	     to the stack.  */
5473 	  if (WORD_REGISTER_OPERATIONS
5474 	      && load_extend_op (inner_mode) == SIGN_EXTEND
5475 	      && paradoxical_subreg_p (x)
5476 	      && MEM_P (SUBREG_REG (x)))
5477 	    return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
5478 					       known_x, known_mode, known_ret);
5479 	}
5480       break;
5481 
5482     case SIGN_EXTRACT:
5483       if (CONST_INT_P (XEXP (x, 1)))
5484 	return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
5485       break;
5486 
5487     case SIGN_EXTEND:
5488       if (is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
5489 	return (bitwidth - GET_MODE_PRECISION (inner_mode)
5490 		+ cached_num_sign_bit_copies (XEXP (x, 0), inner_mode,
5491 					      known_x, known_mode, known_ret));
5492       break;
5493 
5494     case TRUNCATE:
5495       /* For a smaller object, just ignore the high bits.  */
5496       inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
5497       num0 = cached_num_sign_bit_copies (XEXP (x, 0), inner_mode,
5498 					 known_x, known_mode, known_ret);
5499       return MAX (1, (num0 - (int) (GET_MODE_PRECISION (inner_mode)
5500 				    - bitwidth)));
5501 
5502     case NOT:
5503       return cached_num_sign_bit_copies (XEXP (x, 0), mode,
5504 					 known_x, known_mode, known_ret);
5505 
5506     case ROTATE:       case ROTATERT:
5507       /* If we are rotating left by a number of bits less than the number
5508 	 of sign bit copies, we can just subtract that amount from the
5509 	 number.  */
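      /* For example, rotating an SImode value that has at least 20 sign
	 bit copies left by 3 leaves at least 20 - 3 == 17 copies, and a
	 rotate right by 3 is treated as a rotate left by 32 - 3.  */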
5510       if (CONST_INT_P (XEXP (x, 1))
5511 	  && INTVAL (XEXP (x, 1)) >= 0
5512 	  && INTVAL (XEXP (x, 1)) < (int) bitwidth)
5513 	{
5514 	  num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5515 					     known_x, known_mode, known_ret);
5516 	  return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
5517 				 : (int) bitwidth - INTVAL (XEXP (x, 1))));
5518 	}
5519       break;
5520 
5521     case NEG:
5522       /* In general, this subtracts one sign bit copy.  But if the value
5523 	 is known to be positive, the number of sign bit copies is the
5524 	 same as that of the input.  Finally, if the input has just one bit
5525 	 that might be nonzero, all the bits are copies of the sign bit.  */
5526       num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5527 					 known_x, known_mode, known_ret);
5528       if (bitwidth > HOST_BITS_PER_WIDE_INT)
5529 	return num0 > 1 ? num0 - 1 : 1;
5530 
5531       nonzero = nonzero_bits (XEXP (x, 0), mode);
5532       if (nonzero == 1)
5533 	return bitwidth;
5534 
5535       if (num0 > 1
5536 	  && ((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero))
5537 	num0--;
5538 
5539       return num0;
5540 
5541     case IOR:   case AND:   case XOR:
5542     case SMIN:  case SMAX:  case UMIN:  case UMAX:
5543       /* Logical operations will preserve the number of sign-bit copies.
5544 	 MIN and MAX operations always return one of the operands.  */
5545       num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5546 					 known_x, known_mode, known_ret);
5547       num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5548 					 known_x, known_mode, known_ret);
5549 
5550       /* If num1 is clearing some of the top bits then regardless of
5551 	 the other term, we are guaranteed to have at least that many
5552 	 high-order zero bits.  */
5553       if (code == AND
5554 	  && num1 > 1
5555 	  && bitwidth <= HOST_BITS_PER_WIDE_INT
5556 	  && CONST_INT_P (XEXP (x, 1))
5557 	  && (UINTVAL (XEXP (x, 1))
5558 	      & (HOST_WIDE_INT_1U << (bitwidth - 1))) == 0)
5559 	return num1;
5560 
5561       /* Similarly for IOR when setting high-order bits.  */
5562       if (code == IOR
5563 	  && num1 > 1
5564 	  && bitwidth <= HOST_BITS_PER_WIDE_INT
5565 	  && CONST_INT_P (XEXP (x, 1))
5566 	  && (UINTVAL (XEXP (x, 1))
5567 	      & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5568 	return num1;
5569 
5570       return MIN (num0, num1);
5571 
5572     case PLUS:  case MINUS:
5573       /* For addition and subtraction, we can have a 1-bit carry.  However,
5574 	 if we are subtracting 1 from a positive number, there will not
5575 	 be such a carry.  Furthermore, if the positive number is known to
5576 	 be 0 or 1, we know the result is either -1 or 0.  */
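      /* For example, adding two SImode values that each have at least 10
	 sign bit copies yields at least 9 copies, since the carry can
	 invalidate at most one of them.  */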
5577 
5578       if (code == PLUS && XEXP (x, 1) == constm1_rtx
5579 	  && bitwidth <= HOST_BITS_PER_WIDE_INT)
5580 	{
5581 	  nonzero = nonzero_bits (XEXP (x, 0), mode);
5582 	  if (((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero) == 0)
5583 	    return (nonzero == 1 || nonzero == 0 ? bitwidth
5584 		    : bitwidth - floor_log2 (nonzero) - 1);
5585 	}
5586 
5587       num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5588 					 known_x, known_mode, known_ret);
5589       num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5590 					 known_x, known_mode, known_ret);
5591       result = MAX (1, MIN (num0, num1) - 1);
5592 
5593       return result;
5594 
5595     case MULT:
5596       /* The number of bits of the product is the sum of the number of
5597 	 bits of both terms.  However, unless one of the terms is known
5598 	 to be positive, we must allow for an additional bit since negating
5599 	 a negative number can remove one sign bit copy.  */
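      /* For example, multiplying two SImode values with at least 30 and
	 28 sign bit copies gives at least 30 + 28 - 32 == 26 copies, or
	 25 if both operands might be negative.  */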
5600 
5601       num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5602 					 known_x, known_mode, known_ret);
5603       num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5604 					 known_x, known_mode, known_ret);
5605 
5606       result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
5607       if (result > 0
5608 	  && (bitwidth > HOST_BITS_PER_WIDE_INT
5609 	      || (((nonzero_bits (XEXP (x, 0), mode)
5610 		    & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5611 		  && ((nonzero_bits (XEXP (x, 1), mode)
5612 		       & (HOST_WIDE_INT_1U << (bitwidth - 1)))
5613 		      != 0))))
5614 	result--;
5615 
5616       return MAX (1, result);
5617 
5618     case UDIV:
5619       /* The result must be <= the first operand.  If the first operand
5620 	 has the high bit set, we know nothing about the number of sign
5621 	 bit copies.  */
5622       if (bitwidth > HOST_BITS_PER_WIDE_INT)
5623 	return 1;
5624       else if ((nonzero_bits (XEXP (x, 0), mode)
5625 		& (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5626 	return 1;
5627       else
5628 	return cached_num_sign_bit_copies (XEXP (x, 0), mode,
5629 					   known_x, known_mode, known_ret);
5630 
5631     case UMOD:
5632       /* The result must be <= the second operand.  If the second operand
5633 	 has (or just might have) the high bit set, we know nothing about
5634 	 the number of sign bit copies.  */
5635       if (bitwidth > HOST_BITS_PER_WIDE_INT)
5636 	return 1;
5637       else if ((nonzero_bits (XEXP (x, 1), mode)
5638 		& (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5639 	return 1;
5640       else
5641 	return cached_num_sign_bit_copies (XEXP (x, 1), mode,
5642 					   known_x, known_mode, known_ret);
5643 
5644     case DIV:
5645       /* Similar to unsigned division, except that we have to worry about
5646 	 the case where the divisor might be negative, in which case the
5647 	 result can lose one sign bit copy (e.g. dividing by -1 negates).  */
5648       result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5649 					   known_x, known_mode, known_ret);
5650       if (result > 1
5651 	  && (bitwidth > HOST_BITS_PER_WIDE_INT
5652 	      || (nonzero_bits (XEXP (x, 1), mode)
5653 		  & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
5654 	result--;
5655 
5656       return result;
5657 
5658     case MOD:
5659       result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5660 					   known_x, known_mode, known_ret);
5661       if (result > 1
5662 	  && (bitwidth > HOST_BITS_PER_WIDE_INT
5663 	      || (nonzero_bits (XEXP (x, 1), mode)
5664 		  & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
5665 	result--;
5666 
5667       return result;
5668 
5669     case ASHIFTRT:
5670       /* An arithmetic right shift by a constant increases the number of
5671 	 bits equal to the sign bit by the shift count.  */
5672       num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5673 					 known_x, known_mode, known_ret);
5674       if (CONST_INT_P (XEXP (x, 1))
5675 	  && INTVAL (XEXP (x, 1)) > 0
5676 	  && INTVAL (XEXP (x, 1)) < xmode_width)
5677 	num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));
5678 
5679       return num0;
5680 
5681     case ASHIFT:
5682       /* Left shifts destroy copies.  */
5683       if (!CONST_INT_P (XEXP (x, 1))
5684 	  || INTVAL (XEXP (x, 1)) < 0
5685 	  || INTVAL (XEXP (x, 1)) >= (int) bitwidth
5686 	  || INTVAL (XEXP (x, 1)) >= xmode_width)
5687 	return 1;
5688 
5689       num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5690 					 known_x, known_mode, known_ret);
5691       return MAX (1, num0 - INTVAL (XEXP (x, 1)));
5692 
5693     case IF_THEN_ELSE:
5694       num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5695 					 known_x, known_mode, known_ret);
5696       num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
5697 					 known_x, known_mode, known_ret);
5698       return MIN (num0, num1);
5699 
5700     case EQ:  case NE:  case GE:  case GT:  case LE:  case LT:
5701     case UNEQ:  case LTGT:  case UNGE:  case UNGT:  case UNLE:  case UNLT:
5702     case GEU: case GTU: case LEU: case LTU:
5703     case UNORDERED: case ORDERED:
5704       /* The result is STORE_FLAG_VALUE or zero.  If STORE_FLAG_VALUE is
5705 	 negative, take its 1's complement, remask, and count the zero bits.  */
5706       nonzero = STORE_FLAG_VALUE;
5707       if (bitwidth <= HOST_BITS_PER_WIDE_INT
5708 	  && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5709 	nonzero = (~nonzero) & GET_MODE_MASK (mode);
5710 
5711       return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
5712 
5713     default:
5714       break;
5715     }
5716 
5717   /* If we haven't been able to figure it out by one of the above rules,
5718      see if some of the high-order bits are known to be zero.  If so,
5719      count those bits and return one less than that amount.  If we can't
5720      safely compute the mask for this mode, just return 1.  */
5721 
5722   bitwidth = GET_MODE_PRECISION (mode);
5723   if (bitwidth > HOST_BITS_PER_WIDE_INT)
5724     return 1;
5725 
5726   nonzero = nonzero_bits (x, mode);
5727   return nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))
5728 	 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
5729 }
5730 
5731 /* Calculate the rtx_cost of a single instruction pattern.  A return value of
5732    zero indicates an instruction pattern without a known cost.  */
5733 
5734 int
5735 pattern_cost (rtx pat, bool speed)
5736 {
5737   int i, cost;
5738   rtx set;
5739 
5740   /* Extract the single set rtx from the instruction pattern.  We
5741      can't use single_set since we only have the pattern.  We also
5742      consider PARALLELs of a normal set and a single comparison.  In
5743      that case we use the cost of the non-comparison SET operation,
5744      which is most likely to be the real cost of this operation.  */
5745   if (GET_CODE (pat) == SET)
5746     set = pat;
5747   else if (GET_CODE (pat) == PARALLEL)
5748     {
5749       set = NULL_RTX;
5750       rtx comparison = NULL_RTX;
5751 
5752       for (i = 0; i < XVECLEN (pat, 0); i++)
5753 	{
5754 	  rtx x = XVECEXP (pat, 0, i);
5755 	  if (GET_CODE (x) == SET)
5756 	    {
5757 	      if (GET_CODE (SET_SRC (x)) == COMPARE)
5758 		{
5759 		  if (comparison)
5760 		    return 0;
5761 		  comparison = x;
5762 		}
5763 	      else
5764 		{
5765 		  if (set)
5766 		    return 0;
5767 		  set = x;
5768 		}
5769 	    }
5770 	}
5771 
5772       if (!set && comparison)
5773 	set = comparison;
5774 
5775       if (!set)
5776 	return 0;
5777     }
5778   else
5779     return 0;
5780 
5781   cost = set_src_cost (SET_SRC (set), GET_MODE (SET_DEST (set)), speed);
5782   return cost > 0 ? cost : COSTS_N_INSNS (1);
5783 }
5784 
5785 /* Calculate the cost of a single instruction.  A return value of zero
5786    indicates an instruction pattern without a known cost.  */
5787 
5788 int
5789 insn_cost (rtx_insn *insn, bool speed)
5790 {
5791   if (targetm.insn_cost)
5792     return targetm.insn_cost (insn, speed);
5793 
5794   return pattern_cost (PATTERN (insn), speed);
5795 }
5796 
5797 /* Return an estimate of the cost of computing SEQ.  */
5798 
5799 unsigned
5800 seq_cost (const rtx_insn *seq, bool speed)
5801 {
5802   unsigned cost = 0;
5803   rtx set;
5804 
5805   for (; seq; seq = NEXT_INSN (seq))
5806     {
5807       set = single_set (seq);
5808       if (set)
5809         cost += set_rtx_cost (set, speed);
5810       else if (NONDEBUG_INSN_P (seq))
5811 	{
5812 	  int this_cost = insn_cost (CONST_CAST_RTX_INSN (seq), speed);
5813 	  if (this_cost > 0)
5814 	    cost += this_cost;
5815 	  else
5816 	    cost++;
5817 	}
5818     }
5819 
5820   return cost;
5821 }
5822 
5823 /* Given an insn INSN and condition COND, return the condition in a
5824    canonical form to simplify testing by callers.  Specifically:
5825 
5826    (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
5827    (2) Both operands will be machine operands.
5828    (3) If an operand is a constant, it will be the second operand.
5829    (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
5830        for GE, GEU, and LEU.
5831 
5832    If the condition cannot be understood, or is an inequality floating-point
5833    comparison which needs to be reversed, 0 will be returned.
5834 
5835    If REVERSE is nonzero, then reverse the condition prior to canonicalizing it.
5836 
5837    If EARLIEST is nonzero, it is a pointer to a place where the earliest
5838    insn used in locating the condition was found.  If a replacement test
5839    of the condition is desired, it should be placed in front of that
5840    insn and we will be sure that the inputs are still valid.
5841 
5842    If WANT_REG is nonzero, we wish the condition to be relative to that
5843    register, if possible.  Therefore, do not canonicalize the condition
5844    further.  If ALLOW_CC_MODE is nonzero, allow the condition returned
5845    to be a compare to a CC mode register.
5846 
5847    If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
5848    and at INSN.  */
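
/* For example, rule (4) above turns (le X (const_int 4)) into
   (lt X (const_int 5)) and (geu X (const_int 7)) into
   (gtu X (const_int 6)), as long as the adjusted constant still fits
   in the mode of X.  */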
5849 
5850 rtx
5851 canonicalize_condition (rtx_insn *insn, rtx cond, int reverse,
5852 			rtx_insn **earliest,
5853 			rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
5854 {
5855   enum rtx_code code;
5856   rtx_insn *prev = insn;
5857   const_rtx set;
5858   rtx tem;
5859   rtx op0, op1;
5860   int reverse_code = 0;
5861   machine_mode mode;
5862   basic_block bb = BLOCK_FOR_INSN (insn);
5863 
5864   code = GET_CODE (cond);
5865   mode = GET_MODE (cond);
5866   op0 = XEXP (cond, 0);
5867   op1 = XEXP (cond, 1);
5868 
5869   if (reverse)
5870     code = reversed_comparison_code (cond, insn);
5871   if (code == UNKNOWN)
5872     return 0;
5873 
5874   if (earliest)
5875     *earliest = insn;
5876 
5877   /* If we are comparing a register with zero, see if the register is set
5878      in the previous insn to a COMPARE or a comparison operation.  Perform
5879      the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
5880      in cse.c  */
5881 
5882   while ((GET_RTX_CLASS (code) == RTX_COMPARE
5883 	  || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
5884 	 && op1 == CONST0_RTX (GET_MODE (op0))
5885 	 && op0 != want_reg)
5886     {
5887       /* Set nonzero when we find something of interest.  */
5888       rtx x = 0;
5889 
5890       /* If this is a COMPARE, pick up the two things being compared.  */
5891       if (GET_CODE (op0) == COMPARE)
5892 	{
5893 	  op1 = XEXP (op0, 1);
5894 	  op0 = XEXP (op0, 0);
5895 	  continue;
5896 	}
5897       else if (!REG_P (op0))
5898 	break;
5899 
5900       /* Go back to the previous insn.  Stop if it is not an INSN.  We also
5901 	 stop if it isn't a single set or if it has a REG_INC note because
5902 	 we don't want to bother dealing with it.  */
5903 
5904       prev = prev_nonnote_nondebug_insn (prev);
5905 
5906       if (prev == 0
5907 	  || !NONJUMP_INSN_P (prev)
5908 	  || FIND_REG_INC_NOTE (prev, NULL_RTX)
5909 	  /* In cfglayout mode, there do not have to be labels at the
5910 	     beginning of a block, or jumps at the end, so the previous
5911 	     conditions would not stop us when we reach bb boundary.  */
5912 	  || BLOCK_FOR_INSN (prev) != bb)
5913 	break;
5914 
5915       set = set_of (op0, prev);
5916 
5917       if (set
5918 	  && (GET_CODE (set) != SET
5919 	      || !rtx_equal_p (SET_DEST (set), op0)))
5920 	break;
5921 
5922       /* If this is setting OP0, get what it sets it to if it looks
5923 	 relevant.  */
5924       if (set)
5925 	{
5926 	  machine_mode inner_mode = GET_MODE (SET_DEST (set));
5927 #ifdef FLOAT_STORE_FLAG_VALUE
5928 	  REAL_VALUE_TYPE fsfv;
5929 #endif
5930 
5931 	  /* ??? We may not combine comparisons done in a CCmode with
5932 	     comparisons not done in a CCmode.  This is to aid targets
5933 	     like Alpha that have an IEEE compliant EQ instruction, and
5934 	     a non-IEEE compliant BEQ instruction.  The use of CCmode is
5935 	     actually artificial, simply to prevent the combination, but
5936 	     should not affect other platforms.
5937 
5938 	     However, we must allow VOIDmode comparisons to match either
5939 	     CCmode or non-CCmode comparison, because some ports have
5940 	     modeless comparisons inside branch patterns.
5941 
5942 	     ??? This mode check should perhaps look more like the mode check
5943 	     in simplify_comparison in combine.  */
5944 	  if (((GET_MODE_CLASS (mode) == MODE_CC)
5945 	       != (GET_MODE_CLASS (inner_mode) == MODE_CC))
5946 	      && mode != VOIDmode
5947 	      && inner_mode != VOIDmode)
5948 	    break;
5949 	  if (GET_CODE (SET_SRC (set)) == COMPARE
5950 	      || (((code == NE
5951 		    || (code == LT
5952 			&& val_signbit_known_set_p (inner_mode,
5953 						    STORE_FLAG_VALUE))
5954 #ifdef FLOAT_STORE_FLAG_VALUE
5955 		    || (code == LT
5956 			&& SCALAR_FLOAT_MODE_P (inner_mode)
5957 			&& (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5958 			    REAL_VALUE_NEGATIVE (fsfv)))
5959 #endif
5960 		    ))
5961 		  && COMPARISON_P (SET_SRC (set))))
5962 	    x = SET_SRC (set);
5963 	  else if (((code == EQ
5964 		     || (code == GE
5965 			 && val_signbit_known_set_p (inner_mode,
5966 						     STORE_FLAG_VALUE))
5967 #ifdef FLOAT_STORE_FLAG_VALUE
5968 		     || (code == GE
5969 			 && SCALAR_FLOAT_MODE_P (inner_mode)
5970 			 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5971 			     REAL_VALUE_NEGATIVE (fsfv)))
5972 #endif
5973 		     ))
5974 		   && COMPARISON_P (SET_SRC (set)))
5975 	    {
5976 	      reverse_code = 1;
5977 	      x = SET_SRC (set);
5978 	    }
5979 	  else if ((code == EQ || code == NE)
5980 		   && GET_CODE (SET_SRC (set)) == XOR)
5981 	    /* Handle sequences like:
5982 
5983 	       (set op0 (xor X Y))
5984 	       ...(eq|ne op0 (const_int 0))...
5985 
5986 	       in which case:
5987 
5988 	       (eq op0 (const_int 0)) reduces to (eq X Y)
5989 	       (ne op0 (const_int 0)) reduces to (ne X Y)
5990 
5991 	       This is the form used by MIPS16, for example.  */
5992 	    x = SET_SRC (set);
5993 	  else
5994 	    break;
5995 	}
5996 
5997       else if (reg_set_p (op0, prev))
5998 	/* If this sets OP0, but not directly, we have to give up.  */
5999 	break;
6000 
6001       if (x)
6002 	{
6003 	  /* If the caller is expecting the condition to be valid at INSN,
6004 	     make sure X doesn't change before INSN.  */
6005 	  if (valid_at_insn_p)
6006 	    if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
6007 	      break;
6008 	  if (COMPARISON_P (x))
6009 	    code = GET_CODE (x);
6010 	  if (reverse_code)
6011 	    {
6012 	      code = reversed_comparison_code (x, prev);
6013 	      if (code == UNKNOWN)
6014 		return 0;
6015 	      reverse_code = 0;
6016 	    }
6017 
6018 	  op0 = XEXP (x, 0), op1 = XEXP (x, 1);
6019 	  if (earliest)
6020 	    *earliest = prev;
6021 	}
6022     }
6023 
6024   /* If constant is first, put it last.  */
6025   if (CONSTANT_P (op0))
6026     code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
6027 
6028   /* If OP0 is the result of a comparison, we weren't able to find what
6029      was really being compared, so fail.  */
6030   if (!allow_cc_mode
6031       && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
6032     return 0;
6033 
6034   /* Canonicalize any ordered comparison with integers involving equality
6035      if we can do computations in the relevant mode and we do not
6036      overflow.  */
6037 
6038   scalar_int_mode op0_mode;
6039   if (CONST_INT_P (op1)
6040       && is_a <scalar_int_mode> (GET_MODE (op0), &op0_mode)
6041       && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT)
6042     {
6043       HOST_WIDE_INT const_val = INTVAL (op1);
6044       unsigned HOST_WIDE_INT uconst_val = const_val;
6045       unsigned HOST_WIDE_INT max_val
6046 	= (unsigned HOST_WIDE_INT) GET_MODE_MASK (op0_mode);
6047 
6048       switch (code)
6049 	{
6050 	case LE:
6051 	  if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
6052 	    code = LT, op1 = gen_int_mode (const_val + 1, op0_mode);
6053 	  break;
6054 
6055 	/* When cross-compiling, const_val might be sign-extended from
6056 	   BITS_PER_WORD to HOST_BITS_PER_WIDE_INT.  */
6057 	case GE:
6058 	  if ((const_val & max_val)
6059 	      != (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (op0_mode) - 1)))
6060 	    code = GT, op1 = gen_int_mode (const_val - 1, op0_mode);
6061 	  break;
6062 
6063 	case LEU:
6064 	  if (uconst_val < max_val)
6065 	    code = LTU, op1 = gen_int_mode (uconst_val + 1, op0_mode);
6066 	  break;
6067 
6068 	case GEU:
6069 	  if (uconst_val != 0)
6070 	    code = GTU, op1 = gen_int_mode (uconst_val - 1, op0_mode);
6071 	  break;
6072 
6073 	default:
6074 	  break;
6075 	}
6076     }
6077 
6078   /* We promised to return a comparison.  */
6079   rtx ret = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
6080   if (COMPARISON_P (ret))
6081     return ret;
6082   return 0;
6083 }
6084 
6085 /* Given a jump insn JUMP, return the condition that will cause it to branch
6086    to its JUMP_LABEL.  If the condition cannot be understood, or is an
6087    inequality floating-point comparison which needs to be reversed, 0 will
6088    be returned.
6089 
6090    If EARLIEST is nonzero, it is a pointer to a place where the earliest
6091    insn used in locating the condition was found.  If a replacement test
6092    of the condition is desired, it should be placed in front of that
6093    insn and we will be sure that the inputs are still valid.  If EARLIEST
6094    is null, the returned condition will be valid at INSN.
6095 
6096    If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
6097    compare CC mode register.
6098 
6099    VALID_AT_INSN_P is the same as for canonicalize_condition.  */
6100 
6101 rtx
6102 get_condition (rtx_insn *jump, rtx_insn **earliest, int allow_cc_mode,
6103 	       int valid_at_insn_p)
6104 {
6105   rtx cond;
6106   int reverse;
6107   rtx set;
6108 
6109   /* If this is not a standard conditional jump, we can't parse it.  */
6110   if (!JUMP_P (jump)
6111       || ! any_condjump_p (jump))
6112     return 0;
6113   set = pc_set (jump);
6114 
6115   cond = XEXP (SET_SRC (set), 0);
6116 
6117   /* If this branches to JUMP_LABEL when the condition is false, reverse
6118      the condition.  */
6119   reverse
6120     = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
6121       && label_ref_label (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (jump);
6122 
6123   return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
6124 				 allow_cc_mode, valid_at_insn_p);
6125 }
6126 
6127 /* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
6128    TARGET_MODE_REP_EXTENDED.
6129 
6130    Note that we assume that the property of
6131    TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
6132    narrower than mode B.  I.e., if A is a mode narrower than B then in
6133    order to be able to operate on it in mode B, mode A needs to
6134    satisfy the requirements set by the representation of mode B.  */
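
/* For example, on a target whose TARGET_MODE_REP_EXTENDED (QImode, HImode)
   is SIGN_EXTEND, NUM_SIGN_BIT_COPIES_IN_REP[HImode][QImode] ends up as
   GET_MODE_PRECISION (HImode) - GET_MODE_PRECISION (QImode), i.e. 8 for
   the usual 16-bit and 8-bit precisions.  */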
6135 
6136 static void
6137 init_num_sign_bit_copies_in_rep (void)
6138 {
6139   opt_scalar_int_mode in_mode_iter;
6140   scalar_int_mode mode;
6141 
6142   FOR_EACH_MODE_IN_CLASS (in_mode_iter, MODE_INT)
6143     FOR_EACH_MODE_UNTIL (mode, in_mode_iter.require ())
6144       {
6145 	scalar_int_mode in_mode = in_mode_iter.require ();
6146 	scalar_int_mode i;
6147 
6148 	/* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
6149 	   extends to the next widest mode.  */
6150 	gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
6151 		    || GET_MODE_WIDER_MODE (mode).require () == in_mode);
6152 
6153 	/* We are in in_mode.  Count how many bits outside of mode
6154 	   have to be copies of the sign-bit.  */
6155 	FOR_EACH_MODE (i, mode, in_mode)
6156 	  {
6157 	    /* This must always exist (for the last iteration it will be
6158 	       IN_MODE).  */
6159 	    scalar_int_mode wider = GET_MODE_WIDER_MODE (i).require ();
6160 
6161 	    if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
6162 		/* We can only check sign-bit copies starting from the
6163 		   top-bit.  In order to be able to check the bits we
6164 		   have already seen we pretend that subsequent bits
6165 		   have to be sign-bit copies too.  */
6166 		|| num_sign_bit_copies_in_rep [in_mode][mode])
6167 	      num_sign_bit_copies_in_rep [in_mode][mode]
6168 		+= GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
6169 	  }
6170       }
6171 }
6172 
6173 /* Suppose that truncation from the machine mode of X to MODE is not a
6174    no-op.  See if there is anything special about X so that we can
6175    assume it already contains a truncated value of MODE.  */
6176 
6177 bool
6178 truncated_to_mode (machine_mode mode, const_rtx x)
6179 {
6180   /* This register has already been used in MODE without explicit
6181      truncation.  */
6182   if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
6183     return true;
6184 
6185   /* See if we already satisfy the requirements of MODE.  If yes we
6186      can just switch to MODE.  */
6187   if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
6188       && (num_sign_bit_copies (x, GET_MODE (x))
6189 	  >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
6190     return true;
6191 
6192   return false;
6193 }
6194 
6195 /* Return true if RTX code CODE has a single sequence of zero or more
6196    "e" operands and no rtvec operands.  Initialize its rtx_all_subrtx_bounds
6197    entry in that case.  */
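
/* For example, IF_THEN_ELSE has format "eee", so its entry gets
   start == 0 and count == 3, whereas PARALLEL has format "E" and makes
   this function return false.  */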
6198 
6199 static bool
6200 setup_reg_subrtx_bounds (unsigned int code)
6201 {
6202   const char *format = GET_RTX_FORMAT ((enum rtx_code) code);
6203   unsigned int i = 0;
6204   for (; format[i] != 'e'; ++i)
6205     {
6206       if (!format[i])
6207 	/* No subrtxes.  Leave start and count as 0.  */
6208 	return true;
6209       if (format[i] == 'E' || format[i] == 'V')
6210 	return false;
6211     }
6212 
6213   /* Record the sequence of 'e's.  */
6214   rtx_all_subrtx_bounds[code].start = i;
6215   do
6216     ++i;
6217   while (format[i] == 'e');
6218   rtx_all_subrtx_bounds[code].count = i - rtx_all_subrtx_bounds[code].start;
6219   /* rtl-iter.h relies on this.  */
6220   gcc_checking_assert (rtx_all_subrtx_bounds[code].count <= 3);
6221 
6222   for (; format[i]; ++i)
6223     if (format[i] == 'E' || format[i] == 'V' || format[i] == 'e')
6224       return false;
6225 
6226   return true;
6227 }
6228 
6229 /* Initialize rtx_all_subrtx_bounds.  */
6230 void
6231 init_rtlanal (void)
6232 {
6233   int i;
6234   for (i = 0; i < NUM_RTX_CODE; i++)
6235     {
6236       if (!setup_reg_subrtx_bounds (i))
6237 	rtx_all_subrtx_bounds[i].count = UCHAR_MAX;
6238       if (GET_RTX_CLASS (i) != RTX_CONST_OBJ)
6239 	rtx_nonconst_subrtx_bounds[i] = rtx_all_subrtx_bounds[i];
6240     }
6241 
6242   init_num_sign_bit_copies_in_rep ();
6243 }
6244 
6245 /* Check whether this is a constant pool constant.  */
6246 bool
6247 constant_pool_constant_p (rtx x)
6248 {
6249   x = avoid_constant_pool_reference (x);
6250   return CONST_DOUBLE_P (x);
6251 }
6252 
6253 /* If M is a bitmask that selects a field of low-order bits within an item but
6254    not the entire word, return the length of the field.  Return -1 otherwise.
6255    M is used in machine mode MODE.  */
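
/* For example, in SImode M == 0x3f yields 6, while M == 0x18 yields -1
   because the selected bits do not start at bit 0.  */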
6256 
6257 int
6258 low_bitmask_len (machine_mode mode, unsigned HOST_WIDE_INT m)
6259 {
6260   if (mode != VOIDmode)
6261     {
6262       if (!HWI_COMPUTABLE_MODE_P (mode))
6263 	return -1;
6264       m &= GET_MODE_MASK (mode);
6265     }
6266 
6267   return exact_log2 (m + 1);
6268 }
6269 
6270 /* Return the mode of MEM's address.  */
6271 
6272 scalar_int_mode
6273 get_address_mode (rtx mem)
6274 {
6275   machine_mode mode;
6276 
6277   gcc_assert (MEM_P (mem));
6278   mode = GET_MODE (XEXP (mem, 0));
6279   if (mode != VOIDmode)
6280     return as_a <scalar_int_mode> (mode);
6281   return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
6282 }
6283 
6284 /* Split up a CONST_DOUBLE or integer constant rtx
6285    into two rtx's for single words,
6286    storing in *FIRST the word that comes first in memory in the target
6287    and in *SECOND the other.
6288 
6289    TODO: This function needs to be rewritten to work on any size
6290    integer.  */
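
/* For example, assuming a target with 32-bit words in big-endian order
   and a 64-bit HOST_WIDE_INT, the CONST_INT 0x100000002 is split into
   *FIRST == (const_int 1) and *SECOND == (const_int 2).  */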
6291 
6292 void
6293 split_double (rtx value, rtx *first, rtx *second)
6294 {
6295   if (CONST_INT_P (value))
6296     {
6297       if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
6298 	{
6299 	  /* In this case the CONST_INT holds both target words.
6300 	     Extract the bits from it into two word-sized pieces.
6301 	     Sign extend each half to HOST_WIDE_INT.  */
6302 	  unsigned HOST_WIDE_INT low, high;
6303 	  unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
6304 	  unsigned bits_per_word = BITS_PER_WORD;
6305 
6306 	  /* Set sign_bit to the most significant bit of a word.  */
6307 	  sign_bit = 1;
6308 	  sign_bit <<= bits_per_word - 1;
6309 
6310 	  /* Set mask so that all bits of the word are set.  We could
6311 	     have used 1 << BITS_PER_WORD instead of basing the
6312 	     calculation on sign_bit.  However, on machines where
6313 	     HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
6314 	     compiler warning, even though the code would never be
6315 	     executed.  */
6316 	  mask = sign_bit << 1;
6317 	  mask--;
6318 
6319 	  /* Set sign_extend as any remaining bits.  */
6320 	  sign_extend = ~mask;
6321 
6322 	  /* Pick the lower word and sign-extend it.  */
6323 	  low = INTVAL (value);
6324 	  low &= mask;
6325 	  if (low & sign_bit)
6326 	    low |= sign_extend;
6327 
6328 	  /* Pick the higher word, shifted to the least significant
6329 	     bits, and sign-extend it.  */
6330 	  high = INTVAL (value);
6331 	  high >>= bits_per_word - 1;
6332 	  high >>= 1;
6333 	  high &= mask;
6334 	  if (high & sign_bit)
6335 	    high |= sign_extend;
6336 
6337 	  /* Store the words in the target machine order.  */
6338 	  if (WORDS_BIG_ENDIAN)
6339 	    {
6340 	      *first = GEN_INT (high);
6341 	      *second = GEN_INT (low);
6342 	    }
6343 	  else
6344 	    {
6345 	      *first = GEN_INT (low);
6346 	      *second = GEN_INT (high);
6347 	    }
6348 	}
6349       else
6350 	{
6351 	  /* The rule for using CONST_INT for a wider mode
6352 	     is that we regard the value as signed.
6353 	     So sign-extend it.  */
6354 	  rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
6355 	  if (WORDS_BIG_ENDIAN)
6356 	    {
6357 	      *first = high;
6358 	      *second = value;
6359 	    }
6360 	  else
6361 	    {
6362 	      *first = value;
6363 	      *second = high;
6364 	    }
6365 	}
6366     }
6367   else if (GET_CODE (value) == CONST_WIDE_INT)
6368     {
6369       /* All of this is scary code and needs to be converted to
6370 	 properly work with any size integer.  */
6371       gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2);
6372       if (WORDS_BIG_ENDIAN)
6373 	{
6374 	  *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
6375 	  *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
6376 	}
6377       else
6378 	{
6379 	  *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
6380 	  *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
6381 	}
6382     }
6383   else if (!CONST_DOUBLE_P (value))
6384     {
6385       if (WORDS_BIG_ENDIAN)
6386 	{
6387 	  *first = const0_rtx;
6388 	  *second = value;
6389 	}
6390       else
6391 	{
6392 	  *first = value;
6393 	  *second = const0_rtx;
6394 	}
6395     }
6396   else if (GET_MODE (value) == VOIDmode
6397 	   /* This is the old way we did CONST_DOUBLE integers.  */
6398 	   || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
6399     {
6400       /* In an integer, the words are defined as most and least significant.
6401 	 So order them by the target's convention.  */
6402       if (WORDS_BIG_ENDIAN)
6403 	{
6404 	  *first = GEN_INT (CONST_DOUBLE_HIGH (value));
6405 	  *second = GEN_INT (CONST_DOUBLE_LOW (value));
6406 	}
6407       else
6408 	{
6409 	  *first = GEN_INT (CONST_DOUBLE_LOW (value));
6410 	  *second = GEN_INT (CONST_DOUBLE_HIGH (value));
6411 	}
6412     }
6413   else
6414     {
6415       long l[2];
6416 
6417       /* Note, this converts the REAL_VALUE_TYPE to the target's
6418 	 format, splits up the floating point double and outputs
6419 	 exactly 32 bits of it into each of l[0] and l[1] --
6420 	 not necessarily BITS_PER_WORD bits.  */
6421       REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (value), l);
6422 
6423       /* If 32 bits is an entire word for the target, but not for the host,
6424 	 then sign-extend on the host so that the number will look the same
6425 	 way on the host that it would on the target.  See for instance
6426 	 simplify_unary_operation.  The #if is needed to avoid compiler
6427 	 warnings.  */
6428 
6429 #if HOST_BITS_PER_LONG > 32
6430       if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
6431 	{
6432 	  if (l[0] & ((long) 1 << 31))
6433 	    l[0] |= ((unsigned long) (-1) << 32);
6434 	  if (l[1] & ((long) 1 << 31))
6435 	    l[1] |= ((unsigned long) (-1) << 32);
6436 	}
6437 #endif
6438 
6439       *first = GEN_INT (l[0]);
6440       *second = GEN_INT (l[1]);
6441     }
6442 }
6443 
6444 /* Return true if X is a sign_extract or zero_extract from the least
6445    significant bit.  */
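
/* For example, (zero_extract:SI (reg:SI R) (const_int 8) (const_int 0))
   qualifies when !BITS_BIG_ENDIAN; with BITS_BIG_ENDIAN the position
   would have to be 32 - 8 == 24 instead.  */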
6446 
6447 static bool
6448 lsb_bitfield_op_p (rtx x)
6449 {
6450   if (GET_RTX_CLASS (GET_CODE (x)) == RTX_BITFIELD_OPS)
6451     {
6452       machine_mode mode = GET_MODE (XEXP (x, 0));
6453       HOST_WIDE_INT len = INTVAL (XEXP (x, 1));
6454       HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));
6455       poly_int64 remaining_bits = GET_MODE_PRECISION (mode) - len;
6456 
6457       return known_eq (pos, BITS_BIG_ENDIAN ? remaining_bits : 0);
6458     }
6459   return false;
6460 }
6461 
6462 /* Strip outer address "mutations" from LOC and return a pointer to the
6463    inner value.  If OUTER_CODE is nonnull, store the code of the innermost
6464    stripped expression there.
6465 
6466    "Mutations" either convert between modes or apply some kind of
6467    extension, truncation or alignment.  */
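
/* For example, given (and (subreg:SI (mult:DI (reg:DI R) (const_int 4)) ...)
   (const_int -4)) where the SUBREG is a lowpart, both the alignment AND
   and the SUBREG are peeled off and a pointer to the MULT is returned,
   with *OUTER_CODE set to SUBREG.  */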
6468 
6469 rtx *
6470 strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
6471 {
6472   for (;;)
6473     {
6474       enum rtx_code code = GET_CODE (*loc);
6475       if (GET_RTX_CLASS (code) == RTX_UNARY)
6476 	/* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
6477 	   used to convert between pointer sizes.  */
6478 	loc = &XEXP (*loc, 0);
6479       else if (lsb_bitfield_op_p (*loc))
6480 	/* A [SIGN|ZERO]_EXTRACT from the least significant bit effectively
6481 	   acts as a combined truncation and extension.  */
6482 	loc = &XEXP (*loc, 0);
6483       else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
6484 	/* (and ... (const_int -X)) is used to align to X bytes.  */
6485 	loc = &XEXP (*loc, 0);
6486       else if (code == SUBREG
6487                && !OBJECT_P (SUBREG_REG (*loc))
6488                && subreg_lowpart_p (*loc))
6489 	/* A lowpart (subreg (operator ...) ...) is used for mode
6490 	   conversion too.  */
6491 	loc = &SUBREG_REG (*loc);
6492       else
6493 	return loc;
6494       if (outer_code)
6495 	*outer_code = code;
6496     }
6497 }
6498 
6499 /* Return true if CODE applies some kind of scale.  The scaled value is
6500    the first operand and the scale is the second.  */
6501 
6502 static bool
6503 binary_scale_code_p (enum rtx_code code)
6504 {
6505   return (code == MULT
6506           || code == ASHIFT
6507           /* Needed by ARM targets.  */
6508           || code == ASHIFTRT
6509           || code == LSHIFTRT
6510           || code == ROTATE
6511           || code == ROTATERT);
6512 }
6513 
6514 /* If *INNER can be interpreted as a base, return a pointer to the inner term
6515    (see address_info).  Return null otherwise.  */
6516 
6517 static rtx *
6518 get_base_term (rtx *inner)
6519 {
6520   if (GET_CODE (*inner) == LO_SUM)
6521     inner = strip_address_mutations (&XEXP (*inner, 0));
6522   if (REG_P (*inner)
6523       || MEM_P (*inner)
6524       || GET_CODE (*inner) == SUBREG
6525       || GET_CODE (*inner) == SCRATCH)
6526     return inner;
6527   return 0;
6528 }
6529 
6530 /* If *INNER can be interpreted as an index, return a pointer to the inner term
6531    (see address_info).  Return null otherwise.  */
6532 
6533 static rtx *
6534 get_index_term (rtx *inner)
6535 {
6536   /* At present, only constant scales are allowed.  */
6537   if (binary_scale_code_p (GET_CODE (*inner)) && CONSTANT_P (XEXP (*inner, 1)))
6538     inner = strip_address_mutations (&XEXP (*inner, 0));
6539   if (REG_P (*inner)
6540       || MEM_P (*inner)
6541       || GET_CODE (*inner) == SUBREG
6542       || GET_CODE (*inner) == SCRATCH)
6543     return inner;
6544   return 0;
6545 }
6546 
6547 /* Set the segment part of address INFO to LOC, given that INNER is the
6548    unmutated value.  */
6549 
6550 static void
6551 set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
6552 {
6553   gcc_assert (!info->segment);
6554   info->segment = loc;
6555   info->segment_term = inner;
6556 }
6557 
6558 /* Set the base part of address INFO to LOC, given that INNER is the
6559    unmutated value.  */
6560 
6561 static void
6562 set_address_base (struct address_info *info, rtx *loc, rtx *inner)
6563 {
6564   gcc_assert (!info->base);
6565   info->base = loc;
6566   info->base_term = inner;
6567 }
6568 
6569 /* Set the index part of address INFO to LOC, given that INNER is the
6570    unmutated value.  */
6571 
6572 static void
6573 set_address_index (struct address_info *info, rtx *loc, rtx *inner)
6574 {
6575   gcc_assert (!info->index);
6576   info->index = loc;
6577   info->index_term = inner;
6578 }
6579 
6580 /* Set the displacement part of address INFO to LOC, given that INNER
6581    is the constant term.  */
6582 
6583 static void
6584 set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
6585 {
6586   gcc_assert (!info->disp);
6587   info->disp = loc;
6588   info->disp_term = inner;
6589 }
6590 
6591 /* INFO->INNER describes a {PRE,POST}_{INC,DEC} address.  Set up the
6592    rest of INFO accordingly.  */
6593 
6594 static void
6595 decompose_incdec_address (struct address_info *info)
6596 {
6597   info->autoinc_p = true;
6598 
6599   rtx *base = &XEXP (*info->inner, 0);
6600   set_address_base (info, base, base);
6601   gcc_checking_assert (info->base == info->base_term);
6602 
6603   /* These addresses are only valid when the size of the addressed
6604      value is known.  */
6605   gcc_checking_assert (info->mode != VOIDmode);
6606 }
6607 
6608 /* INFO->INNER describes a {PRE,POST}_MODIFY address.  Set up the rest
6609    of INFO accordingly.  */
6610 
6611 static void
6612 decompose_automod_address (struct address_info *info)
6613 {
6614   info->autoinc_p = true;
6615 
6616   rtx *base = &XEXP (*info->inner, 0);
6617   set_address_base (info, base, base);
6618   gcc_checking_assert (info->base == info->base_term);
6619 
6620   rtx plus = XEXP (*info->inner, 1);
6621   gcc_assert (GET_CODE (plus) == PLUS);
6622 
6623   info->base_term2 = &XEXP (plus, 0);
6624   gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));
6625 
6626   rtx *step = &XEXP (plus, 1);
6627   rtx *inner_step = strip_address_mutations (step);
6628   if (CONSTANT_P (*inner_step))
6629     set_address_disp (info, step, inner_step);
6630   else
6631     set_address_index (info, step, inner_step);
6632 }
6633 
6634 /* Treat *LOC as a tree of PLUS operands and store pointers to the summed
6635    values in [PTR, END).  Return a pointer to the end of the used array.  */
6636 
6637 static rtx **
6638 extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
6639 {
6640   rtx x = *loc;
6641   if (GET_CODE (x) == PLUS)
6642     {
6643       ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
6644       ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
6645     }
6646   else
6647     {
6648       gcc_assert (ptr != end);
6649       *ptr++ = loc;
6650     }
6651   return ptr;
6652 }
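
/* Illustrative sketch (assumed address, not from the original sources):
   for

     *LOC == (plus (plus (reg r1) (mult (reg r2) (const_int 4)))
                   (const_int 8))

   the recursion above stores one pointer per leaf, in left-to-right order:

     ptr[0] -> (reg r1)
     ptr[1] -> (mult (reg r2) (const_int 4))
     ptr[2] -> (const_int 8)

   and returns PTR + 3.  The caller must supply enough room; the gcc_assert
   fires if the tree has more leaves than [PTR, END) can hold.  */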
6653 
6654 /* Evaluate the likelihood of X being a base or index value, returning
6655    positive if it is likely to be a base, negative if it is likely to be
6656    an index, and 0 if we can't tell.  Make the magnitude of the return
6657    value reflect the amount of confidence we have in the answer.
6658 
6659    MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1.  */
6660 
6661 static int
6662 baseness (rtx x, machine_mode mode, addr_space_t as,
6663 	  enum rtx_code outer_code, enum rtx_code index_code)
6664 {
6665   /* Believe *_POINTER unless the address shape requires otherwise.  */
6666   if (REG_P (x) && REG_POINTER (x))
6667     return 2;
6668   if (MEM_P (x) && MEM_POINTER (x))
6669     return 2;
6670 
6671   if (REG_P (x) && HARD_REGISTER_P (x))
6672     {
6673       /* X is a hard register.  If it only fits one of the base
6674 	 or index classes, choose that interpretation.  */
6675       int regno = REGNO (x);
6676       bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
6677       bool index_p = REGNO_OK_FOR_INDEX_P (regno);
6678       if (base_p != index_p)
6679 	return base_p ? 1 : -1;
6680     }
6681   return 0;
6682 }
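
/* Illustrative sketch (assumed operands, not from the original sources):

     X                                       baseness (X, ...)
     pseudo register with REG_POINTER set     2  (very likely a base)
     MEM with MEM_POINTER set                 2
     hard register valid only as a base       1
     hard register valid only as an index    -1
     anything else                            0  (no opinion)  */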
6683 
6684 /* INFO->INNER describes a normal, non-automodified address.
6685    Fill in the rest of INFO accordingly.  */
6686 
6687 static void
6688 decompose_normal_address (struct address_info *info)
6689 {
6690   /* Treat the address as the sum of up to four values.  */
6691   rtx *ops[4];
6692   size_t n_ops = extract_plus_operands (info->inner, ops,
6693 					ops + ARRAY_SIZE (ops)) - ops;
6694 
6695   /* If there is more than one component, any base component is in a PLUS.  */
6696   if (n_ops > 1)
6697     info->base_outer_code = PLUS;
6698 
6699   /* Try to classify each sum operand now.  Leave those that could be
6700      either a base or an index in OPS.  */
6701   rtx *inner_ops[4];
6702   size_t out = 0;
6703   for (size_t in = 0; in < n_ops; ++in)
6704     {
6705       rtx *loc = ops[in];
6706       rtx *inner = strip_address_mutations (loc);
6707       if (CONSTANT_P (*inner))
6708 	set_address_disp (info, loc, inner);
6709       else if (GET_CODE (*inner) == UNSPEC)
6710 	set_address_segment (info, loc, inner);
6711       else
6712 	{
6713 	  /* The only other possibilities are a base or an index.  */
6714 	  rtx *base_term = get_base_term (inner);
6715 	  rtx *index_term = get_index_term (inner);
6716 	  gcc_assert (base_term || index_term);
6717 	  if (!base_term)
6718 	    set_address_index (info, loc, index_term);
6719 	  else if (!index_term)
6720 	    set_address_base (info, loc, base_term);
6721 	  else
6722 	    {
6723 	      gcc_assert (base_term == index_term);
6724 	      ops[out] = loc;
6725 	      inner_ops[out] = base_term;
6726 	      ++out;
6727 	    }
6728 	}
6729     }
6730 
6731   /* Classify the remaining OPS members as bases and indexes.  */
6732   if (out == 1)
6733     {
6734       /* If we haven't seen a base or an index yet, assume that this is
6735 	 the base.  If we were confident that another term was the base
6736 	 or index, treat the remaining operand as the other kind.  */
6737       if (!info->base)
6738 	set_address_base (info, ops[0], inner_ops[0]);
6739       else
6740 	set_address_index (info, ops[0], inner_ops[0]);
6741     }
6742   else if (out == 2)
6743     {
6744       /* In the event of a tie, assume the base comes first.  */
6745       if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
6746 		    GET_CODE (*ops[1]))
6747 	  >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
6748 		       GET_CODE (*ops[0])))
6749 	{
6750 	  set_address_base (info, ops[0], inner_ops[0]);
6751 	  set_address_index (info, ops[1], inner_ops[1]);
6752 	}
6753       else
6754 	{
6755 	  set_address_base (info, ops[1], inner_ops[1]);
6756 	  set_address_index (info, ops[0], inner_ops[0]);
6757 	}
6758     }
6759   else
6760     gcc_assert (out == 0);
6761 }
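
/* Illustrative sketch (assumed x86-style address, not from the original
   sources): for

     (plus (plus (mult (reg r2) (const_int 4)) (reg r1)) (const_int 8))

   extract_plus_operands yields three operands.  (const_int 8) is constant
   and becomes the displacement.  The MULT matches only get_index_term, so
   it becomes the index immediately.  (reg r1) matches both patterns and is
   left for the second pass; since no base has been recorded by then, it
   becomes the base.  Had two ambiguous operands remained, baseness would
   have decided which is which, with ties making the first operand the
   base.  */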
6762 
6763 /* Describe address *LOC in *INFO.  MODE is the mode of the addressed value,
6764    or VOIDmode if not known.  AS is the address space associated with LOC.
6765    OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise.  */
6766 
6767 void
6768 decompose_address (struct address_info *info, rtx *loc, machine_mode mode,
6769 		   addr_space_t as, enum rtx_code outer_code)
6770 {
6771   memset (info, 0, sizeof (*info));
6772   info->mode = mode;
6773   info->as = as;
6774   info->addr_outer_code = outer_code;
6775   info->outer = loc;
6776   info->inner = strip_address_mutations (loc, &outer_code);
6777   info->base_outer_code = outer_code;
6778   switch (GET_CODE (*info->inner))
6779     {
6780     case PRE_DEC:
6781     case PRE_INC:
6782     case POST_DEC:
6783     case POST_INC:
6784       decompose_incdec_address (info);
6785       break;
6786 
6787     case PRE_MODIFY:
6788     case POST_MODIFY:
6789       decompose_automod_address (info);
6790       break;
6791 
6792     default:
6793       decompose_normal_address (info);
6794       break;
6795     }
6796 }
6797 
6798 /* Describe address operand LOC in INFO.  */
6799 
6800 void
6801 decompose_lea_address (struct address_info *info, rtx *loc)
6802 {
6803   decompose_address (info, loc, VOIDmode, ADDR_SPACE_GENERIC, ADDRESS);
6804 }
6805 
6806 /* Describe the address of MEM X in INFO.  */
6807 
6808 void
6809 decompose_mem_address (struct address_info *info, rtx x)
6810 {
6811   gcc_assert (MEM_P (x));
6812   decompose_address (info, &XEXP (x, 0), GET_MODE (x),
6813 		     MEM_ADDR_SPACE (x), MEM);
6814 }
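
/* Illustrative usage sketch.  The helper below is hypothetical (it is not
   part of this file); it shows how decompose_mem_address is typically
   consumed: extract a base register and a constant displacement from MEM
   when the address has the simple base + offset shape.  */
#if 0
static bool
mem_base_plus_disp_p (rtx mem, rtx *base_out, HOST_WIDE_INT *disp_out)
{
  struct address_info info;
  decompose_mem_address (&info, mem);

  /* Reject auto-increment, segmented and indexed forms.  */
  if (info.autoinc_p || info.segment || info.index)
    return false;

  /* Require a register base.  */
  if (!info.base || !REG_P (*info.base_term))
    return false;
  *base_out = *info.base_term;

  /* Accept either no displacement or a CONST_INT one.  */
  if (info.disp)
    {
      if (!CONST_INT_P (*info.disp_term))
	return false;
      *disp_out = INTVAL (*info.disp_term);
    }
  else
    *disp_out = 0;
  return true;
}
#endif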
6815 
6816 /* Update INFO after a change to the address it describes.  */
6817 
6818 void
6819 update_address (struct address_info *info)
6820 {
6821   decompose_address (info, info->outer, info->mode, info->as,
6822 		     info->addr_outer_code);
6823 }
6824 
6825 /* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
6826    more complicated than that.  */
6827 
6828 HOST_WIDE_INT
6829 get_index_scale (const struct address_info *info)
6830 {
6831   rtx index = *info->index;
6832   if (GET_CODE (index) == MULT
6833       && CONST_INT_P (XEXP (index, 1))
6834       && info->index_term == &XEXP (index, 0))
6835     return INTVAL (XEXP (index, 1));
6836 
6837   if (GET_CODE (index) == ASHIFT
6838       && CONST_INT_P (XEXP (index, 1))
6839       && info->index_term == &XEXP (index, 0))
6840     return HOST_WIDE_INT_1 << INTVAL (XEXP (index, 1));
6841 
6842   if (info->index == info->index_term)
6843     return 1;
6844 
6845   return 0;
6846 }
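
/* Illustrative sketch (assumed index shapes, not from the original sources):

     *info->index                                get_index_scale (info)
     (mult:SI (reg:SI r2) (const_int 4))         4
     (ashift:SI (reg:SI r2) (const_int 2))       4   (HOST_WIDE_INT_1 << 2)
     (reg:SI r2)                                 1
     index wrapped in a mutation, e.g.
       (sign_extend:DI (mult:SI ...))            0

   In the first two rows info->index_term points at the (reg:SI r2) operand
   inside the index expression; in the third, index and index_term
   coincide.  */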
6847 
6848 /* Return the "index code" of INFO, in the form required by
6849    ok_for_base_p_1.  */
6850 
6851 enum rtx_code
6852 get_index_code (const struct address_info *info)
6853 {
6854   if (info->index)
6855     return GET_CODE (*info->index);
6856 
6857   if (info->disp)
6858     return GET_CODE (*info->disp);
6859 
6860   return SCRATCH;
6861 }
6862 
6863 /* Return true if RTL X contains a SYMBOL_REF.  */
6864 
6865 bool
6866 contains_symbol_ref_p (const_rtx x)
6867 {
6868   subrtx_iterator::array_type array;
6869   FOR_EACH_SUBRTX (iter, array, x, ALL)
6870     if (SYMBOL_REF_P (*iter))
6871       return true;
6872 
6873   return false;
6874 }
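
/* Illustrative sketch (assumed operand, not from the original sources):
   contains_symbol_ref_p returns true for

     (const:DI (plus:DI (symbol_ref:DI ("x")) (const_int 4)))

   because FOR_EACH_SUBRTX descends through the CONST and PLUS and finds
   the SYMBOL_REF; it returns false for, say, (reg:DI r1) or (const_int 4).  */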
6875 
6876 /* Return true if RTL X contains a SYMBOL_REF or LABEL_REF.  */
6877 
6878 bool
6879 contains_symbolic_reference_p (const_rtx x)
6880 {
6881   subrtx_iterator::array_type array;
6882   FOR_EACH_SUBRTX (iter, array, x, ALL)
6883     if (SYMBOL_REF_P (*iter) || GET_CODE (*iter) == LABEL_REF)
6884       return true;
6885 
6886   return false;
6887 }
6888 
6889 /* Return true if RTL X contains a constant pool address.  */
6890 
6891 bool
6892 contains_constant_pool_address_p (const_rtx x)
6893 {
6894   subrtx_iterator::array_type array;
6895   FOR_EACH_SUBRTX (iter, array, x, ALL)
6896     if (SYMBOL_REF_P (*iter) && CONSTANT_POOL_ADDRESS_P (*iter))
6897       return true;
6898 
6899   return false;
6900 }
6901 
6902 
6903 /* Return true if X contains a thread-local symbol.  */
6904 
6905 bool
6906 tls_referenced_p (const_rtx x)
6907 {
6908   if (!targetm.have_tls)
6909     return false;
6910 
6911   subrtx_iterator::array_type array;
6912   FOR_EACH_SUBRTX (iter, array, x, ALL)
6913     if (GET_CODE (*iter) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (*iter) != 0)
6914       return true;
6915   return false;
6916 }
6917 
6918 /* Recursively scan X, which occurs in INSN, and add REG_INC notes where needed.  */
6919 void
6920 add_auto_inc_notes (rtx_insn *insn, rtx x)
6921 {
6922   enum rtx_code code = GET_CODE (x);
6923   const char *fmt;
6924   int i, j;
6925 
6926   if (code == MEM && auto_inc_p (XEXP (x, 0)))
6927     {
6928       add_reg_note (insn, REG_INC, XEXP (XEXP (x, 0), 0));
6929       return;
6930     }
6931 
6932   /* Scan all X sub-expressions.  */
6933   fmt = GET_RTX_FORMAT (code);
6934   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
6935     {
6936       if (fmt[i] == 'e')
6937 	add_auto_inc_notes (insn, XEXP (x, i));
6938       else if (fmt[i] == 'E')
6939 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6940 	  add_auto_inc_notes (insn, XVECEXP (x, i, j));
6941     }
6942 }
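
/* Illustrative sketch (assumed insn, not from the original sources):
   for an insn whose pattern is

     (set (mem:SI (post_inc:SI (reg:SI r1))) (reg:SI r2))

   the walk above reaches the MEM, whose address satisfies auto_inc_p,
   and attaches a (REG_INC r1) note to INSN, recording that r1 is
   modified as a side effect of the address.  */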
6943 
6944 /* Return true if X is a register declared with "register ... asm (NAME)".  */
6945 
6946 bool
6947 register_asm_p (const_rtx x)
6948 {
6949   return (REG_P (x)
6950 	  && REG_EXPR (x) != NULL_TREE
6951 	  && HAS_DECL_ASSEMBLER_NAME_P (REG_EXPR (x))
6952 	  && DECL_ASSEMBLER_NAME_SET_P (REG_EXPR (x))
6953 	  && DECL_REGISTER (REG_EXPR (x)));
6954 }
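
/* Illustrative sketch (assumed user code, not from the original sources):
   for a local register variable such as

     register int counter asm ("r10");

   the REG that represents COUNTER has a REG_EXPR whose decl was marked
   DECL_REGISTER and given an assembler name, so register_asm_p returns
   true.  Ordinary pseudos and unnamed hard registers return false.  */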
6955 
6956 /* Return true if, for all OP of mode OP_MODE:
6957 
6958      (vec_select:RESULT_MODE OP SEL)
6959 
6960    is equivalent to the highpart RESULT_MODE of OP.  */
6961 
6962 bool
6963 vec_series_highpart_p (machine_mode result_mode, machine_mode op_mode, rtx sel)
6964 {
6965   int nunits;
6966   if (GET_MODE_NUNITS (op_mode).is_constant (&nunits)
6967       && targetm.can_change_mode_class (op_mode, result_mode, ALL_REGS))
6968     {
6969       int offset = BYTES_BIG_ENDIAN ? 0 : nunits - XVECLEN (sel, 0);
6970       return rtvec_series_p (XVEC (sel, 0), offset);
6971     }
6972   return false;
6973 }
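
/* Illustrative sketch (assumed modes, not from the original sources):
   with OP_MODE V4SImode and RESULT_MODE V2SImode, the selector must be
   (parallel [0 1]) on a big-endian target and (parallel [2 3]) on a
   little-endian one for the VEC_SELECT to be the V2SI highpart of OP.  */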
6974 
6975 /* Return true if, for all OP of mode OP_MODE:
6976 
6977      (vec_select:RESULT_MODE OP SEL)
6978 
6979    is equivalent to the lowpart RESULT_MODE of OP.  */
6980 
6981 bool
6982 vec_series_lowpart_p (machine_mode result_mode, machine_mode op_mode, rtx sel)
6983 {
6984   int nunits;
6985   if (GET_MODE_NUNITS (op_mode).is_constant (&nunits)
6986       && targetm.can_change_mode_class (op_mode, result_mode, ALL_REGS))
6987     {
6988       int offset = BYTES_BIG_ENDIAN ? nunits - XVECLEN (sel, 0) : 0;
6989       return rtvec_series_p (XVEC (sel, 0), offset);
6990     }
6991   return false;
6992 }
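
/* Illustrative sketch (assumed modes, not from the original sources):
   the lowpart case mirrors the highpart one: with OP_MODE V4SImode and
   RESULT_MODE V2SImode the selector must be (parallel [2 3]) on a
   big-endian target and (parallel [0 1]) on a little-endian one.  Both
   predicates additionally require targetm.can_change_mode_class to allow
   the V4SI -> V2SI change for ALL_REGS.  */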
6993