1 /* Analyze RTL for GNU compiler.
2    Copyright (C) 1987-2021 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "rtlanal.h"
28 #include "tree.h"
29 #include "predict.h"
30 #include "df.h"
31 #include "memmodel.h"
32 #include "tm_p.h"
33 #include "insn-config.h"
34 #include "regs.h"
35 #include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
36 #include "recog.h"
37 #include "addresses.h"
38 #include "rtl-iter.h"
39 #include "hard-reg-set.h"
40 #include "function-abi.h"
41 
42 /* Forward declarations */
43 static void set_of_1 (rtx, const_rtx, void *);
44 static bool covers_regno_p (const_rtx, unsigned int);
45 static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
46 static int computed_jump_p_1 (const_rtx);
47 static void parms_set (rtx, const_rtx, void *);
48 
49 static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, scalar_int_mode,
50                                                    const_rtx, machine_mode,
51                                                    unsigned HOST_WIDE_INT);
52 static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, scalar_int_mode,
53 					     const_rtx, machine_mode,
54                                              unsigned HOST_WIDE_INT);
55 static unsigned int cached_num_sign_bit_copies (const_rtx, scalar_int_mode,
56 						const_rtx, machine_mode,
57                                                 unsigned int);
58 static unsigned int num_sign_bit_copies1 (const_rtx, scalar_int_mode,
59 					  const_rtx, machine_mode,
60 					  unsigned int);
61 
62 rtx_subrtx_bound_info rtx_all_subrtx_bounds[NUM_RTX_CODE];
63 rtx_subrtx_bound_info rtx_nonconst_subrtx_bounds[NUM_RTX_CODE];
64 
65 /* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
66    If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
67    SIGN_EXTEND then while narrowing we also have to enforce the
68    representation and sign-extend the value to mode DESTINATION_REP.
69 
70    If the value is already sign-extended to DESTINATION_REP mode we
71    can just switch to DESTINATION mode on it.  For each pair of
72    integral modes SOURCE and DESTINATION, when truncating from SOURCE
73    to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
74    contains the number of high-order bits in SOURCE that have to be
75    copies of the sign-bit so that we can do this mode-switch to
76    DESTINATION.  */
77 
78 static unsigned int
79 num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
80 
81 /* Store X into index I of ARRAY.  ARRAY is known to have at least I
82    elements.  Return the new base of ARRAY.  */
83 
84 template <typename T>
85 typename T::value_type *
86 generic_subrtx_iterator <T>::add_single_to_queue (array_type &array,
87 						  value_type *base,
88 						  size_t i, value_type x)
89 {
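  /* While the worklist still fits in the statically-allocated stack
     buffer, store X there; the first time it overflows, copy the buffer
     into the heap vector and continue there.  */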
90   if (base == array.stack)
91     {
92       if (i < LOCAL_ELEMS)
93 	{
94 	  base[i] = x;
95 	  return base;
96 	}
97       gcc_checking_assert (i == LOCAL_ELEMS);
98       /* A previous iteration might also have moved from the stack to the
99 	 heap, in which case the heap array will already be big enough.  */
100       if (vec_safe_length (array.heap) <= i)
101 	vec_safe_grow (array.heap, i + 1, true);
102       base = array.heap->address ();
103       memcpy (base, array.stack, sizeof (array.stack));
104       base[LOCAL_ELEMS] = x;
105       return base;
106     }
107   unsigned int length = array.heap->length ();
108   if (length > i)
109     {
110       gcc_checking_assert (base == array.heap->address ());
111       base[i] = x;
112       return base;
113     }
114   else
115     {
116       gcc_checking_assert (i == length);
117       vec_safe_push (array.heap, x);
118       return array.heap->address ();
119     }
120 }
121 
122 /* Add the subrtxes of X to worklist ARRAY, starting at END.  Return the
123    number of elements added to the worklist.  */
124 
125 template <typename T>
126 size_t
127 generic_subrtx_iterator <T>::add_subrtxes_to_queue (array_type &array,
128 						    value_type *base,
129 						    size_t end, rtx_type x)
130 {
131   enum rtx_code code = GET_CODE (x);
132   const char *format = GET_RTX_FORMAT (code);
133   size_t orig_end = end;
134   if (__builtin_expect (INSN_P (x), false))
135     {
136       /* Put the pattern at the top of the queue, since that's what
137 	 we're likely to want most.  It also allows for the SEQUENCE
138 	 code below.  */
139       for (int i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; --i)
140 	if (format[i] == 'e')
141 	  {
142 	    value_type subx = T::get_value (x->u.fld[i].rt_rtx);
143 	    if (__builtin_expect (end < LOCAL_ELEMS, true))
144 	      base[end++] = subx;
145 	    else
146 	      base = add_single_to_queue (array, base, end++, subx);
147 	  }
148     }
149   else
150     for (int i = 0; format[i]; ++i)
151       if (format[i] == 'e')
152 	{
153 	  value_type subx = T::get_value (x->u.fld[i].rt_rtx);
154 	  if (__builtin_expect (end < LOCAL_ELEMS, true))
155 	    base[end++] = subx;
156 	  else
157 	    base = add_single_to_queue (array, base, end++, subx);
158 	}
159       else if (format[i] == 'E')
160 	{
161 	  unsigned int length = GET_NUM_ELEM (x->u.fld[i].rt_rtvec);
162 	  rtx *vec = x->u.fld[i].rt_rtvec->elem;
163 	  if (__builtin_expect (end + length <= LOCAL_ELEMS, true))
164 	    for (unsigned int j = 0; j < length; j++)
165 	      base[end++] = T::get_value (vec[j]);
166 	  else
167 	    for (unsigned int j = 0; j < length; j++)
168 	      base = add_single_to_queue (array, base, end++,
169 					  T::get_value (vec[j]));
170 	  if (code == SEQUENCE && end == length)
171 	    /* If the subrtxes of the sequence fill the entire array then
172 	       we know that no other parts of a containing insn are queued.
173 	       The caller is therefore iterating over the sequence as a
174 	       PATTERN (...), so we also want the patterns of the
175 	       subinstructions.  */
176 	    for (unsigned int j = 0; j < length; j++)
177 	      {
178 		typename T::rtx_type x = T::get_rtx (base[j]);
179 		if (INSN_P (x))
180 		  base[j] = T::get_value (PATTERN (x));
181 	      }
182 	}
183   return end - orig_end;
184 }
185 
186 template <typename T>
187 void
188 generic_subrtx_iterator <T>::free_array (array_type &array)
189 {
190   vec_free (array.heap);
191 }
192 
193 template <typename T>
194 const size_t generic_subrtx_iterator <T>::LOCAL_ELEMS;
195 
196 template class generic_subrtx_iterator <const_rtx_accessor>;
197 template class generic_subrtx_iterator <rtx_var_accessor>;
198 template class generic_subrtx_iterator <rtx_ptr_accessor>;
199 
200 /* Return 1 if the value of X is unstable
201    (would be different at a different point in the program).
202    The frame pointer, arg pointer, etc. are considered stable
203    (within one function) and so is anything marked `unchanging'.  */
204 
205 int
206 rtx_unstable_p (const_rtx x)
207 {
208   const RTX_CODE code = GET_CODE (x);
209   int i;
210   const char *fmt;
211 
212   switch (code)
213     {
214     case MEM:
215       return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));
216 
217     case CONST:
218     CASE_CONST_ANY:
219     case SYMBOL_REF:
220     case LABEL_REF:
221       return 0;
222 
223     case REG:
224       /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
225       if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
226 	  /* The arg pointer varies if it is not a fixed register.  */
227 	  || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
228 	return 0;
229       /* ??? When call-clobbered, the value is stable modulo the restore
230 	 that must happen after a call.  This currently screws up local-alloc
231 	 into believing that the restore is not needed.  */
232       if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
233 	return 0;
234       return 1;
235 
236     case ASM_OPERANDS:
237       if (MEM_VOLATILE_P (x))
238 	return 1;
239 
240       /* Fall through.  */
241 
242     default:
243       break;
244     }
245 
246   fmt = GET_RTX_FORMAT (code);
247   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
248     if (fmt[i] == 'e')
249       {
250 	if (rtx_unstable_p (XEXP (x, i)))
251 	  return 1;
252       }
253     else if (fmt[i] == 'E')
254       {
255 	int j;
256 	for (j = 0; j < XVECLEN (x, i); j++)
257 	  if (rtx_unstable_p (XVECEXP (x, i, j)))
258 	    return 1;
259       }
260 
261   return 0;
262 }
263 
264 /* Return 1 if X has a value that can vary even between two
265    executions of the program.  0 means X can be compared reliably
266    against certain constants or near-constants.
267    FOR_ALIAS is nonzero if we are called from alias analysis; if it is
268    zero, we are slightly more conservative.
269    The frame pointer and the arg pointer are considered constant.  */
270 
271 bool
272 rtx_varies_p (const_rtx x, bool for_alias)
273 {
274   RTX_CODE code;
275   int i;
276   const char *fmt;
277 
278   if (!x)
279     return 0;
280 
281   code = GET_CODE (x);
282   switch (code)
283     {
284     case MEM:
285       return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);
286 
287     case CONST:
288     CASE_CONST_ANY:
289     case SYMBOL_REF:
290     case LABEL_REF:
291       return 0;
292 
293     case REG:
294       /* Note that we have to test for the actual rtx used for the frame
295 	 and arg pointers and not just the register number in case we have
296 	 eliminated the frame and/or arg pointer and are using it
297 	 for pseudos.  */
298       if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
299 	  /* The arg pointer varies if it is not a fixed register.  */
300 	  || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
301 	return 0;
302       if (x == pic_offset_table_rtx
303 	  /* ??? When call-clobbered, the value is stable modulo the restore
304 	     that must happen after a call.  This currently screws up
305 	     local-alloc into believing that the restore is not needed, so we
306 	     must return 0 only if we are called from alias analysis.  */
307 	  && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
308 	return 0;
309       return 1;
310 
311     case LO_SUM:
312       /* The operand 0 of a LO_SUM is considered constant
313 	 (in fact it is related specifically to operand 1)
314 	 during alias analysis.  */
315       return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
316 	     || rtx_varies_p (XEXP (x, 1), for_alias);
317 
318     case ASM_OPERANDS:
319       if (MEM_VOLATILE_P (x))
320 	return 1;
321 
322       /* Fall through.  */
323 
324     default:
325       break;
326     }
327 
328   fmt = GET_RTX_FORMAT (code);
329   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
330     if (fmt[i] == 'e')
331       {
332 	if (rtx_varies_p (XEXP (x, i), for_alias))
333 	  return 1;
334       }
335     else if (fmt[i] == 'E')
336       {
337 	int j;
338 	for (j = 0; j < XVECLEN (x, i); j++)
339 	  if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
340 	    return 1;
341       }
342 
343   return 0;
344 }
345 
346 /* Compute an approximation for the offset between the register
347    FROM and TO for the current function, as it was at the start
348    of the routine.  */
349 
350 static poly_int64
351 get_initial_register_offset (int from, int to)
352 {
353   static const struct elim_table_t
354   {
355     const int from;
356     const int to;
357   } table[] = ELIMINABLE_REGS;
358   poly_int64 offset1, offset2;
359   unsigned int i, j;
360 
361   if (to == from)
362     return 0;
363 
364   /* It is not safe to call INITIAL_ELIMINATION_OFFSET before the epilogue
365      is completed, but we need to give at least an estimate for the stack
366      pointer based on the frame size.  */
367   if (!epilogue_completed)
368     {
369       offset1 = crtl->outgoing_args_size + get_frame_size ();
370 #if !STACK_GROWS_DOWNWARD
371       offset1 = - offset1;
372 #endif
373       if (to == STACK_POINTER_REGNUM)
374 	return offset1;
375       else if (from == STACK_POINTER_REGNUM)
376 	return - offset1;
377       else
378 	return 0;
379      }
380 
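  /* Look for a direct FROM -> TO entry in the elimination table, or try
     to compose the offset from two entries that go through a common
     intermediate register.  */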
381   for (i = 0; i < ARRAY_SIZE (table); i++)
382       if (table[i].from == from)
383 	{
384 	  if (table[i].to == to)
385 	    {
386 	      INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
387 					  offset1);
388 	      return offset1;
389 	    }
390 	  for (j = 0; j < ARRAY_SIZE (table); j++)
391 	    {
392 	      if (table[j].to == to
393 		  && table[j].from == table[i].to)
394 		{
395 		  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
396 					      offset1);
397 		  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
398 					      offset2);
399 		  return offset1 + offset2;
400 		}
401 	      if (table[j].from == to
402 		  && table[j].to == table[i].to)
403 		{
404 		  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
405 					      offset1);
406 		  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
407 					      offset2);
408 		  return offset1 - offset2;
409 		}
410 	    }
411 	}
412       else if (table[i].to == from)
413 	{
414 	  if (table[i].from == to)
415 	    {
416 	      INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
417 					  offset1);
418 	      return - offset1;
419 	    }
420 	  for (j = 0; j < ARRAY_SIZE (table); j++)
421 	    {
422 	      if (table[j].to == to
423 		  && table[j].from == table[i].from)
424 		{
425 		  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
426 					      offset1);
427 		  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
428 					      offset2);
429 		  return - offset1 + offset2;
430 		}
431 	      if (table[j].from == to
432 		  && table[j].to == table[i].from)
433 		{
434 		  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
435 					      offset1);
436 		  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
437 					      offset2);
438 		  return - offset1 - offset2;
439 		}
440 	    }
441 	}
442 
443   /* If the requested register combination was not found,
444      try a different, simpler combination.  */
445   if (from == ARG_POINTER_REGNUM)
446     return get_initial_register_offset (HARD_FRAME_POINTER_REGNUM, to);
447   else if (to == ARG_POINTER_REGNUM)
448     return get_initial_register_offset (from, HARD_FRAME_POINTER_REGNUM);
449   else if (from == HARD_FRAME_POINTER_REGNUM)
450     return get_initial_register_offset (FRAME_POINTER_REGNUM, to);
451   else if (to == HARD_FRAME_POINTER_REGNUM)
452     return get_initial_register_offset (from, FRAME_POINTER_REGNUM);
453   else
454     return 0;
455 }
456 
457 /* Return nonzero if the use of X+OFFSET as an address in a MEM with SIZE
458    bytes can cause a trap.  MODE is the mode of the MEM (not that of X) and
459    UNALIGNED_MEMS controls whether nonzero is returned for unaligned memory
460    references on strict alignment machines.  */
461 
462 static int
463 rtx_addr_can_trap_p_1 (const_rtx x, poly_int64 offset, poly_int64 size,
464 		       machine_mode mode, bool unaligned_mems)
465 {
466   enum rtx_code code = GET_CODE (x);
467   gcc_checking_assert (mode == BLKmode
468 		       || mode == VOIDmode
469 		       || known_size_p (size));
470   poly_int64 const_x1;
471 
472   /* The offset must be a multiple of the mode size if we are considering
473      unaligned memory references on strict alignment machines.  */
474   if (STRICT_ALIGNMENT
475       && unaligned_mems
476       && mode != BLKmode
477       && mode != VOIDmode)
478     {
479       poly_int64 actual_offset = offset;
480 
481 #ifdef SPARC_STACK_BOUNDARY_HACK
482       /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
483 	     the real alignment of %sp.  However, when it does this, the
484 	     alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY.  */
485       if (SPARC_STACK_BOUNDARY_HACK
486 	  && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
487 	actual_offset -= STACK_POINTER_OFFSET;
488 #endif
489 
490       if (!multiple_p (actual_offset, GET_MODE_SIZE (mode)))
491 	return 1;
492     }
493 
494   switch (code)
495     {
496     case SYMBOL_REF:
497       if (SYMBOL_REF_WEAK (x))
498 	return 1;
499       if (!CONSTANT_POOL_ADDRESS_P (x) && !SYMBOL_REF_FUNCTION_P (x))
500 	{
501 	  tree decl;
502 	  poly_int64 decl_size;
503 
504 	  if (maybe_lt (offset, 0))
505 	    return 1;
506 	  if (!known_size_p (size))
507 	    return maybe_ne (offset, 0);
508 
509 	  /* If the size of the access or of the symbol is unknown,
510 	     assume the worst.  */
511 	  decl = SYMBOL_REF_DECL (x);
512 
513 	  /* Else check that the access is in bounds.  TODO: restructure
514 	     expr_size/tree_expr_size/int_expr_size and just use the latter.  */
515 	  if (!decl)
516 	    decl_size = -1;
517 	  else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
518 	    {
519 	      if (!poly_int_tree_p (DECL_SIZE_UNIT (decl), &decl_size))
520 		decl_size = -1;
521 	    }
522 	  else if (TREE_CODE (decl) == STRING_CST)
523 	    decl_size = TREE_STRING_LENGTH (decl);
524 	  else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
525 	    decl_size = int_size_in_bytes (TREE_TYPE (decl));
526 	  else
527 	    decl_size = -1;
528 
529 	  return (!known_size_p (decl_size) || known_eq (decl_size, 0)
530 		  ? maybe_ne (offset, 0)
531 		  : !known_subrange_p (offset, size, 0, decl_size));
532         }
533 
534       return 0;
535 
536     case LABEL_REF:
537       return 0;
538 
539     case REG:
540       /* Stack references are assumed not to trap, but we need to deal with
541 	 nonsensical offsets.  */
542       if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
543 	 || x == stack_pointer_rtx
544 	 /* The arg pointer varies if it is not a fixed register.  */
545 	 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
546 	{
547 #ifdef RED_ZONE_SIZE
548 	  poly_int64 red_zone_size = RED_ZONE_SIZE;
549 #else
550 	  poly_int64 red_zone_size = 0;
551 #endif
552 	  poly_int64 stack_boundary = PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT;
553 	  poly_int64 low_bound, high_bound;
554 
555 	  if (!known_size_p (size))
556 	    return 1;
557 
558 	  if (x == frame_pointer_rtx)
559 	    {
560 	      if (FRAME_GROWS_DOWNWARD)
561 		{
562 		  high_bound = targetm.starting_frame_offset ();
563 		  low_bound  = high_bound - get_frame_size ();
564 		}
565 	      else
566 		{
567 		  low_bound  = targetm.starting_frame_offset ();
568 		  high_bound = low_bound + get_frame_size ();
569 		}
570 	    }
571 	  else if (x == hard_frame_pointer_rtx)
572 	    {
573 	      poly_int64 sp_offset
574 		= get_initial_register_offset (STACK_POINTER_REGNUM,
575 					       HARD_FRAME_POINTER_REGNUM);
576 	      poly_int64 ap_offset
577 		= get_initial_register_offset (ARG_POINTER_REGNUM,
578 					       HARD_FRAME_POINTER_REGNUM);
579 
580 #if STACK_GROWS_DOWNWARD
581 	      low_bound  = sp_offset - red_zone_size - stack_boundary;
582 	      high_bound = ap_offset
583 			   + FIRST_PARM_OFFSET (current_function_decl)
584 #if !ARGS_GROW_DOWNWARD
585 			   + crtl->args.size
586 #endif
587 			   + stack_boundary;
588 #else
589 	      high_bound = sp_offset + red_zone_size + stack_boundary;
590 	      low_bound  = ap_offset
591 			   + FIRST_PARM_OFFSET (current_function_decl)
592 #if ARGS_GROW_DOWNWARD
593 			   - crtl->args.size
594 #endif
595 			   - stack_boundary;
596 #endif
597 	    }
598 	  else if (x == stack_pointer_rtx)
599 	    {
600 	      poly_int64 ap_offset
601 		= get_initial_register_offset (ARG_POINTER_REGNUM,
602 					       STACK_POINTER_REGNUM);
603 
604 #if STACK_GROWS_DOWNWARD
605 	      low_bound  = - red_zone_size - stack_boundary;
606 	      high_bound = ap_offset
607 			   + FIRST_PARM_OFFSET (current_function_decl)
608 #if !ARGS_GROW_DOWNWARD
609 			   + crtl->args.size
610 #endif
611 			   + stack_boundary;
612 #else
613 	      high_bound = red_zone_size + stack_boundary;
614 	      low_bound  = ap_offset
615 			   + FIRST_PARM_OFFSET (current_function_decl)
616 #if ARGS_GROW_DOWNWARD
617 			   - crtl->args.size
618 #endif
619 			   - stack_boundary;
620 #endif
621 	    }
622 	  else
623 	    {
624 	      /* We assume that accesses are safe to at least the
625 		 next stack boundary.
626 		 Examples are varargs and __builtin_return_address.  */
627 #if ARGS_GROW_DOWNWARD
628 	      high_bound = FIRST_PARM_OFFSET (current_function_decl)
629 			   + stack_boundary;
630 	      low_bound  = FIRST_PARM_OFFSET (current_function_decl)
631 			   - crtl->args.size - stack_boundary;
632 #else
633 	      low_bound  = FIRST_PARM_OFFSET (current_function_decl)
634 			   - stack_boundary;
635 	      high_bound = FIRST_PARM_OFFSET (current_function_decl)
636 			   + crtl->args.size + stack_boundary;
637 #endif
638 	    }
639 
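	  /* The access can only be assumed safe if it lies entirely
	     within [LOW_BOUND, HIGH_BOUND].  */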
640 	  if (known_ge (offset, low_bound)
641 	      && known_le (offset, high_bound - size))
642 	    return 0;
643 	  return 1;
644 	}
645       /* All of the virtual frame registers are stack references.  */
646       if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
647 	  && REGNO (x) <= LAST_VIRTUAL_REGISTER)
648 	return 0;
649       return 1;
650 
651     case CONST:
652       return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
653 				    mode, unaligned_mems);
654 
655     case PLUS:
656       /* An address is assumed not to trap if:
657 	 - it is the pic register plus a const unspec without offset.  */
658       if (XEXP (x, 0) == pic_offset_table_rtx
659 	  && GET_CODE (XEXP (x, 1)) == CONST
660 	  && GET_CODE (XEXP (XEXP (x, 1), 0)) == UNSPEC
661 	  && known_eq (offset, 0))
662 	return 0;
663 
664       /* - or it is an address that can't trap plus a constant integer.  */
665       if (poly_int_rtx_p (XEXP (x, 1), &const_x1)
666 	  && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + const_x1,
667 				     size, mode, unaligned_mems))
668 	return 0;
669 
670       return 1;
671 
672     case LO_SUM:
673     case PRE_MODIFY:
674       return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
675 				    mode, unaligned_mems);
676 
677     case PRE_DEC:
678     case PRE_INC:
679     case POST_DEC:
680     case POST_INC:
681     case POST_MODIFY:
682       return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
683 				    mode, unaligned_mems);
684 
685     default:
686       break;
687     }
688 
689   /* If it isn't one of the cases above, it can cause a trap.  */
690   return 1;
691 }
692 
693 /* Return nonzero if the use of X as an address in a MEM can cause a trap.  */
694 
695 int
696 rtx_addr_can_trap_p (const_rtx x)
697 {
698   return rtx_addr_can_trap_p_1 (x, 0, -1, BLKmode, false);
699 }
700 
701 /* Return true if X contains a MEM subrtx.  */
702 
703 bool
704 contains_mem_rtx_p (rtx x)
705 {
706   subrtx_iterator::array_type array;
707   FOR_EACH_SUBRTX (iter, array, x, ALL)
708     if (MEM_P (*iter))
709       return true;
710 
711   return false;
712 }
713 
714 /* Return true if X is an address that is known to not be zero.  */
715 
716 bool
717 nonzero_address_p (const_rtx x)
718 {
719   const enum rtx_code code = GET_CODE (x);
720 
721   switch (code)
722     {
723     case SYMBOL_REF:
724       return flag_delete_null_pointer_checks && !SYMBOL_REF_WEAK (x);
725 
726     case LABEL_REF:
727       return true;
728 
729     case REG:
730       /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
731       if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
732 	  || x == stack_pointer_rtx
733 	  || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
734 	return true;
735       /* All of the virtual frame registers are stack references.  */
736       if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
737 	  && REGNO (x) <= LAST_VIRTUAL_REGISTER)
738 	return true;
739       return false;
740 
741     case CONST:
742       return nonzero_address_p (XEXP (x, 0));
743 
744     case PLUS:
745       /* Handle PIC references.  */
746       if (XEXP (x, 0) == pic_offset_table_rtx
747 	       && CONSTANT_P (XEXP (x, 1)))
748 	return true;
749       return false;
750 
751     case PRE_MODIFY:
752       /* Similar to the above; allow positive offsets.  Further, since
753 	 auto-inc is only allowed in memories, the register must be a
754 	 pointer.  */
755       if (CONST_INT_P (XEXP (x, 1))
756 	  && INTVAL (XEXP (x, 1)) > 0)
757 	return true;
758       return nonzero_address_p (XEXP (x, 0));
759 
760     case PRE_INC:
761       /* Similarly.  Further, the offset is always positive.  */
762       return true;
763 
764     case PRE_DEC:
765     case POST_DEC:
766     case POST_INC:
767     case POST_MODIFY:
768       return nonzero_address_p (XEXP (x, 0));
769 
770     case LO_SUM:
771       return nonzero_address_p (XEXP (x, 1));
772 
773     default:
774       break;
775     }
776 
777   /* If it isn't one of the cases above, it might be zero.  */
778   return false;
779 }
780 
781 /* Return 1 if X refers to a memory location whose address
782    cannot be compared reliably with constant addresses,
783    or if X refers to a BLKmode memory object.
784    FOR_ALIAS is nonzero if we are called from alias analysis; if it is
785    zero, we are slightly more conservative.  */
786 
787 bool
788 rtx_addr_varies_p (const_rtx x, bool for_alias)
789 {
790   enum rtx_code code;
791   int i;
792   const char *fmt;
793 
794   if (x == 0)
795     return 0;
796 
797   code = GET_CODE (x);
798   if (code == MEM)
799     return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);
800 
801   fmt = GET_RTX_FORMAT (code);
802   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
803     if (fmt[i] == 'e')
804       {
805 	if (rtx_addr_varies_p (XEXP (x, i), for_alias))
806 	  return 1;
807       }
808     else if (fmt[i] == 'E')
809       {
810 	int j;
811 	for (j = 0; j < XVECLEN (x, i); j++)
812 	  if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
813 	    return 1;
814       }
815   return 0;
816 }
817 
818 /* Return the CALL in the pattern of INSN if there is one.  */
819 
820 rtx
821 get_call_rtx_from (const rtx_insn *insn)
822 {
823   rtx x = PATTERN (insn);
824   if (GET_CODE (x) == PARALLEL)
825     x = XVECEXP (x, 0, 0);
826   if (GET_CODE (x) == SET)
827     x = SET_SRC (x);
828   if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
829     return x;
830   return NULL_RTX;
831 }
832 
833 /* Get the declaration of the function called by INSN.  */
834 
835 tree
836 get_call_fndecl (const rtx_insn *insn)
837 {
838   rtx note, datum;
839 
840   note = find_reg_note (insn, REG_CALL_DECL, NULL_RTX);
841   if (note == NULL_RTX)
842     return NULL_TREE;
843 
844   datum = XEXP (note, 0);
845   if (datum != NULL_RTX)
846     return SYMBOL_REF_DECL (datum);
847 
848   return NULL_TREE;
849 }
850 
851 /* Return the value of the integer term in X, if one is apparent;
852    otherwise return 0.
853    Only obvious integer terms are detected.
854    This is used in cse.c with the `related_value' field.  */
855 
856 HOST_WIDE_INT
857 get_integer_term (const_rtx x)
858 {
859   if (GET_CODE (x) == CONST)
860     x = XEXP (x, 0);
861 
862   if (GET_CODE (x) == MINUS
863       && CONST_INT_P (XEXP (x, 1)))
864     return - INTVAL (XEXP (x, 1));
865   if (GET_CODE (x) == PLUS
866       && CONST_INT_P (XEXP (x, 1)))
867     return INTVAL (XEXP (x, 1));
868   return 0;
869 }
870 
871 /* If X is a constant, return the value sans apparent integer term;
872    otherwise return 0.
873    Only obvious integer terms are detected.  */
874 
875 rtx
876 get_related_value (const_rtx x)
877 {
878   if (GET_CODE (x) != CONST)
879     return 0;
880   x = XEXP (x, 0);
881   if (GET_CODE (x) == PLUS
882       && CONST_INT_P (XEXP (x, 1)))
883     return XEXP (x, 0);
884   else if (GET_CODE (x) == MINUS
885 	   && CONST_INT_P (XEXP (x, 1)))
886     return XEXP (x, 0);
887   return 0;
888 }
889 
890 /* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
891    to somewhere in the same object or object_block as SYMBOL.  */
892 
893 bool
894 offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
895 {
896   tree decl;
897 
898   if (GET_CODE (symbol) != SYMBOL_REF)
899     return false;
900 
901   if (offset == 0)
902     return true;
903 
904   if (offset > 0)
905     {
906       if (CONSTANT_POOL_ADDRESS_P (symbol)
907 	  && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
908 	return true;
909 
910       decl = SYMBOL_REF_DECL (symbol);
911       if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
912 	return true;
913     }
914 
915   if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
916       && SYMBOL_REF_BLOCK (symbol)
917       && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
918       && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
919 	  < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
920     return true;
921 
922   return false;
923 }
924 
925 /* Split X into a base and a constant offset, storing them in *BASE_OUT
926    and *OFFSET_OUT respectively.  */
927 
928 void
929 split_const (rtx x, rtx *base_out, rtx *offset_out)
930 {
931   if (GET_CODE (x) == CONST)
932     {
933       x = XEXP (x, 0);
934       if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
935 	{
936 	  *base_out = XEXP (x, 0);
937 	  *offset_out = XEXP (x, 1);
938 	  return;
939 	}
940     }
941   *base_out = x;
942   *offset_out = const0_rtx;
943 }
944 
945 /* Express integer value X as some value Y plus a polynomial offset,
946    where Y is either const0_rtx, X or something within X (as opposed
947    to a new rtx).  Return the Y and store the offset in *OFFSET_OUT.  */
948 
949 rtx
950 strip_offset (rtx x, poly_int64_pod *offset_out)
951 {
952   rtx base = const0_rtx;
953   rtx test = x;
954   if (GET_CODE (test) == CONST)
955     test = XEXP (test, 0);
956   if (GET_CODE (test) == PLUS)
957     {
958       base = XEXP (test, 0);
959       test = XEXP (test, 1);
960     }
961   if (poly_int_rtx_p (test, offset_out))
962     return base;
963   *offset_out = 0;
964   return x;
965 }
966 
967 /* Return the argument size in REG_ARGS_SIZE note X.  */
968 
969 poly_int64
970 get_args_size (const_rtx x)
971 {
972   gcc_checking_assert (REG_NOTE_KIND (x) == REG_ARGS_SIZE);
973   return rtx_to_poly_int64 (XEXP (x, 0));
974 }
975 
976 /* Return the number of places FIND appears within X.  If COUNT_DEST is
977    zero, we do not count occurrences inside the destination of a SET.  */
978 
979 int
980 count_occurrences (const_rtx x, const_rtx find, int count_dest)
981 {
982   int i, j;
983   enum rtx_code code;
984   const char *format_ptr;
985   int count;
986 
987   if (x == find)
988     return 1;
989 
990   code = GET_CODE (x);
991 
992   switch (code)
993     {
994     case REG:
995     CASE_CONST_ANY:
996     case SYMBOL_REF:
997     case CODE_LABEL:
998     case PC:
999     case CC0:
1000       return 0;
1001 
1002     case EXPR_LIST:
1003       count = count_occurrences (XEXP (x, 0), find, count_dest);
1004       if (XEXP (x, 1))
1005 	count += count_occurrences (XEXP (x, 1), find, count_dest);
1006       return count;
1007 
1008     case MEM:
1009       if (MEM_P (find) && rtx_equal_p (x, find))
1010 	return 1;
1011       break;
1012 
1013     case SET:
1014       if (SET_DEST (x) == find && ! count_dest)
1015 	return count_occurrences (SET_SRC (x), find, count_dest);
1016       break;
1017 
1018     default:
1019       break;
1020     }
1021 
1022   format_ptr = GET_RTX_FORMAT (code);
1023   count = 0;
1024 
1025   for (i = 0; i < GET_RTX_LENGTH (code); i++)
1026     {
1027       switch (*format_ptr++)
1028 	{
1029 	case 'e':
1030 	  count += count_occurrences (XEXP (x, i), find, count_dest);
1031 	  break;
1032 
1033 	case 'E':
1034 	  for (j = 0; j < XVECLEN (x, i); j++)
1035 	    count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
1036 	  break;
1037 	}
1038     }
1039   return count;
1040 }
1041 
1042 
1043 /* Return TRUE if OP is a register or subreg of a register that
1044    holds an unsigned quantity.  Otherwise, return FALSE.  */
1045 
1046 bool
1047 unsigned_reg_p (rtx op)
1048 {
1049   if (REG_P (op)
1050       && REG_EXPR (op)
1051       && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
1052     return true;
1053 
1054   if (GET_CODE (op) == SUBREG
1055       && SUBREG_PROMOTED_SIGN (op))
1056     return true;
1057 
1058   return false;
1059 }
1060 
1061 
1062 /* Nonzero if register REG appears somewhere within IN.
1063    Also works if REG is not a register; in this case it checks
1064    for a subexpression of IN that is Lisp "equal" to REG.  */
1065 
1066 int
1067 reg_mentioned_p (const_rtx reg, const_rtx in)
1068 {
1069   const char *fmt;
1070   int i;
1071   enum rtx_code code;
1072 
1073   if (in == 0)
1074     return 0;
1075 
1076   if (reg == in)
1077     return 1;
1078 
1079   if (GET_CODE (in) == LABEL_REF)
1080     return reg == label_ref_label (in);
1081 
1082   code = GET_CODE (in);
1083 
1084   switch (code)
1085     {
1086       /* Compare registers by number.  */
1087     case REG:
1088       return REG_P (reg) && REGNO (in) == REGNO (reg);
1089 
1090       /* These codes have no constituent expressions
1091 	 and are unique.  */
1092     case SCRATCH:
1093     case CC0:
1094     case PC:
1095       return 0;
1096 
1097     CASE_CONST_ANY:
1098       /* These are kept unique for a given value.  */
1099       return 0;
1100 
1101     default:
1102       break;
1103     }
1104 
1105   if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
1106     return 1;
1107 
1108   fmt = GET_RTX_FORMAT (code);
1109 
1110   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1111     {
1112       if (fmt[i] == 'E')
1113 	{
1114 	  int j;
1115 	  for (j = XVECLEN (in, i) - 1; j >= 0; j--)
1116 	    if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
1117 	      return 1;
1118 	}
1119       else if (fmt[i] == 'e'
1120 	       && reg_mentioned_p (reg, XEXP (in, i)))
1121 	return 1;
1122     }
1123   return 0;
1124 }
1125 
1126 /* Return 1 if in between BEG and END, exclusive of BEG and END, there is
1127    no CODE_LABEL insn.  */
1128 
1129 int
1130 no_labels_between_p (const rtx_insn *beg, const rtx_insn *end)
1131 {
1132   rtx_insn *p;
1133   if (beg == end)
1134     return 0;
1135   for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
1136     if (LABEL_P (p))
1137       return 0;
1138   return 1;
1139 }
1140 
1141 /* Nonzero if register REG is used in an insn between
1142    FROM_INSN and TO_INSN (exclusive of those two).  */
1143 
1144 int
1145 reg_used_between_p (const_rtx reg, const rtx_insn *from_insn,
1146 		    const rtx_insn *to_insn)
1147 {
1148   rtx_insn *insn;
1149 
1150   if (from_insn == to_insn)
1151     return 0;
1152 
1153   for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
1154     if (NONDEBUG_INSN_P (insn)
1155 	&& (reg_overlap_mentioned_p (reg, PATTERN (insn))
1156 	   || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
1157       return 1;
1158   return 0;
1159 }
1160 
1161 /* Nonzero if the old value of X, a register, is referenced in BODY.  If X
1162    is entirely replaced by a new value and the only use is as a SET_DEST,
1163    we do not consider it a reference.  */
1164 
1165 int
1166 reg_referenced_p (const_rtx x, const_rtx body)
1167 {
1168   int i;
1169 
1170   switch (GET_CODE (body))
1171     {
1172     case SET:
1173       if (reg_overlap_mentioned_p (x, SET_SRC (body)))
1174 	return 1;
1175 
1176       /* If the destination is anything other than CC0, PC, a REG or a SUBREG
1177 	 of a REG that occupies all of the REG, the insn references X if
1178 	 it is mentioned in the destination.  */
1179       if (GET_CODE (SET_DEST (body)) != CC0
1180 	  && GET_CODE (SET_DEST (body)) != PC
1181 	  && !REG_P (SET_DEST (body))
1182 	  && ! (GET_CODE (SET_DEST (body)) == SUBREG
1183 		&& REG_P (SUBREG_REG (SET_DEST (body)))
1184 		&& !read_modify_subreg_p (SET_DEST (body)))
1185 	  && reg_overlap_mentioned_p (x, SET_DEST (body)))
1186 	return 1;
1187       return 0;
1188 
1189     case ASM_OPERANDS:
1190       for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
1191 	if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
1192 	  return 1;
1193       return 0;
1194 
1195     case CALL:
1196     case USE:
1197     case IF_THEN_ELSE:
1198       return reg_overlap_mentioned_p (x, body);
1199 
1200     case TRAP_IF:
1201       return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));
1202 
1203     case PREFETCH:
1204       return reg_overlap_mentioned_p (x, XEXP (body, 0));
1205 
1206     case UNSPEC:
1207     case UNSPEC_VOLATILE:
1208       for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1209 	if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
1210 	  return 1;
1211       return 0;
1212 
1213     case PARALLEL:
1214       for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1215 	if (reg_referenced_p (x, XVECEXP (body, 0, i)))
1216 	  return 1;
1217       return 0;
1218 
1219     case CLOBBER:
1220       if (MEM_P (XEXP (body, 0)))
1221 	if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
1222 	  return 1;
1223       return 0;
1224 
1225     case COND_EXEC:
1226       if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
1227 	return 1;
1228       return reg_referenced_p (x, COND_EXEC_CODE (body));
1229 
1230     default:
1231       return 0;
1232     }
1233 }
1234 
1235 /* Nonzero if register REG is set or clobbered in an insn between
1236    FROM_INSN and TO_INSN (exclusive of those two).  */
1237 
1238 int
1239 reg_set_between_p (const_rtx reg, const rtx_insn *from_insn,
1240 		   const rtx_insn *to_insn)
1241 {
1242   const rtx_insn *insn;
1243 
1244   if (from_insn == to_insn)
1245     return 0;
1246 
1247   for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
1248     if (INSN_P (insn) && reg_set_p (reg, insn))
1249       return 1;
1250   return 0;
1251 }
1252 
1253 /* Return true if REG is set or clobbered inside INSN.  */
1254 
1255 int
1256 reg_set_p (const_rtx reg, const_rtx insn)
1257 {
1258   /* After delay slot handling, call and branch insns might be in a
1259      sequence.  Check all the elements there.  */
1260   if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
1261     {
1262       for (int i = 0; i < XVECLEN (PATTERN (insn), 0); ++i)
1263 	if (reg_set_p (reg, XVECEXP (PATTERN (insn), 0, i)))
1264 	  return true;
1265 
1266       return false;
1267     }
1268 
1269   /* We can be passed an insn or part of one.  If we are passed an insn,
1270      check if a side-effect of the insn clobbers REG.  */
1271   if (INSN_P (insn)
1272       && (FIND_REG_INC_NOTE (insn, reg)
1273 	  || (CALL_P (insn)
1274 	      && ((REG_P (reg)
1275 		   && REGNO (reg) < FIRST_PSEUDO_REGISTER
1276 		   && (insn_callee_abi (as_a<const rtx_insn *> (insn))
1277 		       .clobbers_reg_p (GET_MODE (reg), REGNO (reg))))
1278 		  || MEM_P (reg)
1279 		  || find_reg_fusage (insn, CLOBBER, reg)))))
1280     return true;
1281 
1282   /* There are no REG_INC notes for SP autoinc.  */
1283   if (reg == stack_pointer_rtx && INSN_P (insn))
1284     {
1285       subrtx_var_iterator::array_type array;
1286       FOR_EACH_SUBRTX_VAR (iter, array, PATTERN (insn), NONCONST)
1287 	{
1288 	  rtx mem = *iter;
1289 	  if (mem
1290 	      && MEM_P (mem)
1291 	      && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
1292 	    {
1293 	      if (XEXP (XEXP (mem, 0), 0) == stack_pointer_rtx)
1294 		return true;
1295 	      iter.skip_subrtxes ();
1296 	    }
1297 	}
1298     }
1299 
1300   return set_of (reg, insn) != NULL_RTX;
1301 }
1302 
1303 /* Similar to reg_set_between_p, but check all registers in X.  Return 0
1304    only if none of them are modified between START and END.  Return 1 if
1305    X contains a MEM; this routine does use memory aliasing.  */
1306 
1307 int
1308 modified_between_p (const_rtx x, const rtx_insn *start, const rtx_insn *end)
1309 {
1310   const enum rtx_code code = GET_CODE (x);
1311   const char *fmt;
1312   int i, j;
1313   rtx_insn *insn;
1314 
1315   if (start == end)
1316     return 0;
1317 
1318   switch (code)
1319     {
1320     CASE_CONST_ANY:
1321     case CONST:
1322     case SYMBOL_REF:
1323     case LABEL_REF:
1324       return 0;
1325 
1326     case PC:
1327     case CC0:
1328       return 1;
1329 
1330     case MEM:
1331       if (modified_between_p (XEXP (x, 0), start, end))
1332 	return 1;
1333       if (MEM_READONLY_P (x))
1334 	return 0;
1335       for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
1336 	if (memory_modified_in_insn_p (x, insn))
1337 	  return 1;
1338       return 0;
1339 
1340     case REG:
1341       return reg_set_between_p (x, start, end);
1342 
1343     default:
1344       break;
1345     }
1346 
1347   fmt = GET_RTX_FORMAT (code);
1348   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1349     {
1350       if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
1351 	return 1;
1352 
1353       else if (fmt[i] == 'E')
1354 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1355 	  if (modified_between_p (XVECEXP (x, i, j), start, end))
1356 	    return 1;
1357     }
1358 
1359   return 0;
1360 }
1361 
1362 /* Similar to reg_set_p, but check all registers in X.  Return 0 only if none
1363    of them are modified in INSN.  Return 1 if X contains a MEM; this routine
1364    does use memory aliasing.  */
1365 
1366 int
1367 modified_in_p (const_rtx x, const_rtx insn)
1368 {
1369   const enum rtx_code code = GET_CODE (x);
1370   const char *fmt;
1371   int i, j;
1372 
1373   switch (code)
1374     {
1375     CASE_CONST_ANY:
1376     case CONST:
1377     case SYMBOL_REF:
1378     case LABEL_REF:
1379       return 0;
1380 
1381     case PC:
1382     case CC0:
1383       return 1;
1384 
1385     case MEM:
1386       if (modified_in_p (XEXP (x, 0), insn))
1387 	return 1;
1388       if (MEM_READONLY_P (x))
1389 	return 0;
1390       if (memory_modified_in_insn_p (x, insn))
1391 	return 1;
1392       return 0;
1393 
1394     case REG:
1395       return reg_set_p (x, insn);
1396 
1397     default:
1398       break;
1399     }
1400 
1401   fmt = GET_RTX_FORMAT (code);
1402   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1403     {
1404       if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
1405 	return 1;
1406 
1407       else if (fmt[i] == 'E')
1408 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1409 	  if (modified_in_p (XVECEXP (x, i, j), insn))
1410 	    return 1;
1411     }
1412 
1413   return 0;
1414 }
1415 
1416 /* Return true if X is a SUBREG and if storing a value to X would
1417    preserve some of its SUBREG_REG.  For example, on a normal 32-bit
1418    target, using a SUBREG to store to one half of a DImode REG would
1419    preserve the other half.  */
1420 
1421 bool
1422 read_modify_subreg_p (const_rtx x)
1423 {
1424   if (GET_CODE (x) != SUBREG)
1425     return false;
1426   poly_uint64 isize = GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)));
1427   poly_uint64 osize = GET_MODE_SIZE (GET_MODE (x));
1428   poly_uint64 regsize = REGMODE_NATURAL_SIZE (GET_MODE (SUBREG_REG (x)));
1429   /* The inner and outer modes of a subreg must be ordered, so that we
1430      can tell whether they're paradoxical or partial.  */
1431   gcc_checking_assert (ordered_p (isize, osize));
1432   return (maybe_gt (isize, osize) && maybe_gt (isize, regsize));
1433 }
1434 
1435 /* Helper function for set_of.  */
1436 struct set_of_data
1437   {
1438     const_rtx found;
1439     const_rtx pat;
1440   };
1441 
1442 static void
1443 set_of_1 (rtx x, const_rtx pat, void *data1)
1444 {
1445   struct set_of_data *const data = (struct set_of_data *) (data1);
1446   if (rtx_equal_p (x, data->pat)
1447       || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
1448     data->found = pat;
1449 }
1450 
1451 /* Given an INSN, return a SET or CLOBBER expression that does modify PAT
1452    (either directly or via STRICT_LOW_PART and similar modifiers).  */
1453 const_rtx
1454 set_of (const_rtx pat, const_rtx insn)
1455 {
1456   struct set_of_data data;
1457   data.found = NULL_RTX;
1458   data.pat = pat;
1459   note_pattern_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
1460   return data.found;
1461 }
1462 
1463 /* Check whether instruction pattern PAT contains a SET with the following
1464    properties:
1465 
1466    - the SET is executed unconditionally; and
1467    - either:
1468      - the destination of the SET is a REG that contains REGNO; or
1469      - both:
1470        - the destination of the SET is a SUBREG of such a REG; and
1471        - writing to the subreg clobbers all of the SUBREG_REG
1472 	 (in other words, read_modify_subreg_p is false).
1473 
1474    If PAT does have a SET like that, return the set, otherwise return null.
1475 
1476    This is intended to be an alternative to single_set for passes that
1477    can handle patterns with multiple_sets.  */
1478 rtx
1479 simple_regno_set (rtx pat, unsigned int regno)
1480 {
1481   if (GET_CODE (pat) == PARALLEL)
1482     {
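      /* Recurse on all but the last element of the PARALLEL, then fall
	 through to check the last element directly.  */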
1483       int last = XVECLEN (pat, 0) - 1;
1484       for (int i = 0; i < last; ++i)
1485 	if (rtx set = simple_regno_set (XVECEXP (pat, 0, i), regno))
1486 	  return set;
1487 
1488       pat = XVECEXP (pat, 0, last);
1489     }
1490 
1491   if (GET_CODE (pat) == SET
1492       && covers_regno_no_parallel_p (SET_DEST (pat), regno))
1493     return pat;
1494 
1495   return nullptr;
1496 }
1497 
1498 /* Add all hard registers in X to *PSET.  */
1499 void
1500 find_all_hard_regs (const_rtx x, HARD_REG_SET *pset)
1501 {
1502   subrtx_iterator::array_type array;
1503   FOR_EACH_SUBRTX (iter, array, x, NONCONST)
1504     {
1505       const_rtx x = *iter;
1506       if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
1507 	add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
1508     }
1509 }
1510 
1511 /* This function, called through note_stores, collects sets and
1512    clobbers of hard registers in a HARD_REG_SET, which is pointed to
1513    by DATA.  */
1514 void
1515 record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
1516 {
1517   HARD_REG_SET *pset = (HARD_REG_SET *)data;
1518   if (REG_P (x) && HARD_REGISTER_P (x))
1519     add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
1520 }
1521 
1522 /* Examine INSN, and compute the set of hard registers written by it.
1523    Store it in *PSET.  Should only be called after reload.
1524 
1525    IMPLICIT is true if we should include registers that are fully-clobbered
1526    by calls.  This should be used with caution, since it doesn't include
1527    partially-clobbered registers.  */
1528 void
1529 find_all_hard_reg_sets (const rtx_insn *insn, HARD_REG_SET *pset, bool implicit)
1530 {
1531   rtx link;
1532 
1533   CLEAR_HARD_REG_SET (*pset);
1534   note_stores (insn, record_hard_reg_sets, pset);
1535   if (CALL_P (insn) && implicit)
1536     *pset |= insn_callee_abi (insn).full_reg_clobbers ();
1537   for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1538     if (REG_NOTE_KIND (link) == REG_INC)
1539       record_hard_reg_sets (XEXP (link, 0), NULL, pset);
1540 }
1541 
1542 /* Like record_hard_reg_sets, but called through note_uses.  */
1543 void
1544 record_hard_reg_uses (rtx *px, void *data)
1545 {
1546   find_all_hard_regs (*px, (HARD_REG_SET *) data);
1547 }
1548 
1549 /* Given an INSN, return a SET expression if this insn has only a single SET.
1550    It may also have CLOBBERs, USEs, or SETs whose output
1551    will not be used, which we ignore.  */
1552 
1553 rtx
1554 single_set_2 (const rtx_insn *insn, const_rtx pat)
1555 {
1556   rtx set = NULL;
1557   int set_verified = 1;
1558   int i;
1559 
1560   if (GET_CODE (pat) == PARALLEL)
1561     {
1562       for (i = 0; i < XVECLEN (pat, 0); i++)
1563 	{
1564 	  rtx sub = XVECEXP (pat, 0, i);
1565 	  switch (GET_CODE (sub))
1566 	    {
1567 	    case USE:
1568 	    case CLOBBER:
1569 	      break;
1570 
1571 	    case SET:
1572 	      /* We can consider insns having multiple sets, where all
1573 		 but one are dead as single set insns.  In common case
1574 		 only single set is present in the pattern so we want
1575 		 to avoid checking for REG_UNUSED notes unless necessary.
1576 
1577 		 When we reach set first time, we just expect this is
1578 		 the single set we are looking for and only when more
1579 		 sets are found in the insn, we check them.  */
1580 	      if (!set_verified)
1581 		{
1582 		  if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
1583 		      && !side_effects_p (set))
1584 		    set = NULL;
1585 		  else
1586 		    set_verified = 1;
1587 		}
1588 	      if (!set)
1589 		set = sub, set_verified = 0;
1590 	      else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
1591 		       || side_effects_p (sub))
1592 		return NULL_RTX;
1593 	      break;
1594 
1595 	    default:
1596 	      return NULL_RTX;
1597 	    }
1598 	}
1599     }
1600   return set;
1601 }
1602 
1603 /* Given an INSN, return nonzero if it has more than one SET, else return
1604    zero.  */
1605 
1606 int
1607 multiple_sets (const_rtx insn)
1608 {
1609   int found;
1610   int i;
1611 
1612   /* INSN must be an insn.  */
1613   if (! INSN_P (insn))
1614     return 0;
1615 
1616   /* Only a PARALLEL can have multiple SETs.  */
1617   if (GET_CODE (PATTERN (insn)) == PARALLEL)
1618     {
1619       for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1620 	if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
1621 	  {
1622 	    /* If we have already found a SET, then return now.  */
1623 	    if (found)
1624 	      return 1;
1625 	    else
1626 	      found = 1;
1627 	  }
1628     }
1629 
1630   /* Either zero or one SET.  */
1631   return 0;
1632 }
1633 
1634 /* Return nonzero if the destination of SET equals the source
1635    and there are no side effects.  */
1636 
1637 int
1638 set_noop_p (const_rtx set)
1639 {
1640   rtx src = SET_SRC (set);
1641   rtx dst = SET_DEST (set);
1642 
1643   if (dst == pc_rtx && src == pc_rtx)
1644     return 1;
1645 
1646   if (MEM_P (dst) && MEM_P (src))
1647     return rtx_equal_p (dst, src) && !side_effects_p (dst);
1648 
1649   if (GET_CODE (dst) == ZERO_EXTRACT)
1650     return rtx_equal_p (XEXP (dst, 0), src)
1651 	   && !BITS_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
1652 	   && !side_effects_p (src);
1653 
1654   if (GET_CODE (dst) == STRICT_LOW_PART)
1655     dst = XEXP (dst, 0);
1656 
1657   if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
1658     {
1659       if (maybe_ne (SUBREG_BYTE (src), SUBREG_BYTE (dst)))
1660 	return 0;
1661       src = SUBREG_REG (src);
1662       dst = SUBREG_REG (dst);
1663       if (GET_MODE (src) != GET_MODE (dst))
1664 	/* It is hard to tell whether subregs refer to the same bits, so act
1665 	   conservatively and return 0.  */
1666 	return 0;
1667     }
1668 
1669   /* It is a NOOP if destination overlaps with selected src vector
1670      elements.  */
1671   if (GET_CODE (src) == VEC_SELECT
1672       && REG_P (XEXP (src, 0)) && REG_P (dst)
1673       && HARD_REGISTER_P (XEXP (src, 0))
1674       && HARD_REGISTER_P (dst))
1675     {
1676       int i;
1677       rtx par = XEXP (src, 1);
1678       rtx src0 = XEXP (src, 0);
1679       poly_int64 c0;
1680       if (!poly_int_rtx_p (XVECEXP (par, 0, 0), &c0))
1681 	return 0;
1682       poly_int64 offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;
1683 
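      /* Check that the VEC_SELECT picks consecutive lanes starting at C0,
	 so that it is equivalent to taking a subreg of SRC0 at OFFSET.  */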
1684       for (i = 1; i < XVECLEN (par, 0); i++)
1685 	{
1686 	  poly_int64 c0i;
1687 	  if (!poly_int_rtx_p (XVECEXP (par, 0, i), &c0i)
1688 	      || maybe_ne (c0i, c0 + i))
1689 	    return 0;
1690 	}
1691       return
1692 	REG_CAN_CHANGE_MODE_P (REGNO (dst), GET_MODE (src0), GET_MODE (dst))
1693 	&& simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
1694 				  offset, GET_MODE (dst)) == (int) REGNO (dst);
1695     }
1696 
1697   return (REG_P (src) && REG_P (dst)
1698 	  && REGNO (src) == REGNO (dst));
1699 }
1700 
1701 /* Return nonzero if an insn consists only of SETs, each of which only sets a
1702    value to itself.  */
1703 
1704 int
1705 noop_move_p (const rtx_insn *insn)
1706 {
1707   rtx pat = PATTERN (insn);
1708 
1709   if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
1710     return 1;
1711 
1712   /* Check the code to be executed for COND_EXEC.  */
1713   if (GET_CODE (pat) == COND_EXEC)
1714     pat = COND_EXEC_CODE (pat);
1715 
1716   if (GET_CODE (pat) == SET && set_noop_p (pat))
1717     return 1;
1718 
1719   if (GET_CODE (pat) == PARALLEL)
1720     {
1721       int i;
1722       /* If nothing but SETs of registers to themselves,
1723 	 this insn can also be deleted.  */
1724       for (i = 0; i < XVECLEN (pat, 0); i++)
1725 	{
1726 	  rtx tem = XVECEXP (pat, 0, i);
1727 
1728 	  if (GET_CODE (tem) == USE || GET_CODE (tem) == CLOBBER)
1729 	    continue;
1730 
1731 	  if (GET_CODE (tem) != SET || ! set_noop_p (tem))
1732 	    return 0;
1733 	}
1734 
1735       return 1;
1736     }
1737   return 0;
1738 }
1739 
1740 
1741 /* Return nonzero if register in range [REGNO, ENDREGNO)
1742    appears either explicitly or implicitly in X
1743    other than being stored into.
1744 
1745    References contained within the substructure at LOC do not count.
1746    LOC may be zero, meaning don't ignore anything.  */
1747 
1748 bool
1749 refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
1750 		   rtx *loc)
1751 {
1752   int i;
1753   unsigned int x_regno;
1754   RTX_CODE code;
1755   const char *fmt;
1756 
1757  repeat:
1758   /* The contents of a REG_NONNEG note are always zero, so we must come here
1759      upon repeat in case the last REG_NOTE is a REG_NONNEG note.  */
1760   if (x == 0)
1761     return false;
1762 
1763   code = GET_CODE (x);
1764 
1765   switch (code)
1766     {
1767     case REG:
1768       x_regno = REGNO (x);
1769 
1770       /* If we are modifying the stack, frame, or argument pointer, it will
1771 	 clobber a virtual register.  In fact, we could be more precise,
1772 	 but it isn't worth it.  */
1773       if ((x_regno == STACK_POINTER_REGNUM
1774 	   || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1775 	       && x_regno == ARG_POINTER_REGNUM)
1776 	   || x_regno == FRAME_POINTER_REGNUM)
1777 	  && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
1778 	return true;
1779 
1780       return endregno > x_regno && regno < END_REGNO (x);
1781 
1782     case SUBREG:
1783       /* If this is a SUBREG of a hard reg, we can see exactly which
1784 	 registers are being modified.  Otherwise, handle normally.  */
1785       if (REG_P (SUBREG_REG (x))
1786 	  && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
1787 	{
1788 	  unsigned int inner_regno = subreg_regno (x);
1789 	  unsigned int inner_endregno
1790 	    = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
1791 			     ? subreg_nregs (x) : 1);
1792 
1793 	  return endregno > inner_regno && regno < inner_endregno;
1794 	}
1795       break;
1796 
1797     case CLOBBER:
1798     case SET:
1799       if (&SET_DEST (x) != loc
1800 	  /* Note setting a SUBREG counts as referring to the REG it is in for
1801 	     a pseudo but not for hard registers since we can
1802 	     treat each word individually.  */
1803 	  && ((GET_CODE (SET_DEST (x)) == SUBREG
1804 	       && loc != &SUBREG_REG (SET_DEST (x))
1805 	       && REG_P (SUBREG_REG (SET_DEST (x)))
1806 	       && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
1807 	       && refers_to_regno_p (regno, endregno,
1808 				     SUBREG_REG (SET_DEST (x)), loc))
1809 	      || (!REG_P (SET_DEST (x))
1810 		  && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
1811 	return true;
1812 
1813       if (code == CLOBBER || loc == &SET_SRC (x))
1814 	return false;
1815       x = SET_SRC (x);
1816       goto repeat;
1817 
1818     default:
1819       break;
1820     }
1821 
1822   /* X does not match, so try its subexpressions.  */
1823 
1824   fmt = GET_RTX_FORMAT (code);
1825   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1826     {
1827       if (fmt[i] == 'e' && loc != &XEXP (x, i))
1828 	{
1829 	  if (i == 0)
1830 	    {
1831 	      x = XEXP (x, 0);
1832 	      goto repeat;
1833 	    }
1834 	  else
1835 	    if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
1836 	      return true;
1837 	}
1838       else if (fmt[i] == 'E')
1839 	{
1840 	  int j;
1841 	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1842 	    if (loc != &XVECEXP (x, i, j)
1843 		&& refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
1844 	      return true;
1845 	}
1846     }
1847   return false;
1848 }
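
/* For example, to test whether the single hard register REGNO is used or
   set anywhere in an insn's pattern (sketch; REGNO and INSN stand for a
   caller's own variables):

     if (refers_to_regno_p (regno, regno + 1, PATTERN (insn), NULL))
       ...  */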
1849 
1850 /* Nonzero if modifying X will affect IN.  If X is a register or a SUBREG,
1851    we check if any register number in X conflicts with the relevant register
1852    numbers.  If X is a constant, return 0.  If X is a MEM, return 1 iff IN
1853    contains a MEM (we don't bother checking for memory addresses that can't
1854    conflict because we expect this to be a rare case).  */
1855 
1856 int
1857 reg_overlap_mentioned_p (const_rtx x, const_rtx in)
1858 {
1859   unsigned int regno, endregno;
1860 
1861   /* If either argument is a constant, then modifying X cannot
1862      affect IN.  Here we look at IN; we could profitably combine
1863      CONSTANT_P (x) with the switch statement below.  */
1864   if (CONSTANT_P (in))
1865     return 0;
1866 
1867  recurse:
1868   switch (GET_CODE (x))
1869     {
1870     case CLOBBER:
1871     case STRICT_LOW_PART:
1872     case ZERO_EXTRACT:
1873     case SIGN_EXTRACT:
1874       /* Overly conservative.  */
1875       x = XEXP (x, 0);
1876       goto recurse;
1877 
1878     case SUBREG:
1879       regno = REGNO (SUBREG_REG (x));
1880       if (regno < FIRST_PSEUDO_REGISTER)
1881 	regno = subreg_regno (x);
1882       endregno = regno + (regno < FIRST_PSEUDO_REGISTER
1883 			  ? subreg_nregs (x) : 1);
1884       goto do_reg;
1885 
1886     case REG:
1887       regno = REGNO (x);
1888       endregno = END_REGNO (x);
1889     do_reg:
1890       return refers_to_regno_p (regno, endregno, in, (rtx*) 0);
1891 
1892     case MEM:
1893       {
1894 	const char *fmt;
1895 	int i;
1896 
1897 	if (MEM_P (in))
1898 	  return 1;
1899 
1900 	fmt = GET_RTX_FORMAT (GET_CODE (in));
1901 	for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
1902 	  if (fmt[i] == 'e')
1903 	    {
1904 	      if (reg_overlap_mentioned_p (x, XEXP (in, i)))
1905 		return 1;
1906 	    }
1907 	  else if (fmt[i] == 'E')
1908 	    {
1909 	      int j;
1910 	      for (j = XVECLEN (in, i) - 1; j >= 0; --j)
1911 		if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
1912 		  return 1;
1913 	    }
1914 
1915 	return 0;
1916       }
1917 
1918     case SCRATCH:
1919     case PC:
1920     case CC0:
1921       return reg_mentioned_p (x, in);
1922 
1923     case PARALLEL:
1924       {
1925 	int i;
1926 
1927 	/* If any register in here refers to it we return true.  */
1928 	for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1929 	  if (XEXP (XVECEXP (x, 0, i), 0) != 0
1930 	      && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
1931 	    return 1;
1932 	return 0;
1933       }
1934 
1935     default:
1936       gcc_assert (CONSTANT_P (x));
1937       return 0;
1938     }
1939 }
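
/* Typical use (sketch): before moving SET across OTHER_INSN, a pass can
   rule out one kind of hazard by checking that the destination does not
   overlap anything OTHER_INSN mentions:

     if (!reg_overlap_mentioned_p (SET_DEST (set), PATTERN (other_insn)))
       ...  */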
1940 
1941 /* Call FUN on each register or MEM that is stored into or clobbered by X.
1942    (X would be the pattern of an insn).  DATA is an arbitrary pointer,
1943    ignored by note_stores, but passed to FUN.
1944 
1945    FUN receives three arguments:
1946    1. the REG, MEM, CC0 or PC being stored in or clobbered,
1947    2. the SET or CLOBBER rtx that does the store,
1948    3. the pointer DATA provided to note_stores.
1949 
1950   If the item being stored in or clobbered is a SUBREG of a hard register,
1951   the SUBREG will be passed.  */
1952 
1953 void
1954 note_pattern_stores (const_rtx x,
1955 		     void (*fun) (rtx, const_rtx, void *), void *data)
1956 {
1957   int i;
1958 
1959   if (GET_CODE (x) == COND_EXEC)
1960     x = COND_EXEC_CODE (x);
1961 
1962   if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
1963     {
1964       rtx dest = SET_DEST (x);
1965 
1966       while ((GET_CODE (dest) == SUBREG
1967 	      && (!REG_P (SUBREG_REG (dest))
1968 		  || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
1969 	     || GET_CODE (dest) == ZERO_EXTRACT
1970 	     || GET_CODE (dest) == STRICT_LOW_PART)
1971 	dest = XEXP (dest, 0);
1972 
1973       /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
1974 	 each of whose first operand is a register.  */
1975       if (GET_CODE (dest) == PARALLEL)
1976 	{
1977 	  for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
1978 	    if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
1979 	      (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
1980 	}
1981       else
1982 	(*fun) (dest, x, data);
1983     }
1984 
1985   else if (GET_CODE (x) == PARALLEL)
1986     for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1987       note_pattern_stores (XVECEXP (x, 0, i), fun, data);
1988 }
1989 
1990 /* Same, but for an instruction.  If the instruction is a call, include
1991    any CLOBBERs in its CALL_INSN_FUNCTION_USAGE.  */
1992 
1993 void
1994 note_stores (const rtx_insn *insn,
1995 	     void (*fun) (rtx, const_rtx, void *), void *data)
1996 {
1997   if (CALL_P (insn))
1998     for (rtx link = CALL_INSN_FUNCTION_USAGE (insn);
1999 	 link; link = XEXP (link, 1))
2000       if (GET_CODE (XEXP (link, 0)) == CLOBBER)
2001 	note_pattern_stores (XEXP (link, 0), fun, data);
2002   note_pattern_stores (PATTERN (insn), fun, data);
2003 }
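
/* Example callback (sketch): collect every hard register stored or
   clobbered by INSN.  The callback signature matches FUN above;
   add_to_hard_reg_set is from hard-reg-set.h.

     static void
     record_hard_reg_store (rtx dest, const_rtx set ATTRIBUTE_UNUSED,
                            void *data)
     {
       HARD_REG_SET *stores = (HARD_REG_SET *) data;
       if (REG_P (dest) && HARD_REGISTER_P (dest))
         add_to_hard_reg_set (stores, GET_MODE (dest), REGNO (dest));
     }

     HARD_REG_SET stores;
     CLEAR_HARD_REG_SET (stores);
     note_stores (insn, record_hard_reg_store, &stores);  */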
2004 
2005 /* Like note_stores, but call FUN for each expression that is being
2006    referenced in PBODY, a pointer to the PATTERN of an insn.  We only call
2007    FUN for each expression, not any interior subexpressions.  FUN receives a
2008    pointer to the expression and the DATA passed to this function.
2009 
2010    Note that this is not quite the same test as that done in reg_referenced_p
2011    since that considers something as being referenced if it is being
2012    partially set, while we do not.  */
2013 
2014 void
2015 note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
2016 {
2017   rtx body = *pbody;
2018   int i;
2019 
2020   switch (GET_CODE (body))
2021     {
2022     case COND_EXEC:
2023       (*fun) (&COND_EXEC_TEST (body), data);
2024       note_uses (&COND_EXEC_CODE (body), fun, data);
2025       return;
2026 
2027     case PARALLEL:
2028       for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
2029 	note_uses (&XVECEXP (body, 0, i), fun, data);
2030       return;
2031 
2032     case SEQUENCE:
2033       for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
2034 	note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
2035       return;
2036 
2037     case USE:
2038       (*fun) (&XEXP (body, 0), data);
2039       return;
2040 
2041     case ASM_OPERANDS:
2042       for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
2043 	(*fun) (&ASM_OPERANDS_INPUT (body, i), data);
2044       return;
2045 
2046     case TRAP_IF:
2047       (*fun) (&TRAP_CONDITION (body), data);
2048       return;
2049 
2050     case PREFETCH:
2051       (*fun) (&XEXP (body, 0), data);
2052       return;
2053 
2054     case UNSPEC:
2055     case UNSPEC_VOLATILE:
2056       for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
2057 	(*fun) (&XVECEXP (body, 0, i), data);
2058       return;
2059 
2060     case CLOBBER:
2061       if (MEM_P (XEXP (body, 0)))
2062 	(*fun) (&XEXP (XEXP (body, 0), 0), data);
2063       return;
2064 
2065     case SET:
2066       {
2067 	rtx dest = SET_DEST (body);
2068 
2069 	/* For SETs, call FUN on the source, on the address of a MEM
2070 	   destination, and on the operands of a ZERO_EXTRACT destination.  */
2071 	(*fun) (&SET_SRC (body), data);
2072 
2073 	if (GET_CODE (dest) == ZERO_EXTRACT)
2074 	  {
2075 	    (*fun) (&XEXP (dest, 1), data);
2076 	    (*fun) (&XEXP (dest, 2), data);
2077 	  }
2078 
2079 	while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
2080 	  dest = XEXP (dest, 0);
2081 
2082 	if (MEM_P (dest))
2083 	  (*fun) (&XEXP (dest, 0), data);
2084       }
2085       return;
2086 
2087     default:
2088       /* All the other possibilities never store.  */
2089       (*fun) (pbody, data);
2090       return;
2091     }
2092 }
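
/* Example (sketch): set a flag if any use expression in INSN mentions the
   caller-provided register SOME_REG.  reg_mentioned_p does the recursive
   walk that note_uses itself deliberately does not do.

     struct find_use { rtx reg; bool found; };

     static void
     find_reg_use (rtx *use_p, void *data)
     {
       find_use *d = (find_use *) data;
       if (reg_mentioned_p (d->reg, *use_p))
         d->found = true;
     }

     find_use d = { some_reg, false };
     note_uses (&PATTERN (insn), find_reg_use, &d);  */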
2093 
2094 /* Try to add a description of REG X to this object, stopping once
2095    the REF_END limit has been reached.  FLAGS is a bitmask of
2096    rtx_obj_reference flags that describe the context.  */
2097 
2098 void
2099 rtx_properties::try_to_add_reg (const_rtx x, unsigned int flags)
2100 {
2101   if (REG_NREGS (x) != 1)
2102     flags |= rtx_obj_flags::IS_MULTIREG;
2103   machine_mode mode = GET_MODE (x);
2104   unsigned int start_regno = REGNO (x);
2105   unsigned int end_regno = END_REGNO (x);
2106   for (unsigned int regno = start_regno; regno < end_regno; ++regno)
2107     if (ref_iter != ref_end)
2108       *ref_iter++ = rtx_obj_reference (regno, flags, mode,
2109 				       regno - start_regno);
2110 }
2111 
2112 /* Add a description of destination X to this object.  FLAGS is a bitmask
2113    of rtx_obj_reference flags that describe the context.
2114 
2115    This routine accepts all rtxes that can legitimately appear in a
2116    SET_DEST.  */
2117 
2118 void
2119 rtx_properties::try_to_add_dest (const_rtx x, unsigned int flags)
2120 {
2121   /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
2122      each of whose first operand is a register.  */
2123   if (__builtin_expect (GET_CODE (x) == PARALLEL, 0))
2124     {
2125       for (int i = XVECLEN (x, 0) - 1; i >= 0; --i)
2126 	if (rtx dest = XEXP (XVECEXP (x, 0, i), 0))
2127 	  try_to_add_dest (dest, flags);
2128       return;
2129     }
2130 
2131   unsigned int base_flags = flags & rtx_obj_flags::STICKY_FLAGS;
2132   flags |= rtx_obj_flags::IS_WRITE;
2133   for (;;)
2134     if (GET_CODE (x) == ZERO_EXTRACT)
2135       {
2136 	try_to_add_src (XEXP (x, 1), base_flags);
2137 	try_to_add_src (XEXP (x, 2), base_flags);
2138 	flags |= rtx_obj_flags::IS_READ;
2139 	x = XEXP (x, 0);
2140       }
2141     else if (GET_CODE (x) == STRICT_LOW_PART)
2142       {
2143 	flags |= rtx_obj_flags::IS_READ;
2144 	x = XEXP (x, 0);
2145       }
2146     else if (GET_CODE (x) == SUBREG)
2147       {
2148 	flags |= rtx_obj_flags::IN_SUBREG;
2149 	if (read_modify_subreg_p (x))
2150 	  flags |= rtx_obj_flags::IS_READ;
2151 	x = SUBREG_REG (x);
2152       }
2153     else
2154       break;
2155 
2156   if (MEM_P (x))
2157     {
2158       if (ref_iter != ref_end)
2159 	*ref_iter++ = rtx_obj_reference (MEM_REGNO, flags, GET_MODE (x));
2160 
2161       unsigned int addr_flags = base_flags | rtx_obj_flags::IN_MEM_STORE;
2162       if (flags & rtx_obj_flags::IS_READ)
2163 	addr_flags |= rtx_obj_flags::IN_MEM_LOAD;
2164       try_to_add_src (XEXP (x, 0), addr_flags);
2165       return;
2166     }
2167 
2168   if (__builtin_expect (REG_P (x), 1))
2169     {
2170       /* We want to keep sp alive everywhere, by making all
2171 	 writes to sp also use sp.  */
2172       if (REGNO (x) == STACK_POINTER_REGNUM)
2173 	flags |= rtx_obj_flags::IS_READ;
2174       try_to_add_reg (x, flags);
2175       return;
2176     }
2177 }
2178 
2179 /* Try to add a description of source X to this object, stopping once
2180    the REF_END limit has been reached.  FLAGS is a bitmask of
2181    rtx_obj_reference flags that describe the context.
2182 
2183    This routine accepts all rtxes that can legitimately appear in a SET_SRC.  */
2184 
2185 void
2186 rtx_properties::try_to_add_src (const_rtx x, unsigned int flags)
2187 {
2188   unsigned int base_flags = flags & rtx_obj_flags::STICKY_FLAGS;
2189   subrtx_iterator::array_type array;
2190   FOR_EACH_SUBRTX (iter, array, x, NONCONST)
2191     {
2192       const_rtx x = *iter;
2193       rtx_code code = GET_CODE (x);
2194       if (code == REG)
2195 	try_to_add_reg (x, flags | rtx_obj_flags::IS_READ);
2196       else if (code == MEM)
2197 	{
2198 	  if (MEM_VOLATILE_P (x))
2199 	    has_volatile_refs = true;
2200 
2201 	  if (!MEM_READONLY_P (x) && ref_iter != ref_end)
2202 	    {
2203 	      auto mem_flags = flags | rtx_obj_flags::IS_READ;
2204 	      *ref_iter++ = rtx_obj_reference (MEM_REGNO, mem_flags,
2205 					       GET_MODE (x));
2206 	    }
2207 
2208 	  try_to_add_src (XEXP (x, 0),
2209 			  base_flags | rtx_obj_flags::IN_MEM_LOAD);
2210 	  iter.skip_subrtxes ();
2211 	}
2212       else if (code == SUBREG)
2213 	{
2214 	  try_to_add_src (SUBREG_REG (x), flags | rtx_obj_flags::IN_SUBREG);
2215 	  iter.skip_subrtxes ();
2216 	}
2217       else if (code == UNSPEC_VOLATILE)
2218 	has_volatile_refs = true;
2219       else if (code == ASM_INPUT || code == ASM_OPERANDS)
2220 	{
2221 	  has_asm = true;
2222 	  if (MEM_VOLATILE_P (x))
2223 	    has_volatile_refs = true;
2224 	}
2225       else if (code == PRE_INC
2226 	       || code == PRE_DEC
2227 	       || code == POST_INC
2228 	       || code == POST_DEC
2229 	       || code == PRE_MODIFY
2230 	       || code == POST_MODIFY)
2231 	{
2232 	  has_pre_post_modify = true;
2233 
2234 	  unsigned int addr_flags = (base_flags
2235 				     | rtx_obj_flags::IS_PRE_POST_MODIFY
2236 				     | rtx_obj_flags::IS_READ);
2237 	  try_to_add_dest (XEXP (x, 0), addr_flags);
2238 	  if (code == PRE_MODIFY || code == POST_MODIFY)
2239 	    iter.substitute (XEXP (XEXP (x, 1), 1));
2240 	  else
2241 	    iter.skip_subrtxes ();
2242 	}
2243       else if (code == CALL)
2244 	has_call = true;
2245     }
2246 }
2247 
2248 /* Try to add a description of instruction pattern PAT to this object,
2249    stopping once the REF_END limit has been reached.  */
2250 
2251 void
2252 rtx_properties::try_to_add_pattern (const_rtx pat)
2253 {
2254   switch (GET_CODE (pat))
2255     {
2256     case COND_EXEC:
2257       try_to_add_src (COND_EXEC_TEST (pat));
2258       try_to_add_pattern (COND_EXEC_CODE (pat));
2259       break;
2260 
2261     case PARALLEL:
2262       {
2263 	int last = XVECLEN (pat, 0) - 1;
2264 	for (int i = 0; i < last; ++i)
2265 	  try_to_add_pattern (XVECEXP (pat, 0, i));
2266 	try_to_add_pattern (XVECEXP (pat, 0, last));
2267 	break;
2268       }
2269 
2270     case ASM_OPERANDS:
2271       for (int i = 0, len = ASM_OPERANDS_INPUT_LENGTH (pat); i < len; ++i)
2272 	try_to_add_src (ASM_OPERANDS_INPUT (pat, i));
2273       break;
2274 
2275     case CLOBBER:
2276       try_to_add_dest (XEXP (pat, 0), rtx_obj_flags::IS_CLOBBER);
2277       break;
2278 
2279     case SET:
2280       try_to_add_dest (SET_DEST (pat));
2281       try_to_add_src (SET_SRC (pat));
2282       break;
2283 
2284     default:
2285       /* All the other possibilities never store and can use a normal
2286 	 rtx walk.  This includes:
2287 
2288 	 - USE
2289 	 - TRAP_IF
2290 	 - PREFETCH
2291 	 - UNSPEC
2292 	 - UNSPEC_VOLATILE.  */
2293       try_to_add_src (pat);
2294       break;
2295     }
2296 }
2297 
2298 /* Try to add a description of INSN to this object, stopping once
2299    the REF_END limit has been reached.  INCLUDE_NOTES is true if the
2300    description should include REG_EQUAL and REG_EQUIV notes; all such
2301    references will then be marked with rtx_obj_flags::IN_NOTE.
2302 
2303    For calls, this description includes all accesses in
2304    CALL_INSN_FUNCTION_USAGE.  It also includes all implicit accesses
2305    to global registers by the target function.  However, it does not
2306    include clobbers performed by the target function; callers that want
2307    this information should instead use the function_abi interface.  */
2308 
2309 void
2310 rtx_properties::try_to_add_insn (const rtx_insn *insn, bool include_notes)
2311 {
2312   if (CALL_P (insn))
2313     {
2314       /* Non-const functions can read from global registers.  Impure
2315 	 functions can also set them.
2316 
2317 	 Adding the global registers first removes a situation in which
2318 	 a fixed-form clobber of register R could come before a real set
2319 	 of register R.  */
2320       if (!hard_reg_set_empty_p (global_reg_set)
2321 	  && !RTL_CONST_CALL_P (insn))
2322 	{
2323 	  unsigned int flags = rtx_obj_flags::IS_READ;
2324 	  if (!RTL_PURE_CALL_P (insn))
2325 	    flags |= rtx_obj_flags::IS_WRITE;
2326 	  for (unsigned int regno = 0; regno < FIRST_PSEUDO_REGISTER; ++regno)
2327 	    /* As a special case, the stack pointer is invariant across calls
2328 	       even if it has been marked global; see the corresponding
2329 	       handling in df_get_call_refs.  */
2330 	    if (regno != STACK_POINTER_REGNUM
2331 		&& global_regs[regno]
2332 		&& ref_iter != ref_end)
2333 	      *ref_iter++ = rtx_obj_reference (regno, flags,
2334 					       reg_raw_mode[regno], 0);
2335 	}
2336       /* Untyped calls implicitly set all function value registers.
2337 	 Again, we add them first in case the main pattern contains
2338 	 a fixed-form clobber.  */
2339       if (find_reg_note (insn, REG_UNTYPED_CALL, NULL_RTX))
2340 	for (unsigned int regno = 0; regno < FIRST_PSEUDO_REGISTER; ++regno)
2341 	  if (targetm.calls.function_value_regno_p (regno)
2342 	      && ref_iter != ref_end)
2343 	    *ref_iter++ = rtx_obj_reference (regno, rtx_obj_flags::IS_WRITE,
2344 					     reg_raw_mode[regno], 0);
2345       if (ref_iter != ref_end && !RTL_CONST_CALL_P (insn))
2346 	{
2347 	  auto mem_flags = rtx_obj_flags::IS_READ;
2348 	  if (!RTL_PURE_CALL_P (insn))
2349 	    mem_flags |= rtx_obj_flags::IS_WRITE;
2350 	  *ref_iter++ = rtx_obj_reference (MEM_REGNO, mem_flags, BLKmode);
2351 	}
2352       try_to_add_pattern (PATTERN (insn));
2353       for (rtx link = CALL_INSN_FUNCTION_USAGE (insn); link;
2354 	   link = XEXP (link, 1))
2355 	{
2356 	  rtx x = XEXP (link, 0);
2357 	  if (GET_CODE (x) == CLOBBER)
2358 	    try_to_add_dest (XEXP (x, 0), rtx_obj_flags::IS_CLOBBER);
2359 	  else if (GET_CODE (x) == USE)
2360 	    try_to_add_src (XEXP (x, 0));
2361 	}
2362     }
2363   else
2364     try_to_add_pattern (PATTERN (insn));
2365 
2366   if (include_notes)
2367     for (rtx note = REG_NOTES (insn); note; note = XEXP (note, 1))
2368       if (REG_NOTE_KIND (note) == REG_EQUAL
2369 	  || REG_NOTE_KIND (note) == REG_EQUIV)
2370 	try_to_add_note (XEXP (note, 0));
2371 }
2372 
2373 /* Grow the storage by a bit while keeping the contents of the first
2374    START elements.  */
2375 
2376 void
2377 vec_rtx_properties_base::grow (ptrdiff_t start)
2378 {
2379   /* The same heuristic that vec uses.  */
2380   ptrdiff_t new_elems = (ref_end - ref_begin) * 3 / 2;
2381   if (ref_begin == m_storage)
2382     {
2383       ref_begin = XNEWVEC (rtx_obj_reference, new_elems);
2384       if (start)
2385 	memcpy (ref_begin, m_storage, start * sizeof (rtx_obj_reference));
2386     }
2387   else
2388     ref_begin = reinterpret_cast<rtx_obj_reference *>
2389       (xrealloc (ref_begin, new_elems * sizeof (rtx_obj_reference)));
2390   ref_iter = ref_begin + start;
2391   ref_end = ref_begin + new_elems;
2392 }
2393 
2394 /* Return nonzero if X's old contents don't survive after INSN.
2395    This will be true if X is (cc0) or if X is a register and
2396    X either dies in INSN or is entirely set by INSN.
2397 
2398    "Entirely set" means set directly and not through a SUBREG, or
2399    ZERO_EXTRACT, so no trace of the old contents remains.
2400    Likewise, REG_INC does not count.
2401 
2402    REG may be a hard or pseudo reg.  Renumbering is not taken into account,
2403    but for this use that makes no difference, since regs don't overlap
2404    during their lifetimes.  Therefore, this function may be used
2405    at any time after deaths have been computed.
2406 
2407    If REG is a hard reg that occupies multiple machine registers, this
2408    function will only return 1 if each of those registers will be replaced
2409    by INSN.  */
2410 
2411 int
2412 dead_or_set_p (const rtx_insn *insn, const_rtx x)
2413 {
2414   unsigned int regno, end_regno;
2415   unsigned int i;
2416 
2417   /* Can't use cc0_rtx below since this file is used by genattrtab.c.  */
2418   if (GET_CODE (x) == CC0)
2419     return 1;
2420 
2421   gcc_assert (REG_P (x));
2422 
2423   regno = REGNO (x);
2424   end_regno = END_REGNO (x);
2425   for (i = regno; i < end_regno; i++)
2426     if (! dead_or_set_regno_p (insn, i))
2427       return 0;
2428 
2429   return 1;
2430 }
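
/* For example (sketch): a transformation that overwrites REG as a side
   effect of rewriting INSN is only safe when the old value is no longer
   needed afterwards:

     if (dead_or_set_p (insn, reg))
       ...  */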
2431 
2432 /* Return TRUE iff DEST is a register or subreg of a register, is a
2433    complete rather than read-modify-write destination, and contains
2434    register TEST_REGNO.  */
2435 
2436 static bool
2437 covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
2438 {
2439   unsigned int regno, endregno;
2440 
2441   if (GET_CODE (dest) == SUBREG && !read_modify_subreg_p (dest))
2442     dest = SUBREG_REG (dest);
2443 
2444   if (!REG_P (dest))
2445     return false;
2446 
2447   regno = REGNO (dest);
2448   endregno = END_REGNO (dest);
2449   return (test_regno >= regno && test_regno < endregno);
2450 }
2451 
2452 /* Like covers_regno_no_parallel_p, but also handles PARALLELs where
2453    any member matches the covers_regno_no_parallel_p criteria.  */
2454 
2455 static bool
2456 covers_regno_p (const_rtx dest, unsigned int test_regno)
2457 {
2458   if (GET_CODE (dest) == PARALLEL)
2459     {
2460       /* Some targets place small structures in registers for return
2461 	 values of functions, and those registers are wrapped in
2462 	 PARALLELs that we may see as the destination of a SET.  */
2463       int i;
2464 
2465       for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2466 	{
2467 	  rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
2468 	  if (inner != NULL_RTX
2469 	      && covers_regno_no_parallel_p (inner, test_regno))
2470 	    return true;
2471 	}
2472 
2473       return false;
2474     }
2475   else
2476     return covers_regno_no_parallel_p (dest, test_regno);
2477 }
2478 
2479 /* Utility function for dead_or_set_p to check an individual register. */
2480 
2481 int
2482 dead_or_set_regno_p (const rtx_insn *insn, unsigned int test_regno)
2483 {
2484   const_rtx pattern;
2485 
2486   /* See if there is a death note for something that includes TEST_REGNO.  */
2487   if (find_regno_note (insn, REG_DEAD, test_regno))
2488     return 1;
2489 
2490   if (CALL_P (insn)
2491       && find_regno_fusage (insn, CLOBBER, test_regno))
2492     return 1;
2493 
2494   pattern = PATTERN (insn);
2495 
2496   /* If a COND_EXEC is not executed, the value survives.  */
2497   if (GET_CODE (pattern) == COND_EXEC)
2498     return 0;
2499 
2500   if (GET_CODE (pattern) == SET || GET_CODE (pattern) == CLOBBER)
2501     return covers_regno_p (SET_DEST (pattern), test_regno);
2502   else if (GET_CODE (pattern) == PARALLEL)
2503     {
2504       int i;
2505 
2506       for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
2507 	{
2508 	  rtx body = XVECEXP (pattern, 0, i);
2509 
2510 	  if (GET_CODE (body) == COND_EXEC)
2511 	    body = COND_EXEC_CODE (body);
2512 
2513 	  if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
2514 	      && covers_regno_p (SET_DEST (body), test_regno))
2515 	    return 1;
2516 	}
2517     }
2518 
2519   return 0;
2520 }
2521 
2522 /* Return the reg-note of kind KIND in insn INSN, if there is one.
2523    If DATUM is nonzero, look for one whose datum is DATUM.  */
2524 
2525 rtx
2526 find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
2527 {
2528   rtx link;
2529 
2530   gcc_checking_assert (insn);
2531 
2532   /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
2533   if (! INSN_P (insn))
2534     return 0;
2535   if (datum == 0)
2536     {
2537       for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2538 	if (REG_NOTE_KIND (link) == kind)
2539 	  return link;
2540       return 0;
2541     }
2542 
2543   for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2544     if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
2545       return link;
2546   return 0;
2547 }
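
/* For example (sketch): fetch the value recorded by a REG_EQUAL note,
   if any:

     rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
     if (note)
       {
         rtx value = XEXP (note, 0);
         ...
       }  */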
2548 
2549 /* Return the reg-note of kind KIND in insn INSN which applies to register
2550    number REGNO, if any.  Return 0 if there is no such reg-note.  Note that
2551    the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
2552    it might be the case that the note overlaps REGNO.  */
2553 
2554 rtx
2555 find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
2556 {
2557   rtx link;
2558 
2559   /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
2560   if (! INSN_P (insn))
2561     return 0;
2562 
2563   for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2564     if (REG_NOTE_KIND (link) == kind
2565 	/* Verify that it is a register, so that scratch and MEM won't cause a
2566 	   problem here.  */
2567 	&& REG_P (XEXP (link, 0))
2568 	&& REGNO (XEXP (link, 0)) <= regno
2569 	&& END_REGNO (XEXP (link, 0)) > regno)
2570       return link;
2571   return 0;
2572 }
2573 
2574 /* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
2575    has such a note.  */
2576 
2577 rtx
2578 find_reg_equal_equiv_note (const_rtx insn)
2579 {
2580   rtx link;
2581 
2582   if (!INSN_P (insn))
2583     return 0;
2584 
2585   for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2586     if (REG_NOTE_KIND (link) == REG_EQUAL
2587 	|| REG_NOTE_KIND (link) == REG_EQUIV)
2588       {
2589 	/* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
2590 	   insns that have multiple sets.  Checking single_set to
2591 	   make sure of this is not the proper check, as explained
2592 	   in the comment in set_unique_reg_note.
2593 
2594 	   This should be changed into an assert.  */
2595 	if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
2596 	  return 0;
2597 	return link;
2598       }
2599   return NULL;
2600 }
2601 
2602 /* Check whether INSN is a single_set whose source is known to be
2603    equivalent to a constant.  Return that constant if so, otherwise
2604    return null.  */
2605 
2606 rtx
2607 find_constant_src (const rtx_insn *insn)
2608 {
2609   rtx note, set, x;
2610 
2611   set = single_set (insn);
2612   if (set)
2613     {
2614       x = avoid_constant_pool_reference (SET_SRC (set));
2615       if (CONSTANT_P (x))
2616 	return x;
2617     }
2618 
2619   note = find_reg_equal_equiv_note (insn);
2620   if (note && CONSTANT_P (XEXP (note, 0)))
2621     return XEXP (note, 0);
2622 
2623   return NULL_RTX;
2624 }
2625 
2626 /* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
2627    in the CALL_INSN_FUNCTION_USAGE information of INSN.  */
2628 
2629 int
2630 find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
2631 {
2632   /* If it's not a CALL_INSN, it can't possibly have a
2633      CALL_INSN_FUNCTION_USAGE field, so don't bother checking.  */
2634   if (!CALL_P (insn))
2635     return 0;
2636 
2637   gcc_assert (datum);
2638 
2639   if (!REG_P (datum))
2640     {
2641       rtx link;
2642 
2643       for (link = CALL_INSN_FUNCTION_USAGE (insn);
2644 	   link;
2645 	   link = XEXP (link, 1))
2646 	if (GET_CODE (XEXP (link, 0)) == code
2647 	    && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
2648 	  return 1;
2649     }
2650   else
2651     {
2652       unsigned int regno = REGNO (datum);
2653 
2654       /* CALL_INSN_FUNCTION_USAGE information cannot contain references
2655 	 to pseudo registers, so don't bother checking.  */
2656 
2657       if (regno < FIRST_PSEUDO_REGISTER)
2658 	{
2659 	  unsigned int end_regno = END_REGNO (datum);
2660 	  unsigned int i;
2661 
2662 	  for (i = regno; i < end_regno; i++)
2663 	    if (find_regno_fusage (insn, code, i))
2664 	      return 1;
2665 	}
2666     }
2667 
2668   return 0;
2669 }
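
/* For example (sketch): test whether hard register REG is passed to the
   call, which is recorded as a USE in CALL_INSN_FUNCTION_USAGE:

     if (find_reg_fusage (call_insn, USE, reg))
       ...  */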
2670 
2671 /* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
2672    in the CALL_INSN_FUNCTION_USAGE information of INSN.  */
2673 
2674 int
2675 find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
2676 {
2677   rtx link;
2678 
2679   /* CALL_INSN_FUNCTION_USAGE information cannot contain references
2680      to pseudo registers, so don't bother checking.  */
2681 
2682   if (regno >= FIRST_PSEUDO_REGISTER
2683       || !CALL_P (insn) )
2684     return 0;
2685 
2686   for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2687     {
2688       rtx op, reg;
2689 
2690       if (GET_CODE (op = XEXP (link, 0)) == code
2691 	  && REG_P (reg = XEXP (op, 0))
2692 	  && REGNO (reg) <= regno
2693 	  && END_REGNO (reg) > regno)
2694 	return 1;
2695     }
2696 
2697   return 0;
2698 }
2699 
2700 
2701 /* Return true if KIND is an integer REG_NOTE.  */
2702 
2703 static bool
2704 int_reg_note_p (enum reg_note kind)
2705 {
2706   return kind == REG_BR_PROB;
2707 }
2708 
2709 /* Allocate a register note with kind KIND and datum DATUM.  LIST is
2710    stored as the pointer to the next register note.  */
2711 
2712 rtx
2713 alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
2714 {
2715   rtx note;
2716 
2717   gcc_checking_assert (!int_reg_note_p (kind));
2718   switch (kind)
2719     {
2720     case REG_CC_SETTER:
2721     case REG_CC_USER:
2722     case REG_LABEL_TARGET:
2723     case REG_LABEL_OPERAND:
2724     case REG_TM:
2725       /* These types of register notes use an INSN_LIST rather than an
2726 	 EXPR_LIST, so that copying is done right and dumps look
2727 	 better.  */
2728       note = alloc_INSN_LIST (datum, list);
2729       PUT_REG_NOTE_KIND (note, kind);
2730       break;
2731 
2732     default:
2733       note = alloc_EXPR_LIST (kind, datum, list);
2734       break;
2735     }
2736 
2737   return note;
2738 }
2739 
2740 /* Add register note with kind KIND and datum DATUM to INSN.  */
2741 
2742 void
2743 add_reg_note (rtx insn, enum reg_note kind, rtx datum)
2744 {
2745   REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
2746 }
2747 
2748 /* Add an integer register note with kind KIND and datum DATUM to INSN.  */
2749 
2750 void
2751 add_int_reg_note (rtx_insn *insn, enum reg_note kind, int datum)
2752 {
2753   gcc_checking_assert (int_reg_note_p (kind));
2754   REG_NOTES (insn) = gen_rtx_INT_LIST ((machine_mode) kind,
2755 				       datum, REG_NOTES (insn));
2756 }
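
/* For example (sketch; INSN, JUMP, REG and PROB stand for a caller's own
   variables):

     add_reg_note (insn, REG_DEAD, reg);
     add_int_reg_note (jump, REG_BR_PROB, prob.to_reg_br_prob_note ());

   where PROB is a profile_probability and to_reg_br_prob_note is declared
   in profile-count.h.  */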
2757 
2758 /* Add a REG_ARGS_SIZE note to INSN with value VALUE.  */
2759 
2760 void
2761 add_args_size_note (rtx_insn *insn, poly_int64 value)
2762 {
2763   gcc_checking_assert (!find_reg_note (insn, REG_ARGS_SIZE, NULL_RTX));
2764   add_reg_note (insn, REG_ARGS_SIZE, gen_int_mode (value, Pmode));
2765 }
2766 
2767 /* Add a register note like NOTE to INSN.  */
2768 
2769 void
2770 add_shallow_copy_of_reg_note (rtx_insn *insn, rtx note)
2771 {
2772   if (GET_CODE (note) == INT_LIST)
2773     add_int_reg_note (insn, REG_NOTE_KIND (note), XINT (note, 0));
2774   else
2775     add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
2776 }
2777 
2778 /* Duplicate NOTE and return the copy.  */
2779 rtx
2780 duplicate_reg_note (rtx note)
2781 {
2782   reg_note kind = REG_NOTE_KIND (note);
2783 
2784   if (GET_CODE (note) == INT_LIST)
2785     return gen_rtx_INT_LIST ((machine_mode) kind, XINT (note, 0), NULL_RTX);
2786   else if (GET_CODE (note) == EXPR_LIST)
2787     return alloc_reg_note (kind, copy_insn_1 (XEXP (note, 0)), NULL_RTX);
2788   else
2789     return alloc_reg_note (kind, XEXP (note, 0), NULL_RTX);
2790 }
2791 
2792 /* Remove register note NOTE from the REG_NOTES of INSN.  */
2793 
2794 void
2795 remove_note (rtx_insn *insn, const_rtx note)
2796 {
2797   rtx link;
2798 
2799   if (note == NULL_RTX)
2800     return;
2801 
2802   if (REG_NOTES (insn) == note)
2803     REG_NOTES (insn) = XEXP (note, 1);
2804   else
2805     for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2806       if (XEXP (link, 1) == note)
2807 	{
2808 	  XEXP (link, 1) = XEXP (note, 1);
2809 	  break;
2810 	}
2811 
2812   switch (REG_NOTE_KIND (note))
2813     {
2814     case REG_EQUAL:
2815     case REG_EQUIV:
2816       df_notes_rescan (insn);
2817       break;
2818     default:
2819       break;
2820     }
2821 }
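
/* For example (sketch): remove_note tolerates a null NOTE, so dropping a
   possibly-absent REG_EQUAL note can be written as:

     remove_note (insn, find_reg_note (insn, REG_EQUAL, NULL_RTX));  */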
2822 
2823 /* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes.
2824    If NO_RESCAN is false and any notes were removed, call
2825    df_notes_rescan.  Return true if any note has been removed.  */
2826 
2827 bool
2828 remove_reg_equal_equiv_notes (rtx_insn *insn, bool no_rescan)
2829 {
2830   rtx *loc;
2831   bool ret = false;
2832 
2833   loc = &REG_NOTES (insn);
2834   while (*loc)
2835     {
2836       enum reg_note kind = REG_NOTE_KIND (*loc);
2837       if (kind == REG_EQUAL || kind == REG_EQUIV)
2838 	{
2839 	  *loc = XEXP (*loc, 1);
2840 	  ret = true;
2841 	}
2842       else
2843 	loc = &XEXP (*loc, 1);
2844     }
2845   if (ret && !no_rescan)
2846     df_notes_rescan (insn);
2847   return ret;
2848 }
2849 
2850 /* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO.  */
2851 
2852 void
2853 remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
2854 {
2855   df_ref eq_use;
2856 
2857   if (!df)
2858     return;
2859 
2860   /* This loop is a little tricky.  We cannot just go down the chain because
2861      it is being modified by some actions in the loop.  So we just iterate
2862      over the head.  We plan to drain the list anyway.  */
2863   while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
2864     {
2865       rtx_insn *insn = DF_REF_INSN (eq_use);
2866       rtx note = find_reg_equal_equiv_note (insn);
2867 
2868       /* This assert is generally triggered when someone deletes a REG_EQUAL
2869 	 or REG_EQUIV note by hacking the list manually rather than calling
2870 	 remove_note.  */
2871       gcc_assert (note);
2872 
2873       remove_note (insn, note);
2874     }
2875 }
2876 
2877 /* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
2878    return true if it is found.  A simple equality test is used to determine if
2879    NODE matches.  */
2880 
2881 bool
2882 in_insn_list_p (const rtx_insn_list *listp, const rtx_insn *node)
2883 {
2884   const_rtx x;
2885 
2886   for (x = listp; x; x = XEXP (x, 1))
2887     if (node == XEXP (x, 0))
2888       return true;
2889 
2890   return false;
2891 }
2892 
2893 /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
2894    remove that entry from the list if it is found.
2895 
2896    A simple equality test is used to determine if NODE matches.  */
2897 
2898 void
2899 remove_node_from_expr_list (const_rtx node, rtx_expr_list **listp)
2900 {
2901   rtx_expr_list *temp = *listp;
2902   rtx_expr_list *prev = NULL;
2903 
2904   while (temp)
2905     {
2906       if (node == temp->element ())
2907 	{
2908 	  /* Splice the node out of the list.  */
2909 	  if (prev)
2910 	    XEXP (prev, 1) = temp->next ();
2911 	  else
2912 	    *listp = temp->next ();
2913 
2914 	  return;
2915 	}
2916 
2917       prev = temp;
2918       temp = temp->next ();
2919     }
2920 }
2921 
2922 /* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
2923    remove that entry from the list if it is found.
2924 
2925    A simple equality test is used to determine if NODE matches.  */
2926 
2927 void
2928 remove_node_from_insn_list (const rtx_insn *node, rtx_insn_list **listp)
2929 {
2930   rtx_insn_list *temp = *listp;
2931   rtx_insn_list *prev = NULL;
2932 
2933   while (temp)
2934     {
2935       if (node == temp->insn ())
2936 	{
2937 	  /* Splice the node out of the list.  */
2938 	  if (prev)
2939 	    XEXP (prev, 1) = temp->next ();
2940 	  else
2941 	    *listp = temp->next ();
2942 
2943 	  return;
2944 	}
2945 
2946       prev = temp;
2947       temp = temp->next ();
2948     }
2949 }
2950 
2951 /* Nonzero if X contains any volatile instructions.  These are instructions
2952    which may cause unpredictable machine state, and thus no
2953    instructions or register uses should be moved or combined across them.
2954    This includes only volatile asms and UNSPEC_VOLATILE instructions.  */
2955 
2956 int
2957 volatile_insn_p (const_rtx x)
2958 {
2959   const RTX_CODE code = GET_CODE (x);
2960   switch (code)
2961     {
2962     case LABEL_REF:
2963     case SYMBOL_REF:
2964     case CONST:
2965     CASE_CONST_ANY:
2966     case CC0:
2967     case PC:
2968     case REG:
2969     case SCRATCH:
2970     case CLOBBER:
2971     case ADDR_VEC:
2972     case ADDR_DIFF_VEC:
2973     case CALL:
2974     case MEM:
2975       return 0;
2976 
2977     case UNSPEC_VOLATILE:
2978       return 1;
2979 
2980     case ASM_INPUT:
2981     case ASM_OPERANDS:
2982       if (MEM_VOLATILE_P (x))
2983 	return 1;
2984 
2985     default:
2986       break;
2987     }
2988 
2989   /* Recursively scan the operands of this expression.  */
2990 
2991   {
2992     const char *const fmt = GET_RTX_FORMAT (code);
2993     int i;
2994 
2995     for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2996       {
2997 	if (fmt[i] == 'e')
2998 	  {
2999 	    if (volatile_insn_p (XEXP (x, i)))
3000 	      return 1;
3001 	  }
3002 	else if (fmt[i] == 'E')
3003 	  {
3004 	    int j;
3005 	    for (j = 0; j < XVECLEN (x, i); j++)
3006 	      if (volatile_insn_p (XVECEXP (x, i, j)))
3007 		return 1;
3008 	  }
3009       }
3010   }
3011   return 0;
3012 }
3013 
3014 /* Nonzero if X contains any volatile memory references,
3015    UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions.  */
3016 
3017 int
3018 volatile_refs_p (const_rtx x)
3019 {
3020   const RTX_CODE code = GET_CODE (x);
3021   switch (code)
3022     {
3023     case LABEL_REF:
3024     case SYMBOL_REF:
3025     case CONST:
3026     CASE_CONST_ANY:
3027     case CC0:
3028     case PC:
3029     case REG:
3030     case SCRATCH:
3031     case CLOBBER:
3032     case ADDR_VEC:
3033     case ADDR_DIFF_VEC:
3034       return 0;
3035 
3036     case UNSPEC_VOLATILE:
3037       return 1;
3038 
3039     case MEM:
3040     case ASM_INPUT:
3041     case ASM_OPERANDS:
3042       if (MEM_VOLATILE_P (x))
3043 	return 1;
3044 
3045     default:
3046       break;
3047     }
3048 
3049   /* Recursively scan the operands of this expression.  */
3050 
3051   {
3052     const char *const fmt = GET_RTX_FORMAT (code);
3053     int i;
3054 
3055     for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3056       {
3057 	if (fmt[i] == 'e')
3058 	  {
3059 	    if (volatile_refs_p (XEXP (x, i)))
3060 	      return 1;
3061 	  }
3062 	else if (fmt[i] == 'E')
3063 	  {
3064 	    int j;
3065 	    for (j = 0; j < XVECLEN (x, i); j++)
3066 	      if (volatile_refs_p (XVECEXP (x, i, j)))
3067 		return 1;
3068 	  }
3069       }
3070   }
3071   return 0;
3072 }
3073 
3074 /* Similar to above, except that it also rejects register pre- and post-
3075    incrementing.  */
3076 
3077 int
3078 side_effects_p (const_rtx x)
3079 {
3080   const RTX_CODE code = GET_CODE (x);
3081   switch (code)
3082     {
3083     case LABEL_REF:
3084     case SYMBOL_REF:
3085     case CONST:
3086     CASE_CONST_ANY:
3087     case CC0:
3088     case PC:
3089     case REG:
3090     case SCRATCH:
3091     case ADDR_VEC:
3092     case ADDR_DIFF_VEC:
3093     case VAR_LOCATION:
3094       return 0;
3095 
3096     case CLOBBER:
3097       /* Reject CLOBBER with a non-VOID mode.  These are made by combine.c
3098 	 when some combination can't be done.  If we see one, don't think
3099 	 that we can simplify the expression.  */
3100       return (GET_MODE (x) != VOIDmode);
3101 
3102     case PRE_INC:
3103     case PRE_DEC:
3104     case POST_INC:
3105     case POST_DEC:
3106     case PRE_MODIFY:
3107     case POST_MODIFY:
3108     case CALL:
3109     case UNSPEC_VOLATILE:
3110       return 1;
3111 
3112     case MEM:
3113     case ASM_INPUT:
3114     case ASM_OPERANDS:
3115       if (MEM_VOLATILE_P (x))
3116 	return 1;
3117 
3118     default:
3119       break;
3120     }
3121 
3122   /* Recursively scan the operands of this expression.  */
3123 
3124   {
3125     const char *fmt = GET_RTX_FORMAT (code);
3126     int i;
3127 
3128     for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3129       {
3130 	if (fmt[i] == 'e')
3131 	  {
3132 	    if (side_effects_p (XEXP (x, i)))
3133 	      return 1;
3134 	  }
3135 	else if (fmt[i] == 'E')
3136 	  {
3137 	    int j;
3138 	    for (j = 0; j < XVECLEN (x, i); j++)
3139 	      if (side_effects_p (XVECEXP (x, i, j)))
3140 		return 1;
3141 	  }
3142       }
3143   }
3144   return 0;
3145 }
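
/* For example (sketch): an expression whose evaluation has no side effects
   can simply be dropped once its value is known to be unused:

     if (!side_effects_p (SET_SRC (set)))
       ...  */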
3146 
3147 /* Return nonzero if evaluating rtx X might cause a trap.
3148    FLAGS controls how to consider MEMs.  A nonzero value means the context
3149    of the access may have changed from the original, such that the
3150    address may have become invalid.  */
3151 
3152 int
3153 may_trap_p_1 (const_rtx x, unsigned flags)
3154 {
3155   int i;
3156   enum rtx_code code;
3157   const char *fmt;
3158 
3159   /* We make no distinction currently, but this function is part of
3160      the internal target-hooks ABI so we keep the parameter as
3161      "unsigned flags".  */
3162   bool code_changed = flags != 0;
3163 
3164   if (x == 0)
3165     return 0;
3166   code = GET_CODE (x);
3167   switch (code)
3168     {
3169       /* Handle these cases quickly.  */
3170     CASE_CONST_ANY:
3171     case SYMBOL_REF:
3172     case LABEL_REF:
3173     case CONST:
3174     case PC:
3175     case CC0:
3176     case REG:
3177     case SCRATCH:
3178       return 0;
3179 
3180     case UNSPEC:
3181       return targetm.unspec_may_trap_p (x, flags);
3182 
3183     case UNSPEC_VOLATILE:
3184     case ASM_INPUT:
3185     case TRAP_IF:
3186       return 1;
3187 
3188     case ASM_OPERANDS:
3189       return MEM_VOLATILE_P (x);
3190 
3191       /* Memory ref can trap unless it's a static var or a stack slot.  */
3192     case MEM:
3193       /* Recognize specific pattern of stack checking probes.  */
3194       if (flag_stack_check
3195 	  && MEM_VOLATILE_P (x)
3196 	  && XEXP (x, 0) == stack_pointer_rtx)
3197 	return 1;
3198       if (/* MEM_NOTRAP_P only relates to the actual position of the memory
3199 	     reference; moving it out of context such as when moving code
3200 	     when optimizing, might cause its address to become invalid.  */
3201 	  code_changed
3202 	  || !MEM_NOTRAP_P (x))
3203 	{
3204 	  poly_int64 size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : -1;
3205 	  return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
3206 					GET_MODE (x), code_changed);
3207 	}
3208 
3209       return 0;
3210 
3211       /* Division by a non-constant might trap.  */
3212     case DIV:
3213     case MOD:
3214     case UDIV:
3215     case UMOD:
3216       if (HONOR_SNANS (x))
3217 	return 1;
3218       if (FLOAT_MODE_P (GET_MODE (x)))
3219 	return flag_trapping_math;
3220       if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
3221 	return 1;
3222       if (GET_CODE (XEXP (x, 1)) == CONST_VECTOR)
3223 	{
3224 	  /* For CONST_VECTOR, return 1 if any element is or might be zero.  */
3225 	  unsigned int n_elts;
3226 	  rtx op = XEXP (x, 1);
3227 	  if (!GET_MODE_NUNITS (GET_MODE (op)).is_constant (&n_elts))
3228 	    {
3229 	      if (!CONST_VECTOR_DUPLICATE_P (op))
3230 		return 1;
3231 	      for (unsigned i = 0; i < (unsigned int) XVECLEN (op, 0); i++)
3232 		if (CONST_VECTOR_ENCODED_ELT (op, i) == const0_rtx)
3233 		  return 1;
3234 	    }
3235 	  else
3236 	    for (unsigned i = 0; i < n_elts; i++)
3237 	      if (CONST_VECTOR_ELT (op, i) == const0_rtx)
3238 		return 1;
3239 	}
3240       break;
3241 
3242     case EXPR_LIST:
3243       /* An EXPR_LIST is used to represent a function call.  This
3244 	 certainly may trap.  */
3245       return 1;
3246 
3247     case GE:
3248     case GT:
3249     case LE:
3250     case LT:
3251     case LTGT:
3252     case COMPARE:
3253       /* Some floating point comparisons may trap.  */
3254       if (!flag_trapping_math)
3255 	break;
3256       /* ??? There is no machine independent way to check for tests that trap
3257 	 when COMPARE is used, though many targets do make this distinction.
3258 	 For instance, sparc uses CCFPE for compares which generate exceptions
3259 	 and CCFP for compares which do not generate exceptions.  */
3260       if (HONOR_NANS (x))
3261 	return 1;
3262       /* But often the compare has some CC mode, so check operand
3263 	 modes as well.  */
3264       if (HONOR_NANS (XEXP (x, 0))
3265 	  || HONOR_NANS (XEXP (x, 1)))
3266 	return 1;
3267       break;
3268 
3269     case EQ:
3270     case NE:
3271       if (HONOR_SNANS (x))
3272 	return 1;
3273       /* Often comparison is CC mode, so check operand modes.  */
3274       if (HONOR_SNANS (XEXP (x, 0))
3275 	  || HONOR_SNANS (XEXP (x, 1)))
3276 	return 1;
3277       break;
3278 
3279     case FIX:
3280       /* Conversion of floating point might trap.  */
3281       if (flag_trapping_math && HONOR_NANS (XEXP (x, 0)))
3282 	return 1;
3283       break;
3284 
3285     case NEG:
3286     case ABS:
3287     case SUBREG:
3288     case VEC_MERGE:
3289     case VEC_SELECT:
3290     case VEC_CONCAT:
3291     case VEC_DUPLICATE:
3292       /* These operations don't trap even with floating point.  */
3293       break;
3294 
3295     default:
3296       /* Any floating arithmetic may trap.  */
3297       if (FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
3298 	return 1;
3299     }
3300 
3301   fmt = GET_RTX_FORMAT (code);
3302   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3303     {
3304       if (fmt[i] == 'e')
3305 	{
3306 	  if (may_trap_p_1 (XEXP (x, i), flags))
3307 	    return 1;
3308 	}
3309       else if (fmt[i] == 'E')
3310 	{
3311 	  int j;
3312 	  for (j = 0; j < XVECLEN (x, i); j++)
3313 	    if (may_trap_p_1 (XVECEXP (x, i, j), flags))
3314 	      return 1;
3315 	}
3316     }
3317   return 0;
3318 }
3319 
3320 /* Return nonzero if evaluating rtx X might cause a trap.  */
3321 
3322 int
3323 may_trap_p (const_rtx x)
3324 {
3325   return may_trap_p_1 (x, 0);
3326 }
3327 
3328 /* Same as above, but additionally return nonzero if evaluating rtx X might
3329    cause a fault.  We define a fault for the purpose of this function as an
3330    erroneous execution condition that cannot be encountered during the normal
3331    execution of a valid program; the typical example is an unaligned memory
3332    access on a strict alignment machine.  The compiler guarantees that it
3333    doesn't generate code that will fault from a valid program, but this
3334    guarantee doesn't mean anything for individual instructions.  Consider
3335    the following example:
3336 
3337       struct S { int d; union { char *cp; int *ip; }; };
3338 
3339       int foo(struct S *s)
3340       {
3341 	if (s->d == 1)
3342 	  return *s->ip;
3343 	else
3344 	  return *s->cp;
3345       }
3346 
3347    on a strict alignment machine.  In a valid program, foo will never be
3348    invoked on a structure for which d is equal to 1 and the underlying
3349    unique field of the union not aligned on a 4-byte boundary, but the
3350    expression *s->ip might cause a fault if considered individually.
3351 
3352    At the RTL level, potentially problematic expressions will almost always
3353    verify may_trap_p; for example, the above dereference can be emitted as
3354    (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
3355    However, suppose that foo is inlined in a caller that causes s->cp to
3356    point to a local character variable and guarantees that s->d is not set
3357    to 1; foo may have been effectively translated into pseudo-RTL as:
3358 
3359       if ((reg:SI) == 1)
3360 	(set (reg:SI) (mem:SI (%fp - 7)))
3361       else
3362 	(set (reg:QI) (mem:QI (%fp - 7)))
3363 
3364    Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
3365    memory reference to a stack slot, but it will certainly cause a fault
3366    on a strict alignment machine.  */
3367 
3368 int
3369 may_trap_or_fault_p (const_rtx x)
3370 {
3371   return may_trap_p_1 (x, 1);
3372 }
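
/* For example (sketch): speculative hoisting out of a conditional should
   use this stricter test, since the guarding condition may have been the
   only thing keeping a faulting access from executing:

     if (!may_trap_or_fault_p (PATTERN (insn)))
       ...  */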
3373 
3374 /* Replace any occurrence of FROM in X with TO.  The function does
3375    not descend into CONST_DOUBLE when doing the replacement.
3376 
3377    Note that copying is not done so X must not be shared unless all copies
3378    are to be modified.
3379 
3380    ALL_REGS is true if we want to replace all REGs equal to FROM, not just
3381    those pointer-equal ones.  */
3382 
3383 rtx
3384 replace_rtx (rtx x, rtx from, rtx to, bool all_regs)
3385 {
3386   int i, j;
3387   const char *fmt;
3388 
3389   if (x == from)
3390     return to;
3391 
3392   /* Allow this function to make replacements in EXPR_LISTs.  */
3393   if (x == 0)
3394     return 0;
3395 
3396   if (all_regs
3397       && REG_P (x)
3398       && REG_P (from)
3399       && REGNO (x) == REGNO (from))
3400     {
3401       gcc_assert (GET_MODE (x) == GET_MODE (from));
3402       return to;
3403     }
3404   else if (GET_CODE (x) == SUBREG)
3405     {
3406       rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to, all_regs);
3407 
3408       if (CONST_INT_P (new_rtx))
3409 	{
3410 	  x = simplify_subreg (GET_MODE (x), new_rtx,
3411 			       GET_MODE (SUBREG_REG (x)),
3412 			       SUBREG_BYTE (x));
3413 	  gcc_assert (x);
3414 	}
3415       else
3416 	SUBREG_REG (x) = new_rtx;
3417 
3418       return x;
3419     }
3420   else if (GET_CODE (x) == ZERO_EXTEND)
3421     {
3422       rtx new_rtx = replace_rtx (XEXP (x, 0), from, to, all_regs);
3423 
3424       if (CONST_INT_P (new_rtx))
3425 	{
3426 	  x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
3427 					new_rtx, GET_MODE (XEXP (x, 0)));
3428 	  gcc_assert (x);
3429 	}
3430       else
3431 	XEXP (x, 0) = new_rtx;
3432 
3433       return x;
3434     }
3435 
3436   fmt = GET_RTX_FORMAT (GET_CODE (x));
3437   for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3438     {
3439       if (fmt[i] == 'e')
3440 	XEXP (x, i) = replace_rtx (XEXP (x, i), from, to, all_regs);
3441       else if (fmt[i] == 'E')
3442 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3443 	  XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j),
3444 					   from, to, all_regs);
3445     }
3446 
3447   return x;
3448 }
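
/* For example (sketch): because no copying is done, a caller that must not
   modify shared RTL typically copies first:

     pat = replace_rtx (copy_rtx (pat), old_reg, new_reg, false);  */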
3449 
3450 /* Replace occurrences of the OLD_LABEL in *LOC with NEW_LABEL.  Also track
3451    the change in LABEL_NUSES if UPDATE_LABEL_NUSES.  */
3452 
3453 void
3454 replace_label (rtx *loc, rtx old_label, rtx new_label, bool update_label_nuses)
3455 {
3456   /* Handle jump tables specially, since ADDR_{DIFF_,}VECs can be long.  */
3457   rtx x = *loc;
3458   if (JUMP_TABLE_DATA_P (x))
3459     {
3460       x = PATTERN (x);
3461       rtvec vec = XVEC (x, GET_CODE (x) == ADDR_DIFF_VEC);
3462       int len = GET_NUM_ELEM (vec);
3463       for (int i = 0; i < len; ++i)
3464 	{
3465 	  rtx ref = RTVEC_ELT (vec, i);
3466 	  if (XEXP (ref, 0) == old_label)
3467 	    {
3468 	      XEXP (ref, 0) = new_label;
3469 	      if (update_label_nuses)
3470 		{
3471 		  ++LABEL_NUSES (new_label);
3472 		  --LABEL_NUSES (old_label);
3473 		}
3474 	    }
3475 	}
3476       return;
3477     }
3478 
3479   /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
3480      field.  This is not handled by the iterator because it doesn't
3481      handle unprinted ('0') fields.  */
3482   if (JUMP_P (x) && JUMP_LABEL (x) == old_label)
3483     JUMP_LABEL (x) = new_label;
3484 
3485   subrtx_ptr_iterator::array_type array;
3486   FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL)
3487     {
3488       rtx *loc = *iter;
3489       if (rtx x = *loc)
3490 	{
3491 	  if (GET_CODE (x) == SYMBOL_REF
3492 	      && CONSTANT_POOL_ADDRESS_P (x))
3493 	    {
3494 	      rtx c = get_pool_constant (x);
3495 	      if (rtx_referenced_p (old_label, c))
3496 		{
3497 		  /* Create a copy of constant C; replace the label inside
3498 		     but do not update LABEL_NUSES because uses in constant pool
3499 		     are not counted.  */
3500 		  rtx new_c = copy_rtx (c);
3501 		  replace_label (&new_c, old_label, new_label, false);
3502 
3503 		  /* Add the new constant NEW_C to constant pool and replace
3504 		     the old reference to constant by new reference.  */
3505 		  rtx new_mem = force_const_mem (get_pool_mode (x), new_c);
3506 		  *loc = replace_rtx (x, x, XEXP (new_mem, 0));
3507 		}
3508 	    }
3509 
3510 	  if ((GET_CODE (x) == LABEL_REF
3511 	       || GET_CODE (x) == INSN_LIST)
3512 	      && XEXP (x, 0) == old_label)
3513 	    {
3514 	      XEXP (x, 0) = new_label;
3515 	      if (update_label_nuses)
3516 		{
3517 		  ++LABEL_NUSES (new_label);
3518 		  --LABEL_NUSES (old_label);
3519 		}
3520 	    }
3521 	}
3522     }
3523 }
3524 
3525 void
3526 replace_label_in_insn (rtx_insn *insn, rtx_insn *old_label,
3527 		       rtx_insn *new_label, bool update_label_nuses)
3528 {
3529   rtx insn_as_rtx = insn;
3530   replace_label (&insn_as_rtx, old_label, new_label, update_label_nuses);
3531   gcc_checking_assert (insn_as_rtx == insn);
3532 }
3533 
3534 /* Return true if X is referenced in BODY.  */
3535 
3536 bool
3537 rtx_referenced_p (const_rtx x, const_rtx body)
3538 {
3539   subrtx_iterator::array_type array;
3540   FOR_EACH_SUBRTX (iter, array, body, ALL)
3541     if (const_rtx y = *iter)
3542       {
3543 	/* Check if a label_ref Y refers to label X.  */
3544 	if (GET_CODE (y) == LABEL_REF
3545 	    && LABEL_P (x)
3546 	    && label_ref_label (y) == x)
3547 	  return true;
3548 
3549 	if (rtx_equal_p (x, y))
3550 	  return true;
3551 
3552 	/* If Y is a reference to a constant-pool entry, traverse the constant.  */
3553 	if (GET_CODE (y) == SYMBOL_REF
3554 	    && CONSTANT_POOL_ADDRESS_P (y))
3555 	  iter.substitute (get_pool_constant (y));
3556       }
3557   return false;
3558 }
3559 
3560 /* If INSN is a tablejump, return true and store the label preceding the
3561    jump table in *LABELP and the jump table in *TABLEP.  LABELP and TABLEP may be NULL.  */
3562 
3563 bool
3564 tablejump_p (const rtx_insn *insn, rtx_insn **labelp,
3565 	     rtx_jump_table_data **tablep)
3566 {
3567   if (!JUMP_P (insn))
3568     return false;
3569 
3570   rtx target = JUMP_LABEL (insn);
3571   if (target == NULL_RTX || ANY_RETURN_P (target))
3572     return false;
3573 
3574   rtx_insn *label = as_a<rtx_insn *> (target);
3575   rtx_insn *table = next_insn (label);
3576   if (table == NULL_RTX || !JUMP_TABLE_DATA_P (table))
3577     return false;
3578 
3579   if (labelp)
3580     *labelp = label;
3581   if (tablep)
3582     *tablep = as_a <rtx_jump_table_data *> (table);
3583   return true;
3584 }
3585 
3586 /* For INSN known to satisfy tablejump_p, determine if it actually is a
3587    CASESI.  Return the insn pattern if so, NULL_RTX otherwise.  */
3588 
3589 rtx
3590 tablejump_casesi_pattern (const rtx_insn *insn)
3591 {
3592   rtx tmp;
3593 
3594   if ((tmp = single_set (insn)) != NULL
3595       && SET_DEST (tmp) == pc_rtx
3596       && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3597       && GET_CODE (XEXP (SET_SRC (tmp), 2)) == LABEL_REF)
3598     return tmp;
3599 
3600   return NULL_RTX;
3601 }
3602 
3603 /* A subroutine of computed_jump_p.  Return 1 if X contains a REG or MEM or
3604    constant that is not in the constant pool; the condition of an
3605    IF_THEN_ELSE is not examined.  */
3606 
3607 static int
3608 computed_jump_p_1 (const_rtx x)
3609 {
3610   const enum rtx_code code = GET_CODE (x);
3611   int i, j;
3612   const char *fmt;
3613 
3614   switch (code)
3615     {
3616     case LABEL_REF:
3617     case PC:
3618       return 0;
3619 
3620     case CONST:
3621     CASE_CONST_ANY:
3622     case SYMBOL_REF:
3623     case REG:
3624       return 1;
3625 
3626     case MEM:
3627       return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
3628 		&& CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));
3629 
3630     case IF_THEN_ELSE:
3631       return (computed_jump_p_1 (XEXP (x, 1))
3632 	      || computed_jump_p_1 (XEXP (x, 2)));
3633 
3634     default:
3635       break;
3636     }
3637 
3638   fmt = GET_RTX_FORMAT (code);
3639   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3640     {
3641       if (fmt[i] == 'e'
3642 	  && computed_jump_p_1 (XEXP (x, i)))
3643 	return 1;
3644 
3645       else if (fmt[i] == 'E')
3646 	for (j = 0; j < XVECLEN (x, i); j++)
3647 	  if (computed_jump_p_1 (XVECEXP (x, i, j)))
3648 	    return 1;
3649     }
3650 
3651   return 0;
3652 }
3653 
3654 /* Return nonzero if INSN is an indirect jump (aka computed jump).
3655 
3656    Tablejumps and casesi insns are not considered indirect jumps;
3657    we can recognize them by a (use (label_ref)).  */
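/* For illustration (a sketch, not an exhaustive description): a JUMP_INSN
   whose pattern is (set (pc) (reg R)) or (set (pc) (mem ...)), where the
   MEM is not a constant-pool reference, and whose JUMP_LABEL is NULL
   counts as a computed jump, while (set (pc) (label_ref L)) does not,
   because computed_jump_p_1 returns 0 for LABEL_REF.  */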
3658 
3659 int
3660 computed_jump_p (const rtx_insn *insn)
3661 {
3662   int i;
3663   if (JUMP_P (insn))
3664     {
3665       rtx pat = PATTERN (insn);
3666 
3667       /* If we have a JUMP_LABEL set, we're not a computed jump.  */
3668       if (JUMP_LABEL (insn) != NULL)
3669 	return 0;
3670 
3671       if (GET_CODE (pat) == PARALLEL)
3672 	{
3673 	  int len = XVECLEN (pat, 0);
3674 	  int has_use_labelref = 0;
3675 
3676 	  for (i = len - 1; i >= 0; i--)
3677 	    if (GET_CODE (XVECEXP (pat, 0, i)) == USE
3678 		&& (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
3679 		    == LABEL_REF))
3680 	      {
3681 	        has_use_labelref = 1;
3682 	        break;
3683 	      }
3684 
3685 	  if (! has_use_labelref)
3686 	    for (i = len - 1; i >= 0; i--)
3687 	      if (GET_CODE (XVECEXP (pat, 0, i)) == SET
3688 		  && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
3689 		  && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
3690 		return 1;
3691 	}
3692       else if (GET_CODE (pat) == SET
3693 	       && SET_DEST (pat) == pc_rtx
3694 	       && computed_jump_p_1 (SET_SRC (pat)))
3695 	return 1;
3696     }
3697   return 0;
3698 }
3699 
3700 
3701 
3702 /* MEM has a PRE/POST-INC/DEC/MODIFY address X.  Extract the operands of
3703    the equivalent add insn and pass the result to FN, using DATA as the
3704    final argument.  */
3705 
3706 static int
3707 for_each_inc_dec_find_inc_dec (rtx mem, for_each_inc_dec_fn fn, void *data)
3708 {
3709   rtx x = XEXP (mem, 0);
3710   switch (GET_CODE (x))
3711     {
3712     case PRE_INC:
3713     case POST_INC:
3714       {
3715 	poly_int64 size = GET_MODE_SIZE (GET_MODE (mem));
3716 	rtx r1 = XEXP (x, 0);
3717 	rtx c = gen_int_mode (size, GET_MODE (r1));
3718 	return fn (mem, x, r1, r1, c, data);
3719       }
3720 
3721     case PRE_DEC:
3722     case POST_DEC:
3723       {
3724 	poly_int64 size = GET_MODE_SIZE (GET_MODE (mem));
3725 	rtx r1 = XEXP (x, 0);
3726 	rtx c = gen_int_mode (-size, GET_MODE (r1));
3727 	return fn (mem, x, r1, r1, c, data);
3728       }
3729 
3730     case PRE_MODIFY:
3731     case POST_MODIFY:
3732       {
3733 	rtx r1 = XEXP (x, 0);
3734 	rtx add = XEXP (x, 1);
3735 	return fn (mem, x, r1, add, NULL, data);
3736       }
3737 
3738     default:
3739       gcc_unreachable ();
3740     }
3741 }
3742 
3743 /* Traverse *LOC looking for MEMs that have autoinc addresses.
3744    For each such autoinc operation found, call FN, passing it
3745    the innermost enclosing MEM, the operation itself, the RTX modified
3746    by the operation, two RTXs (the second may be NULL) that, once
3747    added, represent the value to be held by the modified RTX
3748    afterwards, and DATA.  FN is to return 0 to continue the
3749    traversal or any other value to have it returned to the caller of
3750    for_each_inc_dec.  */
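/* For example (a sketch, assuming GET_MODE_SIZE (SImode) is 4): for a MEM
   such as (mem:SI (post_inc:P (reg R))), FN is called with the MEM, the
   POST_INC rtx, R as the modified rtx, and R plus (const_int 4) as the two
   value rtxs.  For {PRE,POST}_MODIFY the first value rtx is the address's
   second operand (typically a PLUS) and the second is NULL.  */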
3751 
3752 int
3753 for_each_inc_dec (rtx x,
3754 		  for_each_inc_dec_fn fn,
3755 		  void *data)
3756 {
3757   subrtx_var_iterator::array_type array;
3758   FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
3759     {
3760       rtx mem = *iter;
3761       if (mem
3762 	  && MEM_P (mem)
3763 	  && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
3764 	{
3765 	  int res = for_each_inc_dec_find_inc_dec (mem, fn, data);
3766 	  if (res != 0)
3767 	    return res;
3768 	  iter.skip_subrtxes ();
3769 	}
3770     }
3771   return 0;
3772 }
3773 
3774 
3775 /* Searches X for any reference to REGNO, returning the rtx of the
3776    reference found if any.  Otherwise, returns NULL_RTX.  */
3777 
3778 rtx
3779 regno_use_in (unsigned int regno, rtx x)
3780 {
3781   const char *fmt;
3782   int i, j;
3783   rtx tem;
3784 
3785   if (REG_P (x) && REGNO (x) == regno)
3786     return x;
3787 
3788   fmt = GET_RTX_FORMAT (GET_CODE (x));
3789   for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3790     {
3791       if (fmt[i] == 'e')
3792 	{
3793 	  if ((tem = regno_use_in (regno, XEXP (x, i))))
3794 	    return tem;
3795 	}
3796       else if (fmt[i] == 'E')
3797 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3798 	  if ((tem = regno_use_in (regno , XVECEXP (x, i, j))))
3799 	    return tem;
3800     }
3801 
3802   return NULL_RTX;
3803 }
3804 
3805 /* Return a value indicating whether OP, an operand of a commutative
3806    operation, is preferred as the first or second operand.  The more
3807    positive the value, the stronger the preference for being the first
3808    operand.  */
3809 
3810 int
3811 commutative_operand_precedence (rtx op)
3812 {
3813   enum rtx_code code = GET_CODE (op);
3814 
3815   /* Constants always become the second operand.  Prefer "nice" constants.  */
3816   if (code == CONST_INT)
3817     return -10;
3818   if (code == CONST_WIDE_INT)
3819     return -9;
3820   if (code == CONST_POLY_INT)
3821     return -8;
3822   if (code == CONST_DOUBLE)
3823     return -8;
3824   if (code == CONST_FIXED)
3825     return -8;
3826   op = avoid_constant_pool_reference (op);
3827   code = GET_CODE (op);
3828 
3829   switch (GET_RTX_CLASS (code))
3830     {
3831     case RTX_CONST_OBJ:
3832       if (code == CONST_INT)
3833 	return -7;
3834       if (code == CONST_WIDE_INT)
3835 	return -6;
3836       if (code == CONST_POLY_INT)
3837 	return -5;
3838       if (code == CONST_DOUBLE)
3839 	return -5;
3840       if (code == CONST_FIXED)
3841 	return -5;
3842       return -4;
3843 
3844     case RTX_EXTRA:
3845       /* SUBREGs of objects should come second.  */
3846       if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
3847         return -3;
3848       return 0;
3849 
3850     case RTX_OBJ:
3851       /* Complex expressions should come first, so decrease the priority
3852          of objects.  Prefer pointer objects over non-pointer objects.  */
3853       if ((REG_P (op) && REG_POINTER (op))
3854 	  || (MEM_P (op) && MEM_POINTER (op)))
3855 	return -1;
3856       return -2;
3857 
3858     case RTX_COMM_ARITH:
3859       /* Prefer operands that are themselves commutative to be first.
3860          This helps to make things linear.  In particular,
3861          (and (and (reg) (reg)) (not (reg))) is canonical.  */
3862       return 4;
3863 
3864     case RTX_BIN_ARITH:
3865       /* If only one operand is a binary expression, it will be the first
3866          operand.  In particular,  (plus (minus (reg) (reg)) (neg (reg)))
3867          is canonical, although it will usually be further simplified.  */
3868       return 2;
3869 
3870     case RTX_UNARY:
3871       /* Then prefer NEG and NOT.  */
3872       if (code == NEG || code == NOT)
3873         return 1;
3874       /* FALLTHRU */
3875 
3876     default:
3877       return 0;
3878     }
3879 }
3880 
3881 /* Return true iff it is necessary to swap the operands of a commutative
3882    operation in order to canonicalize the expression.  */
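/* For example (a sketch): for (mult:SI (const_int 4) (reg R)),
   swap_commutative_operands_p returns true, because CONST_INT has
   precedence -10 while a REG scores -1 or -2, so canonicalization will
   put the register first and the constant second.  */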
3883 
3884 bool
3885 swap_commutative_operands_p (rtx x, rtx y)
3886 {
3887   return (commutative_operand_precedence (x)
3888 	  < commutative_operand_precedence (y));
3889 }
3890 
3891 /* Return 1 if X is an autoincrement side effect and the register is
3892    not the stack pointer.  */
3893 int
3894 auto_inc_p (const_rtx x)
3895 {
3896   switch (GET_CODE (x))
3897     {
3898     case PRE_INC:
3899     case POST_INC:
3900     case PRE_DEC:
3901     case POST_DEC:
3902     case PRE_MODIFY:
3903     case POST_MODIFY:
3904       /* There are no REG_INC notes for SP.  */
3905       if (XEXP (x, 0) != stack_pointer_rtx)
3906 	return 1;
3907     default:
3908       break;
3909     }
3910   return 0;
3911 }
3912 
3913 /* Return nonzero if IN contains a piece of rtl that has the address LOC.  */
3914 int
3915 loc_mentioned_in_p (rtx *loc, const_rtx in)
3916 {
3917   enum rtx_code code;
3918   const char *fmt;
3919   int i, j;
3920 
3921   if (!in)
3922     return 0;
3923 
3924   code = GET_CODE (in);
3925   fmt = GET_RTX_FORMAT (code);
3926   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3927     {
3928       if (fmt[i] == 'e')
3929 	{
3930 	  if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
3931 	    return 1;
3932 	}
3933       else if (fmt[i] == 'E')
3934 	for (j = XVECLEN (in, i) - 1; j >= 0; j--)
3935 	  if (loc == &XVECEXP (in, i, j)
3936 	      || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
3937 	    return 1;
3938     }
3939   return 0;
3940 }
3941 
3942 /* Reinterpret a subreg as a bit extraction from an integer and return
3943    the position of the least significant bit of the extracted value.
3944    In other words, if the extraction were performed as a shift right
3945    and mask, return the number of bits to shift right.
3946 
3947    The outer value of the subreg has OUTER_BYTES bytes and starts at
3948    byte offset SUBREG_BYTE within an inner value of INNER_BYTES bytes.  */
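/* For example (a sketch, assuming !BYTES_BIG_ENDIAN and !WORDS_BIG_ENDIAN):
   extracting a 2-byte value at SUBREG_BYTE 2 from a 4-byte inner value
   gives a byte position of 2, i.e. the extracted value starts at bit
   2 * BITS_PER_UNIT == 16.  */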
3949 
3950 poly_uint64
3951 subreg_size_lsb (poly_uint64 outer_bytes,
3952 		 poly_uint64 inner_bytes,
3953 		 poly_uint64 subreg_byte)
3954 {
3955   poly_uint64 subreg_end, trailing_bytes, byte_pos;
3956 
3957   /* A paradoxical subreg begins at bit position 0.  */
3958   gcc_checking_assert (ordered_p (outer_bytes, inner_bytes));
3959   if (maybe_gt (outer_bytes, inner_bytes))
3960     {
3961       gcc_checking_assert (known_eq (subreg_byte, 0U));
3962       return 0;
3963     }
3964 
3965   subreg_end = subreg_byte + outer_bytes;
3966   trailing_bytes = inner_bytes - subreg_end;
3967   if (WORDS_BIG_ENDIAN && BYTES_BIG_ENDIAN)
3968     byte_pos = trailing_bytes;
3969   else if (!WORDS_BIG_ENDIAN && !BYTES_BIG_ENDIAN)
3970     byte_pos = subreg_byte;
3971   else
3972     {
3973       /* When bytes and words have opposite endianness, we must be able
3974 	 to split offsets into words and bytes at compile time.  */
3975       poly_uint64 leading_word_part
3976 	= force_align_down (subreg_byte, UNITS_PER_WORD);
3977       poly_uint64 trailing_word_part
3978 	= force_align_down (trailing_bytes, UNITS_PER_WORD);
3979       /* If the subreg crosses a word boundary ensure that
3980 	 it also begins and ends on a word boundary.  */
3981       gcc_assert (known_le (subreg_end - leading_word_part,
3982 			    (unsigned int) UNITS_PER_WORD)
3983 		  || (known_eq (leading_word_part, subreg_byte)
3984 		      && known_eq (trailing_word_part, trailing_bytes)));
3985       if (WORDS_BIG_ENDIAN)
3986 	byte_pos = trailing_word_part + (subreg_byte - leading_word_part);
3987       else
3988 	byte_pos = leading_word_part + (trailing_bytes - trailing_word_part);
3989     }
3990 
3991   return byte_pos * BITS_PER_UNIT;
3992 }
3993 
3994 /* Given a subreg X, return the bit offset where the subreg begins
3995    (counting from the least significant bit of the reg).  */
3996 
3997 poly_uint64
3998 subreg_lsb (const_rtx x)
3999 {
4000   return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
4001 		       SUBREG_BYTE (x));
4002 }
4003 
4004 /* Return the subreg byte offset for a subreg whose outer value has
4005    OUTER_BYTES bytes, whose inner value has INNER_BYTES bytes, and where
4006    there are LSB_SHIFT *bits* between the lsb of the outer value and the
4007    lsb of the inner value.  This is the inverse of the calculation
4008    performed by subreg_lsb_1 (which converts byte offsets to bit shifts).  */
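/* Continuing the little-endian sketch given before subreg_size_lsb: with a
   2-byte outer value, a 4-byte inner value and LSB_SHIFT == 16, the lower
   part is 2 bytes and the function returns a SUBREG_BYTE of 2, inverting
   that example.  */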
4009 
4010 poly_uint64
4011 subreg_size_offset_from_lsb (poly_uint64 outer_bytes, poly_uint64 inner_bytes,
4012 			     poly_uint64 lsb_shift)
4013 {
4014   /* A paradoxical subreg begins at bit position 0.  */
4015   gcc_checking_assert (ordered_p (outer_bytes, inner_bytes));
4016   if (maybe_gt (outer_bytes, inner_bytes))
4017     {
4018       gcc_checking_assert (known_eq (lsb_shift, 0U));
4019       return 0;
4020     }
4021 
4022   poly_uint64 lower_bytes = exact_div (lsb_shift, BITS_PER_UNIT);
4023   poly_uint64 upper_bytes = inner_bytes - (lower_bytes + outer_bytes);
4024   if (WORDS_BIG_ENDIAN && BYTES_BIG_ENDIAN)
4025     return upper_bytes;
4026   else if (!WORDS_BIG_ENDIAN && !BYTES_BIG_ENDIAN)
4027     return lower_bytes;
4028   else
4029     {
4030       /* When bytes and words have opposite endianness, we must be able
4031 	 to split offsets into words and bytes at compile time.  */
4032       poly_uint64 lower_word_part = force_align_down (lower_bytes,
4033 						      UNITS_PER_WORD);
4034       poly_uint64 upper_word_part = force_align_down (upper_bytes,
4035 						      UNITS_PER_WORD);
4036       if (WORDS_BIG_ENDIAN)
4037 	return upper_word_part + (lower_bytes - lower_word_part);
4038       else
4039 	return lower_word_part + (upper_bytes - upper_word_part);
4040     }
4041 }
4042 
4043 /* Fill in information about a subreg of a hard register.
4044    xregno - A regno of an inner hard subreg_reg (or what will become one).
4045    xmode  - The mode of xregno.
4046    offset - The byte offset.
4047    ymode  - The mode of a top level SUBREG (or what may become one).
4048    info   - Pointer to structure to fill in.
4049 
4050    Rather than considering one particular inner register (and thus one
4051    particular "outer" register) in isolation, this function really uses
4052    XREGNO as a model for a sequence of isomorphic hard registers.  Thus the
4053    function does not check whether adding INFO->offset to XREGNO gives
4054    a valid hard register; even if INFO->offset + XREGNO is out of range,
4055    there might be another register of the same type that is in range.
4056    Likewise it doesn't check whether targetm.hard_regno_mode_ok accepts
4057    the new register, since that can depend on things like whether the final
4058    register number is even or odd.  Callers that want to check whether
4059    this particular subreg can be replaced by a simple (reg ...) should
4060    use simplify_subreg_regno.  */
4061 
4062 void
4063 subreg_get_info (unsigned int xregno, machine_mode xmode,
4064 		 poly_uint64 offset, machine_mode ymode,
4065 		 struct subreg_info *info)
4066 {
4067   unsigned int nregs_xmode, nregs_ymode;
4068 
4069   gcc_assert (xregno < FIRST_PSEUDO_REGISTER);
4070 
4071   poly_uint64 xsize = GET_MODE_SIZE (xmode);
4072   poly_uint64 ysize = GET_MODE_SIZE (ymode);
4073 
4074   bool rknown = false;
4075 
4076   /* If the register representation of a non-scalar mode has holes in it,
4077      we expect the scalar units to be concatenated together, with the holes
4078      distributed evenly among the scalar units.  Each scalar unit must occupy
4079      at least one register.  */
4080   if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
4081     {
4082       /* As a consequence, we must be dealing with a constant number of
4083 	 scalars, and thus a constant offset and number of units.  */
4084       HOST_WIDE_INT coffset = offset.to_constant ();
4085       HOST_WIDE_INT cysize = ysize.to_constant ();
4086       nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
4087       unsigned int nunits = GET_MODE_NUNITS (xmode).to_constant ();
4088       scalar_mode xmode_unit = GET_MODE_INNER (xmode);
4089       gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
4090       gcc_assert (nregs_xmode
4091 		  == (nunits
4092 		      * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
4093       gcc_assert (hard_regno_nregs (xregno, xmode)
4094 		  == hard_regno_nregs (xregno, xmode_unit) * nunits);
4095 
4096       /* You can only ask for a SUBREG of a value with holes in the middle
4097 	 if you don't cross the holes.  (Such a SUBREG should be done by
4098 	 picking a different register class, or doing it in memory if
4099 	 necessary.)  An example of a value with holes is XCmode on 32-bit
4100 	 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
4101 	 3 for each part, but in memory it's two 128-bit parts.
4102 	 Padding is assumed to be at the end (not necessarily the 'high part')
4103 	 of each unit.  */
4104       if ((coffset / GET_MODE_SIZE (xmode_unit) + 1 < nunits)
4105 	  && (coffset / GET_MODE_SIZE (xmode_unit)
4106 	      != ((coffset + cysize - 1) / GET_MODE_SIZE (xmode_unit))))
4107 	{
4108 	  info->representable_p = false;
4109 	  rknown = true;
4110 	}
4111     }
4112   else
4113     nregs_xmode = hard_regno_nregs (xregno, xmode);
4114 
4115   nregs_ymode = hard_regno_nregs (xregno, ymode);
4116 
4117   /* Subreg sizes must be ordered, so that we can tell whether they are
4118      partial, paradoxical or complete.  */
4119   gcc_checking_assert (ordered_p (xsize, ysize));
4120 
4121   /* Paradoxical subregs are otherwise valid.  */
4122   if (!rknown && known_eq (offset, 0U) && maybe_gt (ysize, xsize))
4123     {
4124       info->representable_p = true;
4125       /* If this is a big endian paradoxical subreg, which uses more
4126 	 actual hard registers than the original register, we must
4127 	 return a negative offset so that we find the proper highpart
4128 	 of the register.
4129 
4130 	 We assume that the ordering of registers within a multi-register
4131 	 value has a consistent endianness: if bytes and register words
4132 	 have different endianness, the hard registers that make up a
4133 	 multi-register value must be at least word-sized.  */
4134       if (REG_WORDS_BIG_ENDIAN)
4135 	info->offset = (int) nregs_xmode - (int) nregs_ymode;
4136       else
4137 	info->offset = 0;
4138       info->nregs = nregs_ymode;
4139       return;
4140     }
4141 
4142   /* If registers store different numbers of bits in the different
4143      modes, we cannot generally form this subreg.  */
4144   poly_uint64 regsize_xmode, regsize_ymode;
4145   if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
4146       && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
4147       && multiple_p (xsize, nregs_xmode, &regsize_xmode)
4148       && multiple_p (ysize, nregs_ymode, &regsize_ymode))
4149     {
4150       if (!rknown
4151 	  && ((nregs_ymode > 1 && maybe_gt (regsize_xmode, regsize_ymode))
4152 	      || (nregs_xmode > 1 && maybe_gt (regsize_ymode, regsize_xmode))))
4153 	{
4154 	  info->representable_p = false;
4155 	  if (!can_div_away_from_zero_p (ysize, regsize_xmode, &info->nregs)
4156 	      || !can_div_trunc_p (offset, regsize_xmode, &info->offset))
4157 	    /* Checked by validate_subreg.  We must know at compile time
4158 	       which inner registers are being accessed.  */
4159 	    gcc_unreachable ();
4160 	  return;
4161 	}
4162       /* It's not valid to extract a subreg of mode YMODE at OFFSET that
4163 	 would go outside of XMODE.  */
4164       if (!rknown && maybe_gt (ysize + offset, xsize))
4165 	{
4166 	  info->representable_p = false;
4167 	  info->nregs = nregs_ymode;
4168 	  if (!can_div_trunc_p (offset, regsize_xmode, &info->offset))
4169 	    /* Checked by validate_subreg.  We must know at compile time
4170 	       which inner registers are being accessed.  */
4171 	    gcc_unreachable ();
4172 	  return;
4173 	}
4174       /* Quick exit for the simple and common case of extracting whole
4175 	 subregisters from a multiregister value.  */
4176       /* ??? It would be better to integrate this into the code below,
4177 	 if we can generalize the concept enough and figure out how
4178 	 odd-sized modes can coexist with the other weird cases we support.  */
4179       HOST_WIDE_INT count;
4180       if (!rknown
4181 	  && WORDS_BIG_ENDIAN == REG_WORDS_BIG_ENDIAN
4182 	  && known_eq (regsize_xmode, regsize_ymode)
4183 	  && constant_multiple_p (offset, regsize_ymode, &count))
4184 	{
4185 	  info->representable_p = true;
4186 	  info->nregs = nregs_ymode;
4187 	  info->offset = count;
4188 	  gcc_assert (info->offset + info->nregs <= (int) nregs_xmode);
4189 	  return;
4190 	}
4191     }
4192 
4193   /* Lowpart subregs are otherwise valid.  */
4194   if (!rknown && known_eq (offset, subreg_lowpart_offset (ymode, xmode)))
4195     {
4196       info->representable_p = true;
4197       rknown = true;
4198 
4199       if (known_eq (offset, 0U) || nregs_xmode == nregs_ymode)
4200 	{
4201 	  info->offset = 0;
4202 	  info->nregs = nregs_ymode;
4203 	  return;
4204 	}
4205     }
4206 
4207   /* Set NUM_BLOCKS to the number of independently-representable YMODE
4208      values there are in (reg:XMODE XREGNO).  We can view the register
4209      as consisting of this number of independent "blocks", where each
4210      block occupies NREGS_YMODE registers and contains exactly one
4211      representable YMODE value.  */
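  /* For example (a sketch): if (reg:XMODE XREGNO) occupies four hard
     registers and each YMODE value occupies two, then NUM_BLOCKS is 2,
     and only a YMODE value that starts at the lowpart offset of one of
     those blocks is representable.  */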
4212   gcc_assert ((nregs_xmode % nregs_ymode) == 0);
4213   unsigned int num_blocks = nregs_xmode / nregs_ymode;
4214 
4215   /* Calculate the number of bytes in each block.  This must always
4216      be exact, otherwise we don't know how to verify the constraint.
4217      These conditions may be relaxed but subreg_regno_offset would
4218      need to be redesigned.  */
4219   poly_uint64 bytes_per_block = exact_div (xsize, num_blocks);
4220 
4221   /* Get the number of the first block that contains the subreg and the byte
4222      offset of the subreg from the start of that block.  */
4223   unsigned int block_number;
4224   poly_uint64 subblock_offset;
4225   if (!can_div_trunc_p (offset, bytes_per_block, &block_number,
4226 			&subblock_offset))
4227     /* Checked by validate_subreg.  We must know at compile time which
4228        inner registers are being accessed.  */
4229     gcc_unreachable ();
4230 
4231   if (!rknown)
4232     {
4233       /* Only the lowpart of each block is representable.  */
4234       info->representable_p
4235 	= known_eq (subblock_offset,
4236 		    subreg_size_lowpart_offset (ysize, bytes_per_block));
4237       rknown = true;
4238     }
4239 
4240   /* We assume that the ordering of registers within a multi-register
4241      value has a consistent endianness: if bytes and register words
4242      have different endianness, the hard registers that make up a
4243      multi-register value must be at least word-sized.  */
4244   if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN)
4245     /* The block number we calculated above followed memory endianness.
4246        Convert it to register endianness by counting back from the end.
4247        (Note that, because of the assumption above, each block must be
4248        at least word-sized.)  */
4249     info->offset = (num_blocks - block_number - 1) * nregs_ymode;
4250   else
4251     info->offset = block_number * nregs_ymode;
4252   info->nregs = nregs_ymode;
4253 }
4254 
4255 /* This function returns the regno offset of a subreg expression.
4256    xregno - A regno of an inner hard subreg_reg (or what will become one).
4257    xmode  - The mode of xregno.
4258    offset - The byte offset.
4259    ymode  - The mode of a top level SUBREG (or what may become one).
4260    RETURN - The regno offset which would be used.  */
4261 unsigned int
4262 subreg_regno_offset (unsigned int xregno, machine_mode xmode,
4263 		     poly_uint64 offset, machine_mode ymode)
4264 {
4265   struct subreg_info info;
4266   subreg_get_info (xregno, xmode, offset, ymode, &info);
4267   return info.offset;
4268 }
4269 
4270 /* This function returns true when the offset is representable via
4271    subreg_offset in the given regno.
4272    xregno - A regno of an inner hard subreg_reg (or what will become one).
4273    xmode  - The mode of xregno.
4274    offset - The byte offset.
4275    ymode  - The mode of a top level SUBREG (or what may become one).
4276    RETURN - Whether the offset is representable.  */
4277 bool
4278 subreg_offset_representable_p (unsigned int xregno, machine_mode xmode,
4279 			       poly_uint64 offset, machine_mode ymode)
4280 {
4281   struct subreg_info info;
4282   subreg_get_info (xregno, xmode, offset, ymode, &info);
4283   return info.representable_p;
4284 }
4285 
4286 /* Return the number of a YMODE register to which
4287 
4288        (subreg:YMODE (reg:XMODE XREGNO) OFFSET)
4289 
4290    can be simplified.  Return -1 if the subreg can't be simplified.
4291 
4292    XREGNO is a hard register number.  */
4293 
4294 int
4295 simplify_subreg_regno (unsigned int xregno, machine_mode xmode,
4296 		       poly_uint64 offset, machine_mode ymode)
4297 {
4298   struct subreg_info info;
4299   unsigned int yregno;
4300 
4301   /* Give the backend a chance to disallow the mode change.  */
4302   if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
4303       && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
4304       && !REG_CAN_CHANGE_MODE_P (xregno, xmode, ymode))
4305     return -1;
4306 
4307   /* We shouldn't simplify stack-related registers.  */
4308   if ((!reload_completed || frame_pointer_needed)
4309       && xregno == FRAME_POINTER_REGNUM)
4310     return -1;
4311 
4312   if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4313       && xregno == ARG_POINTER_REGNUM)
4314     return -1;
4315 
4316   if (xregno == STACK_POINTER_REGNUM
4317       /* We should convert the hard stack register in LRA if it is
4318 	 possible.  */
4319       && ! lra_in_progress)
4320     return -1;
4321 
4322   /* Try to get the register offset.  */
4323   subreg_get_info (xregno, xmode, offset, ymode, &info);
4324   if (!info.representable_p)
4325     return -1;
4326 
4327   /* Make sure that the offsetted register value is in range.  */
4328   yregno = xregno + info.offset;
4329   if (!HARD_REGISTER_NUM_P (yregno))
4330     return -1;
4331 
4332   /* See whether (reg:YMODE YREGNO) is valid.
4333 
4334      ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
4335      This is a kludge to work around how complex FP arguments are passed
4336      on IA-64 and should be fixed.  See PR target/49226.  */
4337   if (!targetm.hard_regno_mode_ok (yregno, ymode)
4338       && targetm.hard_regno_mode_ok (xregno, xmode))
4339     return -1;
4340 
4341   return (int) yregno;
4342 }
4343 
4344 /* Return the final regno that a subreg expression refers to.  */
4345 unsigned int
4346 subreg_regno (const_rtx x)
4347 {
4348   unsigned int ret;
4349   rtx subreg = SUBREG_REG (x);
4350   int regno = REGNO (subreg);
4351 
4352   ret = regno + subreg_regno_offset (regno,
4353 				     GET_MODE (subreg),
4354 				     SUBREG_BYTE (x),
4355 				     GET_MODE (x));
4356   return ret;
4357 
4358 }
4359 
4360 /* Return the number of registers that a subreg expression refers
4361    to.  */
4362 unsigned int
4363 subreg_nregs (const_rtx x)
4364 {
4365   return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
4366 }
4367 
4368 /* Return the number of registers that the subreg expression X refers to,
4369    given that its inner register has number REGNO.  This is a copy of
4370    subreg_nregs, changed so that the regno can be passed in.  */
4371 
4372 unsigned int
4373 subreg_nregs_with_regno (unsigned int regno, const_rtx x)
4374 {
4375   struct subreg_info info;
4376   rtx subreg = SUBREG_REG (x);
4377 
4378   subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
4379 		   &info);
4380   return info.nregs;
4381 }
4382 
4383 struct parms_set_data
4384 {
4385   int nregs;
4386   HARD_REG_SET regs;
4387 };
4388 
4389 /* Helper function for noticing stores to parameter registers.  */
4390 static void
4391 parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
4392 {
4393   struct parms_set_data *const d = (struct parms_set_data *) data;
4394   if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
4395       && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
4396     {
4397       CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
4398       d->nregs--;
4399     }
4400 }
4401 
4402 /* Look backward from CALL_INSN for the first parameter to be loaded.
4403    Note that loads of all parameters will not necessarily be
4404    found if CSE has eliminated some of them (e.g., an argument
4405    to the outer function is passed down as a parameter).
4406    Do not search past BOUNDARY.  */
4407 rtx_insn *
4408 find_first_parameter_load (rtx_insn *call_insn, rtx_insn *boundary)
4409 {
4410   struct parms_set_data parm;
4411   rtx p;
4412   rtx_insn *before, *first_set;
4413 
4414   /* Since different machines initialize their parameter registers
4415      in different orders, assume nothing.  Collect the set of all
4416      parameter registers.  */
4417   CLEAR_HARD_REG_SET (parm.regs);
4418   parm.nregs = 0;
4419   for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
4420     if (GET_CODE (XEXP (p, 0)) == USE
4421 	&& REG_P (XEXP (XEXP (p, 0), 0))
4422 	&& !STATIC_CHAIN_REG_P (XEXP (XEXP (p, 0), 0)))
4423       {
4424 	gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);
4425 
4426 	/* We only care about registers which can hold function
4427 	   arguments.  */
4428 	if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
4429 	  continue;
4430 
4431 	SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
4432 	parm.nregs++;
4433       }
4434   before = call_insn;
4435   first_set = call_insn;
4436 
4437   /* Search backward for the first set of a register in this set.  */
4438   while (parm.nregs && before != boundary)
4439     {
4440       before = PREV_INSN (before);
4441 
4442       /* It is possible that some loads got CSEed from one call to
4443          another.  Stop in that case.  */
4444       if (CALL_P (before))
4445 	break;
4446 
4447       /* Our caller must either ensure that we will find all sets
4448          (in case the code has not been optimized yet), or guard
4449          against intervening labels by setting BOUNDARY to the
4450          preceding CODE_LABEL.  */
4451       if (LABEL_P (before))
4452 	{
4453 	  gcc_assert (before == boundary);
4454 	  break;
4455 	}
4456 
4457       if (INSN_P (before))
4458 	{
4459 	  int nregs_old = parm.nregs;
4460 	  note_stores (before, parms_set, &parm);
4461 	  /* If we found something that did not set a parameter reg,
4462 	     we're done.  Do not keep going, as that might result
4463 	     in hoisting an insn before the setting of a pseudo
4464 	     that is used by the hoisted insn. */
4465 	  if (nregs_old != parm.nregs)
4466 	    first_set = before;
4467 	  else
4468 	    break;
4469 	}
4470     }
4471   return first_set;
4472 }
4473 
4474 /* Return true if we should avoid inserting code between INSN and preceding
4475    call instruction.  */
4476 
4477 bool
4478 keep_with_call_p (const rtx_insn *insn)
4479 {
4480   rtx set;
4481 
4482   if (INSN_P (insn) && (set = single_set (insn)) != NULL)
4483     {
4484       if (REG_P (SET_DEST (set))
4485 	  && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
4486 	  && fixed_regs[REGNO (SET_DEST (set))]
4487 	  && general_operand (SET_SRC (set), VOIDmode))
4488 	return true;
4489       if (REG_P (SET_SRC (set))
4490 	  && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
4491 	  && REG_P (SET_DEST (set))
4492 	  && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
4493 	return true;
4494       /* There may be a stack pop just after the call and before the store
4495 	 of the return register.  Search for the actual store when deciding
4496 	 if we can break or not.  */
4497       if (SET_DEST (set) == stack_pointer_rtx)
4498 	{
4499 	  /* This CONST_CAST is okay because next_nonnote_insn does not
4500 	     modify its argument and we only assign the result to a
4501 	     pointer-to-const variable.  */
4502 	  const rtx_insn *i2
4503 	    = next_nonnote_insn (const_cast<rtx_insn *> (insn));
4504 	  if (i2 && keep_with_call_p (i2))
4505 	    return true;
4506 	}
4507     }
4508   return false;
4509 }
4510 
4511 /* Return true if LABEL is a target of JUMP_INSN.  This applies only
4512    to non-complex jumps.  That is, direct unconditional, conditional,
4513    and tablejumps, but not computed jumps or returns.  It also does
4514    not apply to the fallthru case of a conditional jump.  */
4515 
4516 bool
4517 label_is_jump_target_p (const_rtx label, const rtx_insn *jump_insn)
4518 {
4519   rtx tmp = JUMP_LABEL (jump_insn);
4520   rtx_jump_table_data *table;
4521 
4522   if (label == tmp)
4523     return true;
4524 
4525   if (tablejump_p (jump_insn, NULL, &table))
4526     {
4527       rtvec vec = table->get_labels ();
4528       int i, veclen = GET_NUM_ELEM (vec);
4529 
4530       for (i = 0; i < veclen; ++i)
4531 	if (XEXP (RTVEC_ELT (vec, i), 0) == label)
4532 	  return true;
4533     }
4534 
4535   if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
4536     return true;
4537 
4538   return false;
4539 }
4540 
4541 
4542 /* Return an estimate of the cost of computing rtx X.
4543    One use is in cse, to decide which expression to keep in the hash table.
4544    Another is in rtl generation, to pick the cheapest way to multiply.
4545    Other uses like the latter are expected in the future.
4546 
4547    X appears as operand OPNO in an expression with code OUTER_CODE.
4548    SPEED specifies whether costs optimized for speed or size should
4549    be returned.  */
4550 
4551 int
4552 rtx_cost (rtx x, machine_mode mode, enum rtx_code outer_code,
4553 	  int opno, bool speed)
4554 {
4555   int i, j;
4556   enum rtx_code code;
4557   const char *fmt;
4558   int total;
4559   int factor;
4560   unsigned mode_size;
4561 
4562   if (x == 0)
4563     return 0;
4564 
4565   if (GET_CODE (x) == SET)
4566     /* A SET doesn't have a mode, so let's look at the SET_DEST to get
4567        the mode for the factor.  */
4568     mode = GET_MODE (SET_DEST (x));
4569   else if (GET_MODE (x) != VOIDmode)
4570     mode = GET_MODE (x);
4571 
4572   mode_size = estimated_poly_value (GET_MODE_SIZE (mode));
4573 
4574   /* A size N times larger than UNITS_PER_WORD likely needs N times as
4575      many insns, taking N times as long.  */
4576   factor = mode_size > UNITS_PER_WORD ? mode_size / UNITS_PER_WORD : 1;
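  /* For example (a sketch): on a target with UNITS_PER_WORD == 4, an
     8-byte DImode value gets FACTOR == 2, so the default MULT cost below
     becomes 2 * 2 * COSTS_N_INSNS (5).  */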
4577 
4578   /* Compute the default costs of certain things.
4579      Note that targetm.rtx_costs can override the defaults.  */
4580 
4581   code = GET_CODE (x);
4582   switch (code)
4583     {
4584     case MULT:
4585       /* Multiplication has time-complexity O(N*N), where N is the
4586 	 number of units (translated from digits) when using
4587 	 schoolbook long multiplication.  */
4588       total = factor * factor * COSTS_N_INSNS (5);
4589       break;
4590     case DIV:
4591     case UDIV:
4592     case MOD:
4593     case UMOD:
4594       /* Similarly, complexity for schoolbook long division.  */
4595       total = factor * factor * COSTS_N_INSNS (7);
4596       break;
4597     case USE:
4598       /* Used in combine.c as a marker.  */
4599       total = 0;
4600       break;
4601     default:
4602       total = factor * COSTS_N_INSNS (1);
4603     }
4604 
4605   switch (code)
4606     {
4607     case REG:
4608       return 0;
4609 
4610     case SUBREG:
4611       total = 0;
4612       /* If we can't tie these modes, make this expensive.  The larger
4613 	 the mode, the more expensive it is.  */
4614       if (!targetm.modes_tieable_p (mode, GET_MODE (SUBREG_REG (x))))
4615 	return COSTS_N_INSNS (2 + factor);
4616       break;
4617 
4618     case TRUNCATE:
4619       if (targetm.modes_tieable_p (mode, GET_MODE (XEXP (x, 0))))
4620 	{
4621 	  total = 0;
4622 	  break;
4623 	}
4624       /* FALLTHRU */
4625     default:
4626       if (targetm.rtx_costs (x, mode, outer_code, opno, &total, speed))
4627 	return total;
4628       break;
4629     }
4630 
4631   /* Sum the costs of the sub-rtx's, plus cost of this operation,
4632      which is already in total.  */
4633 
4634   fmt = GET_RTX_FORMAT (code);
4635   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4636     if (fmt[i] == 'e')
4637       total += rtx_cost (XEXP (x, i), mode, code, i, speed);
4638     else if (fmt[i] == 'E')
4639       for (j = 0; j < XVECLEN (x, i); j++)
4640 	total += rtx_cost (XVECEXP (x, i, j), mode, code, i, speed);
4641 
4642   return total;
4643 }
4644 
4645 /* Fill in the structure C with information about both speed and size rtx
4646    costs for X, which is operand OPNO in an expression with code OUTER.  */
4647 
4648 void
4649 get_full_rtx_cost (rtx x, machine_mode mode, enum rtx_code outer, int opno,
4650 		   struct full_rtx_costs *c)
4651 {
4652   c->speed = rtx_cost (x, mode, outer, opno, true);
4653   c->size = rtx_cost (x, mode, outer, opno, false);
4654 }
4655 
4656 
4657 /* Return the cost of address expression X.
4658    Expect that X is a properly formed address reference.
4659 
4660    The SPEED parameter specifies whether costs optimized for speed or size
4661    should be returned.  */
4662 
4663 int
4664 address_cost (rtx x, machine_mode mode, addr_space_t as, bool speed)
4665 {
4666   /* We may be asked for the cost of various unusual addresses, such as the
4667      operands of a push instruction.  It is not worthwhile to complicate the
4668      target hook to handle such cases.  */
4669 
4670   if (!memory_address_addr_space_p (mode, x, as))
4671     return 1000;
4672 
4673   return targetm.address_cost (x, mode, as, speed);
4674 }
4675 
4676 /* If the target doesn't override, compute the cost as with arithmetic.  */
4677 
4678 int
4679 default_address_cost (rtx x, machine_mode, addr_space_t, bool speed)
4680 {
4681   return rtx_cost (x, Pmode, MEM, 0, speed);
4682 }
4683 
4684 
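/* Return a mask of the bits that can be nonzero in X, when X is
   interpreted in MODE (or in the mode of X if MODE is VOIDmode).  For
   modes that are not scalar integer modes this is simply
   GET_MODE_MASK (MODE); otherwise the work is done by
   cached_nonzero_bits below.  */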
4685 unsigned HOST_WIDE_INT
4686 nonzero_bits (const_rtx x, machine_mode mode)
4687 {
4688   if (mode == VOIDmode)
4689     mode = GET_MODE (x);
4690   scalar_int_mode int_mode;
4691   if (!is_a <scalar_int_mode> (mode, &int_mode))
4692     return GET_MODE_MASK (mode);
4693   return cached_nonzero_bits (x, int_mode, NULL_RTX, VOIDmode, 0);
4694 }
4695 
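/* Return the number of leading copies of the sign bit in X, when X is
   interpreted in MODE (or in the mode of X if MODE is VOIDmode).  For
   modes that are not scalar integer modes the answer is simply 1.  */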
4696 unsigned int
4697 num_sign_bit_copies (const_rtx x, machine_mode mode)
4698 {
4699   if (mode == VOIDmode)
4700     mode = GET_MODE (x);
4701   scalar_int_mode int_mode;
4702   if (!is_a <scalar_int_mode> (mode, &int_mode))
4703     return 1;
4704   return cached_num_sign_bit_copies (x, int_mode, NULL_RTX, VOIDmode, 0);
4705 }
4706 
4707 /* Return true if nonzero_bits1 might recurse into both operands
4708    of X.  */
4709 
4710 static inline bool
4711 nonzero_bits_binary_arith_p (const_rtx x)
4712 {
4713   if (!ARITHMETIC_P (x))
4714     return false;
4715   switch (GET_CODE (x))
4716     {
4717     case AND:
4718     case XOR:
4719     case IOR:
4720     case UMIN:
4721     case UMAX:
4722     case SMIN:
4723     case SMAX:
4724     case PLUS:
4725     case MINUS:
4726     case MULT:
4727     case DIV:
4728     case UDIV:
4729     case MOD:
4730     case UMOD:
4731       return true;
4732     default:
4733       return false;
4734     }
4735 }
4736 
4737 /* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
4738    It avoids exponential behavior in nonzero_bits1 when X has
4739    identical subexpressions on the first or the second level.  */
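/* For example (a sketch): for (plus:SI (reg A) (reg A)) the two operands
   are the same rtx, so the nonzero bits of A are computed once and then
   passed to nonzero_bits1 as KNOWN_X/KNOWN_RET rather than being
   recomputed for the second operand.  */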
4740 
4741 static unsigned HOST_WIDE_INT
4742 cached_nonzero_bits (const_rtx x, scalar_int_mode mode, const_rtx known_x,
4743 		     machine_mode known_mode,
4744 		     unsigned HOST_WIDE_INT known_ret)
4745 {
4746   if (x == known_x && mode == known_mode)
4747     return known_ret;
4748 
4749   /* Try to find identical subexpressions.  If found call
4750      nonzero_bits1 on X with the subexpressions as KNOWN_X and the
4751      precomputed value for the subexpression as KNOWN_RET.  */
4752 
4753   if (nonzero_bits_binary_arith_p (x))
4754     {
4755       rtx x0 = XEXP (x, 0);
4756       rtx x1 = XEXP (x, 1);
4757 
4758       /* Check the first level.  */
4759       if (x0 == x1)
4760 	return nonzero_bits1 (x, mode, x0, mode,
4761 			      cached_nonzero_bits (x0, mode, known_x,
4762 						   known_mode, known_ret));
4763 
4764       /* Check the second level.  */
4765       if (nonzero_bits_binary_arith_p (x0)
4766 	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4767 	return nonzero_bits1 (x, mode, x1, mode,
4768 			      cached_nonzero_bits (x1, mode, known_x,
4769 						   known_mode, known_ret));
4770 
4771       if (nonzero_bits_binary_arith_p (x1)
4772 	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4773 	return nonzero_bits1 (x, mode, x0, mode,
4774 			      cached_nonzero_bits (x0, mode, known_x,
4775 						   known_mode, known_ret));
4776     }
4777 
4778   return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
4779 }
4780 
4781 /* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
4782    We don't let nonzero_bits recur into num_sign_bit_copies, because that
4783    is less useful.  We can't allow both, because that results in exponential
4784    run time recursion.  There is a nullstone testcase that triggered
4785    this.  This macro avoids accidental uses of num_sign_bit_copies.  */
4786 #define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
4787 
4788 /* Given an expression, X, compute which bits in X can be nonzero.
4789    We don't care about bits outside of those defined in MODE.
4790 
4791    For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X is
4792    an arithmetic operation, we can do better.  */
4793 
4794 static unsigned HOST_WIDE_INT
4795 nonzero_bits1 (const_rtx x, scalar_int_mode mode, const_rtx known_x,
4796 	       machine_mode known_mode,
4797 	       unsigned HOST_WIDE_INT known_ret)
4798 {
4799   unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
4800   unsigned HOST_WIDE_INT inner_nz;
4801   enum rtx_code code = GET_CODE (x);
4802   machine_mode inner_mode;
4803   unsigned int inner_width;
4804   scalar_int_mode xmode;
4805 
4806   unsigned int mode_width = GET_MODE_PRECISION (mode);
4807 
4808   if (CONST_INT_P (x))
4809     {
4810       if (SHORT_IMMEDIATES_SIGN_EXTEND
4811 	  && INTVAL (x) > 0
4812 	  && mode_width < BITS_PER_WORD
4813 	  && (UINTVAL (x) & (HOST_WIDE_INT_1U << (mode_width - 1))) != 0)
4814 	return UINTVAL (x) | (HOST_WIDE_INT_M1U << mode_width);
4815 
4816       return UINTVAL (x);
4817     }
4818 
4819   if (!is_a <scalar_int_mode> (GET_MODE (x), &xmode))
4820     return nonzero;
4821   unsigned int xmode_width = GET_MODE_PRECISION (xmode);
4822 
4823   /* If X is wider than MODE, use its mode instead.  */
4824   if (xmode_width > mode_width)
4825     {
4826       mode = xmode;
4827       nonzero = GET_MODE_MASK (mode);
4828       mode_width = xmode_width;
4829     }
4830 
4831   if (mode_width > HOST_BITS_PER_WIDE_INT)
4832     /* Our only callers in this case look for single bit values.  So
4833        just return the mode mask.  Those tests will then be false.  */
4834     return nonzero;
4835 
4836   /* If MODE is wider than X, but both are a single word for both the host
4837      and target machines, we can compute this from which bits of the object
4838      might be nonzero in its own mode, taking into account the fact that, on
4839      CISC machines, accessing an object in a wider mode generally causes the
4840      high-order bits to become undefined, so they are not known to be zero.
4841      We extend this reasoning to RISC machines for operations that might not
4842      operate on the full registers.  */
4843   if (mode_width > xmode_width
4844       && xmode_width <= BITS_PER_WORD
4845       && xmode_width <= HOST_BITS_PER_WIDE_INT
4846       && !(WORD_REGISTER_OPERATIONS && word_register_operation_p (x)))
4847     {
4848       nonzero &= cached_nonzero_bits (x, xmode,
4849 				      known_x, known_mode, known_ret);
4850       nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (xmode);
4851       return nonzero;
4852     }
4853 
4854   /* Please keep nonzero_bits_binary_arith_p above in sync with
4855      the code in the switch below.  */
4856   switch (code)
4857     {
4858     case REG:
4859 #if defined(POINTERS_EXTEND_UNSIGNED)
4860       /* If pointers extend unsigned and this is a pointer in Pmode, say that
4861 	 all the bits above ptr_mode are known to be zero.  */
4862       /* As we do not know which address space the pointer is referring to,
4863 	 we can do this only if the target does not support different pointer
4864 	 or address modes depending on the address space.  */
4865       if (target_default_pointer_address_modes_p ()
4866 	  && POINTERS_EXTEND_UNSIGNED
4867 	  && xmode == Pmode
4868 	  && REG_POINTER (x)
4869 	  && !targetm.have_ptr_extend ())
4870 	nonzero &= GET_MODE_MASK (ptr_mode);
4871 #endif
4872 
4873       /* Include declared information about alignment of pointers.  */
4874       /* ??? We don't properly preserve REG_POINTER changes across
4875 	 pointer-to-integer casts, so we can't trust it except for
4876 	 things that we know must be pointers.  See execute/960116-1.c.  */
4877       if ((x == stack_pointer_rtx
4878 	   || x == frame_pointer_rtx
4879 	   || x == arg_pointer_rtx)
4880 	  && REGNO_POINTER_ALIGN (REGNO (x)))
4881 	{
4882 	  unsigned HOST_WIDE_INT alignment
4883 	    = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;
4884 
4885 #ifdef PUSH_ROUNDING
4886 	  /* If PUSH_ROUNDING is defined, it is possible for the
4887 	     stack to be momentarily aligned only to that amount,
4888 	     so we pick the least alignment.  */
4889 	  if (x == stack_pointer_rtx && PUSH_ARGS)
4890 	    {
4891 	      poly_uint64 rounded_1 = PUSH_ROUNDING (poly_int64 (1));
4892 	      alignment = MIN (known_alignment (rounded_1), alignment);
4893 	    }
4894 #endif
4895 
4896 	  nonzero &= ~(alignment - 1);
4897 	}
4898 
4899       {
4900 	unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
4901 	rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, xmode, mode,
4902 						  &nonzero_for_hook);
4903 
4904 	if (new_rtx)
4905 	  nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
4906 						   known_mode, known_ret);
4907 
4908 	return nonzero_for_hook;
4909       }
4910 
4911     case MEM:
4912       /* On many, if not most, RISC machines, reading a byte from memory
4913 	 zeros the rest of the register.  Noticing that fact saves a lot
4914 	 of extra zero-extends.  */
4915       if (load_extend_op (xmode) == ZERO_EXTEND)
4916 	nonzero &= GET_MODE_MASK (xmode);
4917       break;
4918 
4919     case EQ:  case NE:
4920     case UNEQ:  case LTGT:
4921     case GT:  case GTU:  case UNGT:
4922     case LT:  case LTU:  case UNLT:
4923     case GE:  case GEU:  case UNGE:
4924     case LE:  case LEU:  case UNLE:
4925     case UNORDERED: case ORDERED:
4926       /* If this produces an integer result, we know which bits are set.
4927 	 Code here used to clear bits outside the mode of X, but that is
4928 	 now done above.  */
4929       /* Mind that MODE is the mode the caller wants to look at this
4930 	 operation in, and not the actual operation mode.  We can wind
4931 	 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
4932 	 that describes the results of a vector compare.  */
4933       if (GET_MODE_CLASS (xmode) == MODE_INT
4934 	  && mode_width <= HOST_BITS_PER_WIDE_INT)
4935 	nonzero = STORE_FLAG_VALUE;
4936       break;
4937 
4938     case NEG:
4939 #if 0
4940       /* Disabled to avoid exponential mutual recursion between nonzero_bits
4941 	 and num_sign_bit_copies.  */
4942       if (num_sign_bit_copies (XEXP (x, 0), xmode) == xmode_width)
4943 	nonzero = 1;
4944 #endif
4945 
4946       if (xmode_width < mode_width)
4947 	nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (xmode));
4948       break;
4949 
4950     case ABS:
4951 #if 0
4952       /* Disabled to avoid exponential mutual recursion between nonzero_bits
4953 	 and num_sign_bit_copies.  */
4954       if (num_sign_bit_copies (XEXP (x, 0), xmode) == xmode_width)
4955 	nonzero = 1;
4956 #endif
4957       break;
4958 
4959     case TRUNCATE:
4960       nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
4961 				       known_x, known_mode, known_ret)
4962 		  & GET_MODE_MASK (mode));
4963       break;
4964 
4965     case ZERO_EXTEND:
4966       nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4967 				      known_x, known_mode, known_ret);
4968       if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4969 	nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4970       break;
4971 
4972     case SIGN_EXTEND:
4973       /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
4974 	 Otherwise, show all the bits in the outer mode but not the inner
4975 	 may be nonzero.  */
4976       inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
4977 				      known_x, known_mode, known_ret);
4978       if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4979 	{
4980 	  inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4981 	  if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
4982 	    inner_nz |= (GET_MODE_MASK (mode)
4983 			 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
4984 	}
4985 
4986       nonzero &= inner_nz;
4987       break;
4988 
4989     case AND:
4990       nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4991 				       known_x, known_mode, known_ret)
4992       		 & cached_nonzero_bits (XEXP (x, 1), mode,
4993 					known_x, known_mode, known_ret);
4994       break;
4995 
4996     case XOR:   case IOR:
4997     case UMIN:  case UMAX:  case SMIN:  case SMAX:
4998       {
4999 	unsigned HOST_WIDE_INT nonzero0
5000 	   = cached_nonzero_bits (XEXP (x, 0), mode,
5001 				  known_x, known_mode, known_ret);
5002 
5003 	/* Don't call nonzero_bits a second time if it cannot change
5004 	   anything.  */
5005 	if ((nonzero & nonzero0) != nonzero)
5006 	  nonzero &= nonzero0
5007       		     | cached_nonzero_bits (XEXP (x, 1), mode,
5008 					    known_x, known_mode, known_ret);
5009       }
5010       break;
5011 
5012     case PLUS:  case MINUS:
5013     case MULT:
5014     case DIV:   case UDIV:
5015     case MOD:   case UMOD:
5016       /* We can apply the rules of arithmetic to compute the number of
5017 	 high- and low-order zero bits of these operations.  We start by
5018 	 computing the width (position of the highest-order nonzero bit)
5019 	 and the number of low-order zero bits for each value.  */
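      /* For example (a sketch): if the operands of a PLUS have nonzero
	 masks 0x6 and 0x3, their widths are 3 and 2 and their low-order
	 zero counts are 1 and 0, so RESULT_WIDTH is 4, RESULT_LOW is 0
	 and the sum is known to fit in the low four bits.  */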
5020       {
5021 	unsigned HOST_WIDE_INT nz0
5022 	  = cached_nonzero_bits (XEXP (x, 0), mode,
5023 				 known_x, known_mode, known_ret);
5024 	unsigned HOST_WIDE_INT nz1
5025 	  = cached_nonzero_bits (XEXP (x, 1), mode,
5026 				 known_x, known_mode, known_ret);
5027 	int sign_index = xmode_width - 1;
5028 	int width0 = floor_log2 (nz0) + 1;
5029 	int width1 = floor_log2 (nz1) + 1;
5030 	int low0 = ctz_or_zero (nz0);
5031 	int low1 = ctz_or_zero (nz1);
5032 	unsigned HOST_WIDE_INT op0_maybe_minusp
5033 	  = nz0 & (HOST_WIDE_INT_1U << sign_index);
5034 	unsigned HOST_WIDE_INT op1_maybe_minusp
5035 	  = nz1 & (HOST_WIDE_INT_1U << sign_index);
5036 	unsigned int result_width = mode_width;
5037 	int result_low = 0;
5038 
5039 	switch (code)
5040 	  {
5041 	  case PLUS:
5042 	    result_width = MAX (width0, width1) + 1;
5043 	    result_low = MIN (low0, low1);
5044 	    break;
5045 	  case MINUS:
5046 	    result_low = MIN (low0, low1);
5047 	    break;
5048 	  case MULT:
5049 	    result_width = width0 + width1;
5050 	    result_low = low0 + low1;
5051 	    break;
5052 	  case DIV:
5053 	    if (width1 == 0)
5054 	      break;
5055 	    if (!op0_maybe_minusp && !op1_maybe_minusp)
5056 	      result_width = width0;
5057 	    break;
5058 	  case UDIV:
5059 	    if (width1 == 0)
5060 	      break;
5061 	    result_width = width0;
5062 	    break;
5063 	  case MOD:
5064 	    if (width1 == 0)
5065 	      break;
5066 	    if (!op0_maybe_minusp && !op1_maybe_minusp)
5067 	      result_width = MIN (width0, width1);
5068 	    result_low = MIN (low0, low1);
5069 	    break;
5070 	  case UMOD:
5071 	    if (width1 == 0)
5072 	      break;
5073 	    result_width = MIN (width0, width1);
5074 	    result_low = MIN (low0, low1);
5075 	    break;
5076 	  default:
5077 	    gcc_unreachable ();
5078 	  }
5079 
5080 	/* Note that mode_width <= HOST_BITS_PER_WIDE_INT, see above.  */
5081 	if (result_width < mode_width)
5082 	  nonzero &= (HOST_WIDE_INT_1U << result_width) - 1;
5083 
5084 	if (result_low > 0)
5085 	  {
5086 	    if (result_low < HOST_BITS_PER_WIDE_INT)
5087 	      nonzero &= ~((HOST_WIDE_INT_1U << result_low) - 1);
5088 	    else
5089 	      nonzero = 0;
5090 	  }
5091       }
5092       break;
5093 
5094     case ZERO_EXTRACT:
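      /* Only the low INTVAL (XEXP (x, 1)) bits of the extraction can be
	 nonzero.  */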
5095       if (CONST_INT_P (XEXP (x, 1))
5096 	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
5097 	nonzero &= (HOST_WIDE_INT_1U << INTVAL (XEXP (x, 1))) - 1;
5098       break;
5099 
5100     case SUBREG:
5101       /* If this is a SUBREG formed for a promoted variable that has
5102 	 been zero-extended, we know that at least the high-order bits
5103 	 are zero, though others might be too.  */
5104       if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x))
5105 	nonzero = GET_MODE_MASK (xmode)
5106 		  & cached_nonzero_bits (SUBREG_REG (x), xmode,
5107 					 known_x, known_mode, known_ret);
5108 
5109       /* If the inner mode is a single word for both the host and target
5110 	 machines, we can compute this from which bits of the inner
5111 	 object might be nonzero.  */
5112       inner_mode = GET_MODE (SUBREG_REG (x));
5113       if (GET_MODE_PRECISION (inner_mode).is_constant (&inner_width)
5114 	  && inner_width <= BITS_PER_WORD
5115 	  && inner_width <= HOST_BITS_PER_WIDE_INT)
5116 	{
5117 	  nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
5118 					  known_x, known_mode, known_ret);
5119 
5120           /* On a typical CISC machine, accessing an object in a wider mode
5121 	     causes the high-order bits to become undefined.  So they are
5122 	     not known to be zero.
5123 
5124 	     On a typical RISC machine, we only have to worry about the way
5125 	     loads are extended.  Otherwise, if we get a reload for the inner
5126 	     part, it may be loaded from the stack, and then we may lose all
5127 	     the zero bits that existed before the store to the stack.  */
5128 	  rtx_code extend_op;
5129 	  if ((!WORD_REGISTER_OPERATIONS
5130 	       || ((extend_op = load_extend_op (inner_mode)) == SIGN_EXTEND
5131 		   ? val_signbit_known_set_p (inner_mode, nonzero)
5132 		   : extend_op != ZERO_EXTEND)
5133 	       || !MEM_P (SUBREG_REG (x)))
5134 	      && xmode_width > inner_width)
5135 	    nonzero
5136 	      |= (GET_MODE_MASK (GET_MODE (x)) & ~GET_MODE_MASK (inner_mode));
5137 	}
5138       break;
5139 
5140     case ASHIFT:
5141     case ASHIFTRT:
5142     case LSHIFTRT:
5143     case ROTATE:
5144     case ROTATERT:
5145       /* The nonzero bits are in two classes: any bits within MODE
5146 	 that aren't in xmode are always significant.  The rest of the
5147 	 nonzero bits are those that are significant in the operand of
5148 	 the shift when shifted the appropriate number of bits.  This
5149 	 shows that high-order bits are cleared by the right shift and
5150 	 low-order bits by left shifts.  */
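      /* For instance, with op_nonzero == 0xff and a shift count of 4,
	 ASHIFT leaves 0xff0 possibly nonzero and LSHIFTRT leaves 0xf.  */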
5151       if (CONST_INT_P (XEXP (x, 1))
5152 	  && INTVAL (XEXP (x, 1)) >= 0
5153 	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
5154 	  && INTVAL (XEXP (x, 1)) < xmode_width)
5155 	{
5156 	  int count = INTVAL (XEXP (x, 1));
5157 	  unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (xmode);
5158 	  unsigned HOST_WIDE_INT op_nonzero
5159 	    = cached_nonzero_bits (XEXP (x, 0), mode,
5160 				   known_x, known_mode, known_ret);
5161 	  unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
5162 	  unsigned HOST_WIDE_INT outer = 0;
5163 
5164 	  if (mode_width > xmode_width)
5165 	    outer = (op_nonzero & nonzero & ~mode_mask);
5166 
5167 	  switch (code)
5168 	    {
5169 	    case ASHIFT:
5170 	      inner <<= count;
5171 	      break;
5172 
5173 	    case LSHIFTRT:
5174 	      inner >>= count;
5175 	      break;
5176 
5177 	    case ASHIFTRT:
5178 	      inner >>= count;
5179 
5180 	      /* If the sign bit may have been nonzero before the shift, we
5181 		 need to mark all the places it could have been copied to
5182 		 by the shift as possibly nonzero.  */
5183 	      if (inner & (HOST_WIDE_INT_1U << (xmode_width - 1 - count)))
5184 		inner |= (((HOST_WIDE_INT_1U << count) - 1)
5185 			  << (xmode_width - count));
5186 	      break;
5187 
5188 	    case ROTATE:
5189 	      inner = (inner << (count % xmode_width)
5190 		       | (inner >> (xmode_width - (count % xmode_width))))
5191 		      & mode_mask;
5192 	      break;
5193 
5194 	    case ROTATERT:
5195 	      inner = (inner >> (count % xmode_width)
5196 		       | (inner << (xmode_width - (count % xmode_width))))
5197 		      & mode_mask;
5198 	      break;
5199 
5200 	    default:
5201 	      gcc_unreachable ();
5202 	    }
5203 
5204 	  nonzero &= (outer | inner);
5205 	}
5206       break;
5207 
5208     case FFS:
5209     case POPCOUNT:
5210       /* This is at most the number of bits in the mode.  */
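      /* E.g. in a 32-bit mode the result is at most 32, so only the low
	 six bits (a mask of 63) can be nonzero.  */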
5211       nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
5212       break;
5213 
5214     case CLZ:
5215       /* If CLZ has a known value at zero, then the nonzero bits are
5216 	 that value, plus the number of bits in the mode minus one.  */
5217       if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
5218 	nonzero
5219 	  |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
5220       else
5221 	nonzero = -1;
5222       break;
5223 
5224     case CTZ:
5225       /* If CTZ has a known value at zero, then the nonzero bits are
5226 	 that value, plus the number of bits in the mode minus one.  */
5227       if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
5228 	nonzero
5229 	  |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
5230       else
5231 	nonzero = -1;
5232       break;
5233 
5234     case CLRSB:
5235       /* This is at most the number of bits in the mode minus 1.  */
5236       nonzero = (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
5237       break;
5238 
5239     case PARITY:
5240       nonzero = 1;
5241       break;
5242 
5243     case IF_THEN_ELSE:
5244       {
5245 	unsigned HOST_WIDE_INT nonzero_true
5246 	  = cached_nonzero_bits (XEXP (x, 1), mode,
5247 				 known_x, known_mode, known_ret);
5248 
5249 	/* Don't call nonzero_bits for the second time if it cannot change
5250 	   anything.  */
5251 	if ((nonzero & nonzero_true) != nonzero)
5252 	  nonzero &= nonzero_true
5253       		     | cached_nonzero_bits (XEXP (x, 2), mode,
5254 					    known_x, known_mode, known_ret);
5255       }
5256       break;
5257 
5258     default:
5259       break;
5260     }
5261 
5262   return nonzero;
5263 }
5264 
5265 /* See the macro definition above.  */
5266 #undef cached_num_sign_bit_copies
5267 
5268 
5269 /* Return true if num_sign_bit_copies1 might recurse into both operands
5270    of X.  */
5271 
5272 static inline bool
5273 num_sign_bit_copies_binary_arith_p (const_rtx x)
5274 {
5275   if (!ARITHMETIC_P (x))
5276     return false;
5277   switch (GET_CODE (x))
5278     {
5279     case IOR:
5280     case AND:
5281     case XOR:
5282     case SMIN:
5283     case SMAX:
5284     case UMIN:
5285     case UMAX:
5286     case PLUS:
5287     case MINUS:
5288     case MULT:
5289       return true;
5290     default:
5291       return false;
5292     }
5293 }
5294 
5295 /* The function cached_num_sign_bit_copies is a wrapper around
5296    num_sign_bit_copies1.  It avoids exponential behavior in
5297    num_sign_bit_copies1 when X has identical subexpressions on the
5298    first or the second level.  */
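/* For instance, given (plus X (mult X Y)), the value for X is computed once
   and threaded through as KNOWN_X/KNOWN_RET while the rest of the expression
   is analyzed, instead of being recomputed along both paths.  */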
5299 
5300 static unsigned int
5301 cached_num_sign_bit_copies (const_rtx x, scalar_int_mode mode,
5302 			    const_rtx known_x, machine_mode known_mode,
5303 			    unsigned int known_ret)
5304 {
5305   if (x == known_x && mode == known_mode)
5306     return known_ret;
5307 
5308   /* Try to find identical subexpressions.  If found call
5309      num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
5310      the precomputed value for the subexpression as KNOWN_RET.  */
5311 
5312   if (num_sign_bit_copies_binary_arith_p (x))
5313     {
5314       rtx x0 = XEXP (x, 0);
5315       rtx x1 = XEXP (x, 1);
5316 
5317       /* Check the first level.  */
5318       if (x0 == x1)
5319 	return
5320 	  num_sign_bit_copies1 (x, mode, x0, mode,
5321 				cached_num_sign_bit_copies (x0, mode, known_x,
5322 							    known_mode,
5323 							    known_ret));
5324 
5325       /* Check the second level.  */
5326       if (num_sign_bit_copies_binary_arith_p (x0)
5327 	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
5328 	return
5329 	  num_sign_bit_copies1 (x, mode, x1, mode,
5330 				cached_num_sign_bit_copies (x1, mode, known_x,
5331 							    known_mode,
5332 							    known_ret));
5333 
5334       if (num_sign_bit_copies_binary_arith_p (x1)
5335 	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
5336 	return
5337 	  num_sign_bit_copies1 (x, mode, x0, mode,
5338 				cached_num_sign_bit_copies (x0, mode, known_x,
5339 							    known_mode,
5340 							    known_ret));
5341     }
5342 
5343   return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
5344 }
5345 
5346 /* Return the number of bits at the high-order end of X that are known to
5347    be equal to the sign bit.  X will be used in mode MODE.  The returned
5348    value will always be between 1 and the number of bits in MODE.  */
5349 
5350 static unsigned int
5351 num_sign_bit_copies1 (const_rtx x, scalar_int_mode mode, const_rtx known_x,
5352 		      machine_mode known_mode,
5353 		      unsigned int known_ret)
5354 {
5355   enum rtx_code code = GET_CODE (x);
5356   unsigned int bitwidth = GET_MODE_PRECISION (mode);
5357   int num0, num1, result;
5358   unsigned HOST_WIDE_INT nonzero;
5359 
5360   if (CONST_INT_P (x))
5361     {
5362       /* If the constant is negative, take its 1's complement and remask.
5363 	 Then see how many zero bits we have.  */
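      /* For instance, in a 32-bit mode (const_int -4) is ...11111100;
	 complementing and masking gives 3, so the result is
	 32 - floor_log2 (3) - 1 = 30 copies of the sign bit.  */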
5364       nonzero = UINTVAL (x) & GET_MODE_MASK (mode);
5365       if (bitwidth <= HOST_BITS_PER_WIDE_INT
5366 	  && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5367 	nonzero = (~nonzero) & GET_MODE_MASK (mode);
5368 
5369       return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
5370     }
5371 
5372   scalar_int_mode xmode, inner_mode;
5373   if (!is_a <scalar_int_mode> (GET_MODE (x), &xmode))
5374     return 1;
5375 
5376   unsigned int xmode_width = GET_MODE_PRECISION (xmode);
5377 
5378   /* For a smaller mode, just ignore the high bits.  */
5379   if (bitwidth < xmode_width)
5380     {
5381       num0 = cached_num_sign_bit_copies (x, xmode,
5382 					 known_x, known_mode, known_ret);
5383       return MAX (1, num0 - (int) (xmode_width - bitwidth));
5384     }
5385 
5386   if (bitwidth > xmode_width)
5387     {
5388       /* If this machine does not do all register operations on the entire
5389 	 register and MODE is wider than the mode of X, we can say nothing
5390 	 at all about the high-order bits.  We extend this reasoning to RISC
5391 	 machines for operations that might not operate on full registers.  */
5392       if (!(WORD_REGISTER_OPERATIONS && word_register_operation_p (x)))
5393 	return 1;
5394 
5395       /* Likewise on machines that do, if the mode of the object is smaller
5396 	 than a word and loads of that size don't sign extend, we can say
5397 	 nothing about the high order bits.  */
5398       if (xmode_width < BITS_PER_WORD
5399 	  && load_extend_op (xmode) != SIGN_EXTEND)
5400 	return 1;
5401     }
5402 
5403   /* Please keep num_sign_bit_copies_binary_arith_p above in sync with
5404      the code in the switch below.  */
5405   switch (code)
5406     {
5407     case REG:
5408 
5409 #if defined(POINTERS_EXTEND_UNSIGNED)
5410       /* If pointers extend signed and this is a pointer in Pmode, say that
5411 	 all the bits above ptr_mode are known to be sign bit copies.  */
5412       /* As we do not know which address space the pointer is referring to,
5413 	 we can do this only if the target does not support different pointer
5414 	 or address modes depending on the address space.  */
5415       if (target_default_pointer_address_modes_p ()
5416 	  && ! POINTERS_EXTEND_UNSIGNED && xmode == Pmode
5417 	  && mode == Pmode && REG_POINTER (x)
5418 	  && !targetm.have_ptr_extend ())
5419 	return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1;
5420 #endif
5421 
5422       {
5423 	unsigned int copies_for_hook = 1, copies = 1;
5424 	rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, xmode, mode,
5425 							 &copies_for_hook);
5426 
5427 	if (new_rtx)
5428 	  copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
5429 					       known_mode, known_ret);
5430 
5431 	if (copies > 1 || copies_for_hook > 1)
5432 	  return MAX (copies, copies_for_hook);
5433 
5434 	/* Else, use nonzero_bits to guess num_sign_bit_copies (see below).  */
5435       }
5436       break;
5437 
5438     case MEM:
5439       /* Some RISC machines sign-extend all loads of smaller than a word.  */
5440       if (load_extend_op (xmode) == SIGN_EXTEND)
5441 	return MAX (1, ((int) bitwidth - (int) xmode_width + 1));
5442       break;
5443 
5444     case SUBREG:
5445       /* If this is a SUBREG for a promoted object that is sign-extended
5446 	 and we are looking at it in a wider mode, we know that at least the
5447 	 high-order bits are sign bit copies.  */
5448 
5449       if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_SIGNED_P (x))
5450 	{
5451 	  num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
5452 					     known_x, known_mode, known_ret);
5453 	  return MAX ((int) bitwidth - (int) xmode_width + 1, num0);
5454 	}
5455 
5456       if (is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (x)), &inner_mode))
5457 	{
5458 	  /* For a smaller object, just ignore the high bits.  */
5459 	  if (bitwidth <= GET_MODE_PRECISION (inner_mode))
5460 	    {
5461 	      num0 = cached_num_sign_bit_copies (SUBREG_REG (x), inner_mode,
5462 						 known_x, known_mode,
5463 						 known_ret);
5464 	      return MAX (1, num0 - (int) (GET_MODE_PRECISION (inner_mode)
5465 					   - bitwidth));
5466 	    }
5467 
5468 	  /* For paradoxical SUBREGs on machines where all register operations
5469 	     affect the entire register, just look inside.  Note that we are
5470 	     passing MODE to the recursive call, so the number of sign bit
5471 	     copies will remain relative to that mode, not the inner mode.
5472 
5473 	     This works only if loads sign extend.  Otherwise, if we get a
5474 	     reload for the inner part, it may be loaded from the stack, and
5475 	     then we lose all sign bit copies that existed before the store
5476 	     to the stack.  */
5477 	  if (WORD_REGISTER_OPERATIONS
5478 	      && load_extend_op (inner_mode) == SIGN_EXTEND
5479 	      && paradoxical_subreg_p (x)
5480 	      && MEM_P (SUBREG_REG (x)))
5481 	    return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
5482 					       known_x, known_mode, known_ret);
5483 	}
5484       break;
5485 
5486     case SIGN_EXTRACT:
5487       if (CONST_INT_P (XEXP (x, 1)))
5488 	return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
5489       break;
5490 
5491     case SIGN_EXTEND:
5492       if (is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
5493 	return (bitwidth - GET_MODE_PRECISION (inner_mode)
5494 		+ cached_num_sign_bit_copies (XEXP (x, 0), inner_mode,
5495 					      known_x, known_mode, known_ret));
5496       break;
5497 
5498     case TRUNCATE:
5499       /* For a smaller object, just ignore the high bits.  */
5500       inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
5501       num0 = cached_num_sign_bit_copies (XEXP (x, 0), inner_mode,
5502 					 known_x, known_mode, known_ret);
5503       return MAX (1, (num0 - (int) (GET_MODE_PRECISION (inner_mode)
5504 				    - bitwidth)));
5505 
5506     case NOT:
5507       return cached_num_sign_bit_copies (XEXP (x, 0), mode,
5508 					 known_x, known_mode, known_ret);
5509 
5510     case ROTATE:       case ROTATERT:
5511       /* If we are rotating left by a number of bits less than the number
5512 	 of sign bit copies, we can just subtract that amount from the
5513 	 number.  */
5514       if (CONST_INT_P (XEXP (x, 1))
5515 	  && INTVAL (XEXP (x, 1)) >= 0
5516 	  && INTVAL (XEXP (x, 1)) < (int) bitwidth)
5517 	{
5518 	  num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5519 					     known_x, known_mode, known_ret);
5520 	  return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
5521 				 : (int) bitwidth - INTVAL (XEXP (x, 1))));
5522 	}
5523       break;
5524 
5525     case NEG:
5526       /* In general, this subtracts one sign bit copy.  But if the value
5527 	 is known to be positive, the number of sign bit copies is the
5528 	 same as that of the input.  Finally, if the input has just one bit
5529 	 that might be nonzero, all the bits are copies of the sign bit.  */
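      /* E.g. an operand with 8 sign bit copies that might be negative
	 guarantees only 7 copies for the negation; if the operand is known
	 to be nonnegative, all 8 are preserved.  */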
5530       num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5531 					 known_x, known_mode, known_ret);
5532       if (bitwidth > HOST_BITS_PER_WIDE_INT)
5533 	return num0 > 1 ? num0 - 1 : 1;
5534 
5535       nonzero = nonzero_bits (XEXP (x, 0), mode);
5536       if (nonzero == 1)
5537 	return bitwidth;
5538 
5539       if (num0 > 1
5540 	  && ((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero))
5541 	num0--;
5542 
5543       return num0;
5544 
5545     case IOR:   case AND:   case XOR:
5546     case SMIN:  case SMAX:  case UMIN:  case UMAX:
5547       /* Logical operations will preserve the number of sign-bit copies.
5548 	 MIN and MAX operations always return one of the operands.  */
5549       num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5550 					 known_x, known_mode, known_ret);
5551       num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5552 					 known_x, known_mode, known_ret);
5553 
5554       /* If num1 is clearing some of the top bits then regardless of
5555 	 the other term, we are guaranteed to have at least that many
5556 	 high-order zero bits.  */
5557       if (code == AND
5558 	  && num1 > 1
5559 	  && bitwidth <= HOST_BITS_PER_WIDE_INT
5560 	  && CONST_INT_P (XEXP (x, 1))
5561 	  && (UINTVAL (XEXP (x, 1))
5562 	      & (HOST_WIDE_INT_1U << (bitwidth - 1))) == 0)
5563 	return num1;
5564 
5565       /* Similarly for IOR when setting high-order bits.  */
5566       if (code == IOR
5567 	  && num1 > 1
5568 	  && bitwidth <= HOST_BITS_PER_WIDE_INT
5569 	  && CONST_INT_P (XEXP (x, 1))
5570 	  && (UINTVAL (XEXP (x, 1))
5571 	      & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5572 	return num1;
5573 
5574       return MIN (num0, num1);
5575 
5576     case PLUS:  case MINUS:
5577       /* For addition and subtraction, we can have a 1-bit carry.  However,
5578 	 if we are subtracting 1 from a positive number, there will not
5579 	 be such a carry.  Furthermore, if the positive number is known to
5580 	 be 0 or 1, we know the result is either -1 or 0.  */
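      /* E.g. for (plus X (const_int -1)) where nonzero_bits (X) is 1, X is
	 0 or 1, so the sum is -1 or 0 and every bit copies the sign bit.  */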
5581 
5582       if (code == PLUS && XEXP (x, 1) == constm1_rtx
5583 	  && bitwidth <= HOST_BITS_PER_WIDE_INT)
5584 	{
5585 	  nonzero = nonzero_bits (XEXP (x, 0), mode);
5586 	  if (((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero) == 0)
5587 	    return (nonzero == 1 || nonzero == 0 ? bitwidth
5588 		    : bitwidth - floor_log2 (nonzero) - 1);
5589 	}
5590 
5591       num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5592 					 known_x, known_mode, known_ret);
5593       num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5594 					 known_x, known_mode, known_ret);
5595       result = MAX (1, MIN (num0, num1) - 1);
5596 
5597       return result;
5598 
5599     case MULT:
5600       /* The number of bits of the product is the sum of the number of
5601 	 bits of both terms.  However, unless one of the terms is known
5602 	 to be positive, we must allow for an additional bit since negating
5603 	 a negative number can remove one sign bit copy.  */
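      /* E.g. in a 32-bit mode, operands with 20 sign bit copies each give
	 32 - 12 - 12 = 8 copies, reduced to 7 if both operands might be
	 negative.  */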
5604 
5605       num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5606 					 known_x, known_mode, known_ret);
5607       num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5608 					 known_x, known_mode, known_ret);
5609 
5610       result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
5611       if (result > 0
5612 	  && (bitwidth > HOST_BITS_PER_WIDE_INT
5613 	      || (((nonzero_bits (XEXP (x, 0), mode)
5614 		    & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5615 		  && ((nonzero_bits (XEXP (x, 1), mode)
5616 		       & (HOST_WIDE_INT_1U << (bitwidth - 1)))
5617 		      != 0))))
5618 	result--;
5619 
5620       return MAX (1, result);
5621 
5622     case UDIV:
5623       /* The result must be <= the first operand.  If the first operand
5624 	 has the high bit set, we know nothing about the number of sign
5625 	 bit copies.  */
5626       if (bitwidth > HOST_BITS_PER_WIDE_INT)
5627 	return 1;
5628       else if ((nonzero_bits (XEXP (x, 0), mode)
5629 		& (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5630 	return 1;
5631       else
5632 	return cached_num_sign_bit_copies (XEXP (x, 0), mode,
5633 					   known_x, known_mode, known_ret);
5634 
5635     case UMOD:
5636       /* The result must be <= the second operand.  If the second operand
5637 	 has (or just might have) the high bit set, we know nothing about
5638 	 the number of sign bit copies.  */
5639       if (bitwidth > HOST_BITS_PER_WIDE_INT)
5640 	return 1;
5641       else if ((nonzero_bits (XEXP (x, 1), mode)
5642 		& (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5643 	return 1;
5644       else
5645 	return cached_num_sign_bit_copies (XEXP (x, 1), mode,
5646 					   known_x, known_mode, known_ret);
5647 
5648     case DIV:
5649       /* Similar to unsigned division, except that we have to worry about
5650 	 the case where the divisor is negative, in which case we have
5651 	 to add 1.  */
5652       result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5653 					   known_x, known_mode, known_ret);
5654       if (result > 1
5655 	  && (bitwidth > HOST_BITS_PER_WIDE_INT
5656 	      || (nonzero_bits (XEXP (x, 1), mode)
5657 		  & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
5658 	result--;
5659 
5660       return result;
5661 
5662     case MOD:
5663       result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5664 					   known_x, known_mode, known_ret);
5665       if (result > 1
5666 	  && (bitwidth > HOST_BITS_PER_WIDE_INT
5667 	      || (nonzero_bits (XEXP (x, 1), mode)
5668 		  & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
5669 	result--;
5670 
5671       return result;
5672 
5673     case ASHIFTRT:
5674       /* Shifts by a constant add to the number of bits equal to the
5675 	 sign bit.  */
5676       num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5677 					 known_x, known_mode, known_ret);
5678       if (CONST_INT_P (XEXP (x, 1))
5679 	  && INTVAL (XEXP (x, 1)) > 0
5680 	  && INTVAL (XEXP (x, 1)) < xmode_width)
5681 	num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));
5682 
5683       return num0;
5684 
5685     case ASHIFT:
5686       /* Left shifts destroy copies.  */
5687       if (!CONST_INT_P (XEXP (x, 1))
5688 	  || INTVAL (XEXP (x, 1)) < 0
5689 	  || INTVAL (XEXP (x, 1)) >= (int) bitwidth
5690 	  || INTVAL (XEXP (x, 1)) >= xmode_width)
5691 	return 1;
5692 
5693       num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5694 					 known_x, known_mode, known_ret);
5695       return MAX (1, num0 - INTVAL (XEXP (x, 1)));
5696 
5697     case IF_THEN_ELSE:
5698       num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5699 					 known_x, known_mode, known_ret);
5700       num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
5701 					 known_x, known_mode, known_ret);
5702       return MIN (num0, num1);
5703 
5704     case EQ:  case NE:  case GE:  case GT:  case LE:  case LT:
5705     case UNEQ:  case LTGT:  case UNGE:  case UNGT:  case UNLE:  case UNLT:
5706     case GEU: case GTU: case LEU: case LTU:
5707     case UNORDERED: case ORDERED:
5708       /* If the constant is negative, take its 1's complement and remask.
5709 	 Then see how many zero bits we have.  */
5710       nonzero = STORE_FLAG_VALUE;
5711       if (bitwidth <= HOST_BITS_PER_WIDE_INT
5712 	  && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5713 	nonzero = (~nonzero) & GET_MODE_MASK (mode);
5714 
5715       return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
5716 
5717     default:
5718       break;
5719     }
5720 
5721   /* If we haven't been able to figure it out by one of the above rules,
5722      see if some of the high-order bits are known to be zero.  If so,
5723      they are all copies of the (zero) sign bit, so return their count.
5724      If we can't safely compute the mask for this mode, return 1.  */
5725 
5726   bitwidth = GET_MODE_PRECISION (mode);
5727   if (bitwidth > HOST_BITS_PER_WIDE_INT)
5728     return 1;
5729 
5730   nonzero = nonzero_bits (x, mode);
5731   return nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))
5732 	 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
5733 }
5734 
5735 /* Calculate the rtx_cost of a single instruction pattern.  A return value of
5736    zero indicates an instruction pattern without a known cost.  */
5737 
5738 int
5739 pattern_cost (rtx pat, bool speed)
5740 {
5741   int i, cost;
5742   rtx set;
5743 
5744   /* Extract the single set rtx from the instruction pattern.  We
5745      can't use single_set since we only have the pattern.  We also
5746      consider PARALLELs of a normal set and a single comparison.  In
5747      that case we use the cost of the non-comparison SET operation,
5748      which is most likely to be the real cost of this operation.  */
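  /* For instance, given (parallel [(set A (plus B C)) (set CC (compare ...))]),
     the cost of the PLUS set is used.  */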
5749   if (GET_CODE (pat) == SET)
5750     set = pat;
5751   else if (GET_CODE (pat) == PARALLEL)
5752     {
5753       set = NULL_RTX;
5754       rtx comparison = NULL_RTX;
5755 
5756       for (i = 0; i < XVECLEN (pat, 0); i++)
5757 	{
5758 	  rtx x = XVECEXP (pat, 0, i);
5759 	  if (GET_CODE (x) == SET)
5760 	    {
5761 	      if (GET_CODE (SET_SRC (x)) == COMPARE)
5762 		{
5763 		  if (comparison)
5764 		    return 0;
5765 		  comparison = x;
5766 		}
5767 	      else
5768 		{
5769 		  if (set)
5770 		    return 0;
5771 		  set = x;
5772 		}
5773 	    }
5774 	}
5775 
5776       if (!set && comparison)
5777 	set = comparison;
5778 
5779       if (!set)
5780 	return 0;
5781     }
5782   else
5783     return 0;
5784 
5785   cost = set_src_cost (SET_SRC (set), GET_MODE (SET_DEST (set)), speed);
5786   return cost > 0 ? cost : COSTS_N_INSNS (1);
5787 }
5788 
5789 /* Calculate the cost of a single instruction.  A return value of zero
5790    indicates an instruction pattern without a known cost.  */
5791 
5792 int
5793 insn_cost (rtx_insn *insn, bool speed)
5794 {
5795   if (targetm.insn_cost)
5796     return targetm.insn_cost (insn, speed);
5797 
5798   return pattern_cost (PATTERN (insn), speed);
5799 }
5800 
5801 /* Returns estimate on cost of computing SEQ.  */
5802 
5803 unsigned
5804 seq_cost (const rtx_insn *seq, bool speed)
5805 {
5806   unsigned cost = 0;
5807   rtx set;
5808 
5809   for (; seq; seq = NEXT_INSN (seq))
5810     {
5811       set = single_set (seq);
5812       if (set)
5813         cost += set_rtx_cost (set, speed);
5814       else if (NONDEBUG_INSN_P (seq))
5815 	{
5816 	  int this_cost = insn_cost (CONST_CAST_RTX_INSN (seq), speed);
5817 	  if (this_cost > 0)
5818 	    cost += this_cost;
5819 	  else
5820 	    cost++;
5821 	}
5822     }
5823 
5824   return cost;
5825 }
5826 
5827 /* Given an insn INSN and condition COND, return the condition in a
5828    canonical form to simplify testing by callers.  Specifically:
5829 
5830    (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
5831    (2) Both operands will be machine operands; (cc0) will have been replaced.
5832    (3) If an operand is a constant, it will be the second operand.
5833    (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
5834        for GE, GEU, and LEU.
5835 
5836    If the condition cannot be understood, or is an inequality floating-point
5837    comparison which needs to be reversed, 0 will be returned.
5838 
5839    If REVERSE is nonzero, then reverse the condition prior to canonizing it.
5840 
5841    If EARLIEST is nonzero, it is a pointer to a place where the earliest
5842    insn used in locating the condition was found.  If a replacement test
5843    of the condition is desired, it should be placed in front of that
5844    insn and we will be sure that the inputs are still valid.
5845 
5846    If WANT_REG is nonzero, we wish the condition to be relative to that
5847    register, if possible.  Therefore, do not canonicalize the condition
5848    further.  If ALLOW_CC_MODE is nonzero, allow the condition returned
5849    to be a compare to a CC mode register.
5850 
5851    If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
5852    and at INSN.  */
5853 
5854 rtx
5855 canonicalize_condition (rtx_insn *insn, rtx cond, int reverse,
5856 			rtx_insn **earliest,
5857 			rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
5858 {
5859   enum rtx_code code;
5860   rtx_insn *prev = insn;
5861   const_rtx set;
5862   rtx tem;
5863   rtx op0, op1;
5864   int reverse_code = 0;
5865   machine_mode mode;
5866   basic_block bb = BLOCK_FOR_INSN (insn);
5867 
5868   code = GET_CODE (cond);
5869   mode = GET_MODE (cond);
5870   op0 = XEXP (cond, 0);
5871   op1 = XEXP (cond, 1);
5872 
5873   if (reverse)
5874     code = reversed_comparison_code (cond, insn);
5875   if (code == UNKNOWN)
5876     return 0;
5877 
5878   if (earliest)
5879     *earliest = insn;
5880 
5881   /* If we are comparing a register with zero, see if the register is set
5882      in the previous insn to a COMPARE or a comparison operation.  Perform
5883      the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
5884      in cse.c  */
5885 
5886   while ((GET_RTX_CLASS (code) == RTX_COMPARE
5887 	  || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
5888 	 && op1 == CONST0_RTX (GET_MODE (op0))
5889 	 && op0 != want_reg)
5890     {
5891       /* Set nonzero when we find something of interest.  */
5892       rtx x = 0;
5893 
5894       /* If comparison with cc0, import actual comparison from compare
5895 	 insn.  */
5896       if (op0 == cc0_rtx)
5897 	{
5898 	  if ((prev = prev_nonnote_insn (prev)) == 0
5899 	      || !NONJUMP_INSN_P (prev)
5900 	      || (set = single_set (prev)) == 0
5901 	      || SET_DEST (set) != cc0_rtx)
5902 	    return 0;
5903 
5904 	  op0 = SET_SRC (set);
5905 	  op1 = CONST0_RTX (GET_MODE (op0));
5906 	  if (earliest)
5907 	    *earliest = prev;
5908 	}
5909 
5910       /* If this is a COMPARE, pick up the two things being compared.  */
5911       if (GET_CODE (op0) == COMPARE)
5912 	{
5913 	  op1 = XEXP (op0, 1);
5914 	  op0 = XEXP (op0, 0);
5915 	  continue;
5916 	}
5917       else if (!REG_P (op0))
5918 	break;
5919 
5920       /* Go back to the previous insn.  Stop if it is not an INSN.  We also
5921 	 stop if it isn't a single set or if it has a REG_INC note because
5922 	 we don't want to bother dealing with it.  */
5923 
5924       prev = prev_nonnote_nondebug_insn (prev);
5925 
5926       if (prev == 0
5927 	  || !NONJUMP_INSN_P (prev)
5928 	  || FIND_REG_INC_NOTE (prev, NULL_RTX)
5929 	  /* In cfglayout mode, there do not have to be labels at the
5930 	     beginning of a block, or jumps at the end, so the previous
5931 	     conditions would not stop us when we reach the bb boundary.  */
5932 	  || BLOCK_FOR_INSN (prev) != bb)
5933 	break;
5934 
5935       set = set_of (op0, prev);
5936 
5937       if (set
5938 	  && (GET_CODE (set) != SET
5939 	      || !rtx_equal_p (SET_DEST (set), op0)))
5940 	break;
5941 
5942       /* If this is setting OP0, get what it sets it to if it looks
5943 	 relevant.  */
5944       if (set)
5945 	{
5946 	  machine_mode inner_mode = GET_MODE (SET_DEST (set));
5947 #ifdef FLOAT_STORE_FLAG_VALUE
5948 	  REAL_VALUE_TYPE fsfv;
5949 #endif
5950 
5951 	  /* ??? We may not combine comparisons done in a CCmode with
5952 	     comparisons not done in a CCmode.  This is to aid targets
5953 	     like Alpha that have an IEEE compliant EQ instruction, and
5954 	     a non-IEEE compliant BEQ instruction.  The use of CCmode is
5955 	     actually artificial, simply to prevent the combination, but
5956 	     should not affect other platforms.
5957 
5958 	     However, we must allow VOIDmode comparisons to match either
5959 	     CCmode or non-CCmode comparison, because some ports have
5960 	     modeless comparisons inside branch patterns.
5961 
5962 	     ??? This mode check should perhaps look more like the mode check
5963 	     in simplify_comparison in combine.  */
5964 	  if (((GET_MODE_CLASS (mode) == MODE_CC)
5965 	       != (GET_MODE_CLASS (inner_mode) == MODE_CC))
5966 	      && mode != VOIDmode
5967 	      && inner_mode != VOIDmode)
5968 	    break;
5969 	  if (GET_CODE (SET_SRC (set)) == COMPARE
5970 	      || (((code == NE
5971 		    || (code == LT
5972 			&& val_signbit_known_set_p (inner_mode,
5973 						    STORE_FLAG_VALUE))
5974 #ifdef FLOAT_STORE_FLAG_VALUE
5975 		    || (code == LT
5976 			&& SCALAR_FLOAT_MODE_P (inner_mode)
5977 			&& (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5978 			    REAL_VALUE_NEGATIVE (fsfv)))
5979 #endif
5980 		    ))
5981 		  && COMPARISON_P (SET_SRC (set))))
5982 	    x = SET_SRC (set);
5983 	  else if (((code == EQ
5984 		     || (code == GE
5985 			 && val_signbit_known_set_p (inner_mode,
5986 						     STORE_FLAG_VALUE))
5987 #ifdef FLOAT_STORE_FLAG_VALUE
5988 		     || (code == GE
5989 			 && SCALAR_FLOAT_MODE_P (inner_mode)
5990 			 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5991 			     REAL_VALUE_NEGATIVE (fsfv)))
5992 #endif
5993 		     ))
5994 		   && COMPARISON_P (SET_SRC (set)))
5995 	    {
5996 	      reverse_code = 1;
5997 	      x = SET_SRC (set);
5998 	    }
5999 	  else if ((code == EQ || code == NE)
6000 		   && GET_CODE (SET_SRC (set)) == XOR)
6001 	    /* Handle sequences like:
6002 
6003 	       (set op0 (xor X Y))
6004 	       ...(eq|ne op0 (const_int 0))...
6005 
6006 	       in which case:
6007 
6008 	       (eq op0 (const_int 0)) reduces to (eq X Y)
6009 	       (ne op0 (const_int 0)) reduces to (ne X Y)
6010 
6011 	       This is the form used by MIPS16, for example.  */
6012 	    x = SET_SRC (set);
6013 	  else
6014 	    break;
6015 	}
6016 
6017       else if (reg_set_p (op0, prev))
6018 	/* If this sets OP0, but not directly, we have to give up.  */
6019 	break;
6020 
6021       if (x)
6022 	{
6023 	  /* If the caller is expecting the condition to be valid at INSN,
6024 	     make sure X doesn't change before INSN.  */
6025 	  if (valid_at_insn_p)
6026 	    if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
6027 	      break;
6028 	  if (COMPARISON_P (x))
6029 	    code = GET_CODE (x);
6030 	  if (reverse_code)
6031 	    {
6032 	      code = reversed_comparison_code (x, prev);
6033 	      if (code == UNKNOWN)
6034 		return 0;
6035 	      reverse_code = 0;
6036 	    }
6037 
6038 	  op0 = XEXP (x, 0), op1 = XEXP (x, 1);
6039 	  if (earliest)
6040 	    *earliest = prev;
6041 	}
6042     }
6043 
6044   /* If constant is first, put it last.  */
6045   if (CONSTANT_P (op0))
6046     code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
6047 
6048   /* If OP0 is the result of a comparison, we weren't able to find what
6049      was really being compared, so fail.  */
6050   if (!allow_cc_mode
6051       && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
6052     return 0;
6053 
6054   /* Canonicalize any ordered comparison with integers involving equality
6055      if we can do computations in the relevant mode and we do not
6056      overflow.  */
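  /* For example, (le X (const_int 5)) becomes (lt X (const_int 6)) and
     (geu X (const_int 1)) becomes (gtu X (const_int 0)).  */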
6057 
6058   scalar_int_mode op0_mode;
6059   if (CONST_INT_P (op1)
6060       && is_a <scalar_int_mode> (GET_MODE (op0), &op0_mode)
6061       && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT)
6062     {
6063       HOST_WIDE_INT const_val = INTVAL (op1);
6064       unsigned HOST_WIDE_INT uconst_val = const_val;
6065       unsigned HOST_WIDE_INT max_val
6066 	= (unsigned HOST_WIDE_INT) GET_MODE_MASK (op0_mode);
6067 
6068       switch (code)
6069 	{
6070 	case LE:
6071 	  if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
6072 	    code = LT, op1 = gen_int_mode (const_val + 1, op0_mode);
6073 	  break;
6074 
6075 	/* When cross-compiling, const_val might be sign-extended from
6076 	   BITS_PER_WORD to HOST_BITS_PER_WIDE_INT.  */
6077 	case GE:
6078 	  if ((const_val & max_val)
6079 	      != (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (op0_mode) - 1)))
6080 	    code = GT, op1 = gen_int_mode (const_val - 1, op0_mode);
6081 	  break;
6082 
6083 	case LEU:
6084 	  if (uconst_val < max_val)
6085 	    code = LTU, op1 = gen_int_mode (uconst_val + 1, op0_mode);
6086 	  break;
6087 
6088 	case GEU:
6089 	  if (uconst_val != 0)
6090 	    code = GTU, op1 = gen_int_mode (uconst_val - 1, op0_mode);
6091 	  break;
6092 
6093 	default:
6094 	  break;
6095 	}
6096     }
6097 
6098   /* Never return CC0; return zero instead.  */
6099   if (CC0_P (op0))
6100     return 0;
6101 
6102   /* We promised to return a comparison.  */
6103   rtx ret = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
6104   if (COMPARISON_P (ret))
6105     return ret;
6106   return 0;
6107 }
6108 
6109 /* Given a jump insn JUMP, return the condition that will cause it to branch
6110    to its JUMP_LABEL.  If the condition cannot be understood, or is an
6111    inequality floating-point comparison which needs to be reversed, 0 will
6112    be returned.
6113 
6114    If EARLIEST is nonzero, it is a pointer to a place where the earliest
6115    insn used in locating the condition was found.  If a replacement test
6116    of the condition is desired, it should be placed in front of that
6117    insn and we will be sure that the inputs are still valid.  If EARLIEST
6118    is null, the returned condition will be valid at INSN.
6119 
6120    If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
6121    compare CC mode register.
6122 
6123    VALID_AT_INSN_P is the same as for canonicalize_condition.  */
6124 
6125 rtx
6126 get_condition (rtx_insn *jump, rtx_insn **earliest, int allow_cc_mode,
6127 	       int valid_at_insn_p)
6128 {
6129   rtx cond;
6130   int reverse;
6131   rtx set;
6132 
6133   /* If this is not a standard conditional jump, we can't parse it.  */
6134   if (!JUMP_P (jump)
6135       || ! any_condjump_p (jump))
6136     return 0;
6137   set = pc_set (jump);
6138 
6139   cond = XEXP (SET_SRC (set), 0);
6140 
6141   /* If this branches to JUMP_LABEL when the condition is false, reverse
6142      the condition.  */
6143   reverse
6144     = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
6145       && label_ref_label (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (jump);
6146 
6147   return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
6148 				 allow_cc_mode, valid_at_insn_p);
6149 }
6150 
6151 /* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
6152    TARGET_MODE_REP_EXTENDED.
6153 
6154    Note that we assume that the property of
6155    TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
6156    narrower than mode B.  I.e., if A is a mode narrower than B then in
6157    order to be able to operate on it in mode B, mode A needs to
6158    satisfy the requirements set by the representation of mode B.  */
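/* For instance, if a target keeps SImode values sign-extended when they live
   in DImode registers (TARGET_MODE_REP_EXTENDED returns SIGN_EXTEND for that
   pair), NUM_SIGN_BIT_COPIES_IN_REP[DImode][SImode] ends up as
   GET_MODE_PRECISION (DImode) - GET_MODE_PRECISION (SImode), i.e. 32 on a
   typical 32/64-bit configuration.  */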
6159 
6160 static void
6161 init_num_sign_bit_copies_in_rep (void)
6162 {
6163   opt_scalar_int_mode in_mode_iter;
6164   scalar_int_mode mode;
6165 
6166   FOR_EACH_MODE_IN_CLASS (in_mode_iter, MODE_INT)
6167     FOR_EACH_MODE_UNTIL (mode, in_mode_iter.require ())
6168       {
6169 	scalar_int_mode in_mode = in_mode_iter.require ();
6170 	scalar_int_mode i;
6171 
6172 	/* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
6173 	   extends to the next widest mode.  */
6174 	gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
6175 		    || GET_MODE_WIDER_MODE (mode).require () == in_mode);
6176 
6177 	/* We are in in_mode.  Count how many bits outside of mode
6178 	   have to be copies of the sign-bit.  */
6179 	FOR_EACH_MODE (i, mode, in_mode)
6180 	  {
6181 	    /* This must always exist (for the last iteration it will be
6182 	       IN_MODE).  */
6183 	    scalar_int_mode wider = GET_MODE_WIDER_MODE (i).require ();
6184 
6185 	    if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
6186 		/* We can only check sign-bit copies starting from the
6187 		   top-bit.  In order to be able to check the bits we
6188 		   have already seen we pretend that subsequent bits
6189 		   have to be sign-bit copies too.  */
6190 		|| num_sign_bit_copies_in_rep [in_mode][mode])
6191 	      num_sign_bit_copies_in_rep [in_mode][mode]
6192 		+= GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
6193 	  }
6194       }
6195 }
6196 
6197 /* Suppose that truncation from the machine mode of X to MODE is not a
6198    no-op.  See if there is anything special about X so that we can
6199    assume it already contains a truncated value of MODE.  */
6200 
6201 bool
6202 truncated_to_mode (machine_mode mode, const_rtx x)
6203 {
6204   /* This register has already been used in MODE without explicit
6205      truncation.  */
6206   if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
6207     return true;
6208 
6209   /* See if we already satisfy the requirements of MODE.  If yes we
6210      can just switch to MODE.  */
6211   if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
6212       && (num_sign_bit_copies (x, GET_MODE (x))
6213 	  >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
6214     return true;
6215 
6216   return false;
6217 }
6218 
6219 /* Return true if RTX code CODE has a single sequence of zero or more
6220    "e" operands and no rtvec operands.  Initialize its rtx_all_subrtx_bounds
6221    entry in that case.  */
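/* For example, PLUS has format "ee", so its entry gets start 0 and count 2,
   whereas a code such as PARALLEL, whose format contains an 'E' vector, is
   rejected and gets a count of UCHAR_MAX from init_rtlanal.  */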
6222 
6223 static bool
6224 setup_reg_subrtx_bounds (unsigned int code)
6225 {
6226   const char *format = GET_RTX_FORMAT ((enum rtx_code) code);
6227   unsigned int i = 0;
6228   for (; format[i] != 'e'; ++i)
6229     {
6230       if (!format[i])
6231 	/* No subrtxes.  Leave start and count as 0.  */
6232 	return true;
6233       if (format[i] == 'E' || format[i] == 'V')
6234 	return false;
6235     }
6236 
6237   /* Record the sequence of 'e's.  */
6238   rtx_all_subrtx_bounds[code].start = i;
6239   do
6240     ++i;
6241   while (format[i] == 'e');
6242   rtx_all_subrtx_bounds[code].count = i - rtx_all_subrtx_bounds[code].start;
6243   /* rtl-iter.h relies on this.  */
6244   gcc_checking_assert (rtx_all_subrtx_bounds[code].count <= 3);
6245 
6246   for (; format[i]; ++i)
6247     if (format[i] == 'E' || format[i] == 'V' || format[i] == 'e')
6248       return false;
6249 
6250   return true;
6251 }
6252 
6253 /* Initialize rtx_all_subrtx_bounds.  */
6254 void
6255 init_rtlanal (void)
6256 {
6257   int i;
6258   for (i = 0; i < NUM_RTX_CODE; i++)
6259     {
6260       if (!setup_reg_subrtx_bounds (i))
6261 	rtx_all_subrtx_bounds[i].count = UCHAR_MAX;
6262       if (GET_RTX_CLASS (i) != RTX_CONST_OBJ)
6263 	rtx_nonconst_subrtx_bounds[i] = rtx_all_subrtx_bounds[i];
6264     }
6265 
6266   init_num_sign_bit_copies_in_rep ();
6267 }
6268 
6269 /* Check whether this is a constant pool constant.  */
6270 bool
6271 constant_pool_constant_p (rtx x)
6272 {
6273   x = avoid_constant_pool_reference (x);
6274   return CONST_DOUBLE_P (x);
6275 }
6276 
6277 /* If M is a bitmask that selects a field of low-order bits within an item but
6278    not the entire word, return the length of the field.  Return -1 otherwise.
6279    M is used in machine mode MODE.  */
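/* E.g. 0xff yields 8, while 0xf0 yields -1 because the selected field does
   not start at bit 0.  */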
6280 
6281 int
6282 low_bitmask_len (machine_mode mode, unsigned HOST_WIDE_INT m)
6283 {
6284   if (mode != VOIDmode)
6285     {
6286       if (!HWI_COMPUTABLE_MODE_P (mode))
6287 	return -1;
6288       m &= GET_MODE_MASK (mode);
6289     }
6290 
6291   return exact_log2 (m + 1);
6292 }
6293 
6294 /* Return the mode of MEM's address.  */
6295 
6296 scalar_int_mode
6297 get_address_mode (rtx mem)
6298 {
6299   machine_mode mode;
6300 
6301   gcc_assert (MEM_P (mem));
6302   mode = GET_MODE (XEXP (mem, 0));
6303   if (mode != VOIDmode)
6304     return as_a <scalar_int_mode> (mode);
6305   return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
6306 }
6307 
6308 /* Split up a CONST_DOUBLE or integer constant rtx
6309    into two rtx's for single words,
6310    storing in *FIRST the word that comes first in memory in the target
6311    and in *SECOND the other.
6312 
6313    TODO: This function needs to be rewritten to work on any size
6314    integer.  */
6315 
6316 void
6317 split_double (rtx value, rtx *first, rtx *second)
6318 {
6319   if (CONST_INT_P (value))
6320     {
6321       if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
6322 	{
6323 	  /* In this case the CONST_INT holds both target words.
6324 	     Extract the bits from it into two word-sized pieces.
6325 	     Sign extend each half to HOST_WIDE_INT.  */
6326 	  unsigned HOST_WIDE_INT low, high;
6327 	  unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
6328 	  unsigned bits_per_word = BITS_PER_WORD;
6329 
6330 	  /* Set sign_bit to the most significant bit of a word.  */
6331 	  sign_bit = 1;
6332 	  sign_bit <<= bits_per_word - 1;
6333 
6334 	  /* Set mask so that all bits of the word are set.  We could
6335 	     have used 1 << BITS_PER_WORD instead of basing the
6336 	     calculation on sign_bit.  However, on machines where
6337 	     HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
6338 	     compiler warning, even though the code would never be
6339 	     executed.  */
6340 	  mask = sign_bit << 1;
6341 	  mask--;
6342 
6343 	  /* Set sign_extend as any remaining bits.  */
6344 	  sign_extend = ~mask;
6345 
6346 	  /* Pick the lower word and sign-extend it.  */
6347 	  low = INTVAL (value);
6348 	  low &= mask;
6349 	  if (low & sign_bit)
6350 	    low |= sign_extend;
6351 
6352 	  /* Pick the higher word, shifted to the least significant
6353 	     bits, and sign-extend it.  */
6354 	  high = INTVAL (value);
6355 	  high >>= bits_per_word - 1;
6356 	  high >>= 1;
6357 	  high &= mask;
6358 	  if (high & sign_bit)
6359 	    high |= sign_extend;
6360 
6361 	  /* Store the words in the target machine order.  */
6362 	  if (WORDS_BIG_ENDIAN)
6363 	    {
6364 	      *first = GEN_INT (high);
6365 	      *second = GEN_INT (low);
6366 	    }
6367 	  else
6368 	    {
6369 	      *first = GEN_INT (low);
6370 	      *second = GEN_INT (high);
6371 	    }
6372 	}
6373       else
6374 	{
6375 	  /* The rule for using CONST_INT for a wider mode
6376 	     is that we regard the value as signed.
6377 	     So sign-extend it.  */
6378 	  rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
6379 	  if (WORDS_BIG_ENDIAN)
6380 	    {
6381 	      *first = high;
6382 	      *second = value;
6383 	    }
6384 	  else
6385 	    {
6386 	      *first = value;
6387 	      *second = high;
6388 	    }
6389 	}
6390     }
6391   else if (GET_CODE (value) == CONST_WIDE_INT)
6392     {
6393       /* All of this is scary code and needs to be converted to
6394 	 properly work with any size integer.  */
6395       gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2);
6396       if (WORDS_BIG_ENDIAN)
6397 	{
6398 	  *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
6399 	  *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
6400 	}
6401       else
6402 	{
6403 	  *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
6404 	  *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
6405 	}
6406     }
6407   else if (!CONST_DOUBLE_P (value))
6408     {
6409       if (WORDS_BIG_ENDIAN)
6410 	{
6411 	  *first = const0_rtx;
6412 	  *second = value;
6413 	}
6414       else
6415 	{
6416 	  *first = value;
6417 	  *second = const0_rtx;
6418 	}
6419     }
6420   else if (GET_MODE (value) == VOIDmode
6421 	   /* This is the old way we did CONST_DOUBLE integers.  */
6422 	   || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
6423     {
6424       /* In an integer, the words are defined as most and least significant.
6425 	 So order them by the target's convention.  */
6426       if (WORDS_BIG_ENDIAN)
6427 	{
6428 	  *first = GEN_INT (CONST_DOUBLE_HIGH (value));
6429 	  *second = GEN_INT (CONST_DOUBLE_LOW (value));
6430 	}
6431       else
6432 	{
6433 	  *first = GEN_INT (CONST_DOUBLE_LOW (value));
6434 	  *second = GEN_INT (CONST_DOUBLE_HIGH (value));
6435 	}
6436     }
6437   else
6438     {
6439       long l[2];
6440 
6441       /* Note, this converts the REAL_VALUE_TYPE to the target's
6442 	 format, splits up the floating point double and outputs
6443 	 exactly 32 bits of it into each of l[0] and l[1] --
6444 	 not necessarily BITS_PER_WORD bits.  */
6445       REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (value), l);
6446 
6447       /* If 32 bits is an entire word for the target, but not for the host,
6448 	 then sign-extend on the host so that the number will look the same
6449 	 way on the host that it would on the target.  See for instance
6450 	 simplify_unary_operation.  The #if is needed to avoid compiler
6451 	 warnings.  */
6452 
6453 #if HOST_BITS_PER_LONG > 32
6454       if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
6455 	{
6456 	  if (l[0] & ((long) 1 << 31))
6457 	    l[0] |= ((unsigned long) (-1) << 32);
6458 	  if (l[1] & ((long) 1 << 31))
6459 	    l[1] |= ((unsigned long) (-1) << 32);
6460 	}
6461 #endif
6462 
6463       *first = GEN_INT (l[0]);
6464       *second = GEN_INT (l[1]);
6465     }
6466 }
6467 
6468 /* Return true if X is a sign_extract or zero_extract from the least
6469    significant bit.  */
6470 
6471 static bool
6472 lsb_bitfield_op_p (rtx x)
6473 {
6474   if (GET_RTX_CLASS (GET_CODE (x)) == RTX_BITFIELD_OPS)
6475     {
6476       machine_mode mode = GET_MODE (XEXP (x, 0));
6477       HOST_WIDE_INT len = INTVAL (XEXP (x, 1));
6478       HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));
6479       poly_int64 remaining_bits = GET_MODE_PRECISION (mode) - len;
6480 
6481       return known_eq (pos, BITS_BIG_ENDIAN ? remaining_bits : 0);
6482     }
6483   return false;
6484 }
6485 
6486 /* Strip outer address "mutations" from LOC and return a pointer to the
6487    inner value.  If OUTER_CODE is nonnull, store the code of the innermost
6488    stripped expression there.
6489 
6490    "Mutations" either convert between modes or apply some kind of
6491    extension, truncation or alignment.  */
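/* For instance, for the address (zero_extend:DI (plus:SI (reg) (const_int 4))),
   this returns a pointer to the PLUS and, if OUTER_CODE is nonnull, records
   ZERO_EXTEND as the innermost stripped code.  */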
6492 
6493 rtx *
6494 strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
6495 {
6496   for (;;)
6497     {
6498       enum rtx_code code = GET_CODE (*loc);
6499       if (GET_RTX_CLASS (code) == RTX_UNARY)
6500 	/* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
6501 	   used to convert between pointer sizes.  */
6502 	loc = &XEXP (*loc, 0);
6503       else if (lsb_bitfield_op_p (*loc))
6504 	/* A [SIGN|ZERO]_EXTRACT from the least significant bit effectively
6505 	   acts as a combined truncation and extension.  */
6506 	loc = &XEXP (*loc, 0);
6507       else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
6508 	/* (and ... (const_int -X)) is used to align to X bytes.  */
6509 	loc = &XEXP (*loc, 0);
6510       else if (code == SUBREG
6511                && !OBJECT_P (SUBREG_REG (*loc))
6512                && subreg_lowpart_p (*loc))
6513 	/* (subreg (operator ...) ...) inside an address is used for mode
6514 	   conversion too.  */
6515 	loc = &SUBREG_REG (*loc);
6516       else
6517 	return loc;
6518       if (outer_code)
6519 	*outer_code = code;
6520     }
6521 }
6522 
6523 /* Return true if CODE applies some kind of scale.  The scaled value is
6524    the first operand and the scale is the second.  */
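/* E.g. (mult X (const_int 4)) and (ashift X (const_int 2)) both scale X
   by four.  */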
6525 
6526 static bool
6527 binary_scale_code_p (enum rtx_code code)
6528 {
6529   return (code == MULT
6530           || code == ASHIFT
6531           /* Needed by ARM targets.  */
6532           || code == ASHIFTRT
6533           || code == LSHIFTRT
6534           || code == ROTATE
6535           || code == ROTATERT);
6536 }
6537 
6538 /* If *INNER can be interpreted as a base, return a pointer to the inner term
6539    (see address_info).  Return null otherwise.  */
6540 
6541 static rtx *
6542 get_base_term (rtx *inner)
6543 {
6544   if (GET_CODE (*inner) == LO_SUM)
6545     inner = strip_address_mutations (&XEXP (*inner, 0));
6546   if (REG_P (*inner)
6547       || MEM_P (*inner)
6548       || GET_CODE (*inner) == SUBREG
6549       || GET_CODE (*inner) == SCRATCH)
6550     return inner;
6551   return 0;
6552 }
6553 
6554 /* If *INNER can be interpreted as an index, return a pointer to the inner term
6555    (see address_info).  Return null otherwise.  */
6556 
6557 static rtx *
6558 get_index_term (rtx *inner)
6559 {
6560   /* At present, only constant scales are allowed.  */
6561   if (binary_scale_code_p (GET_CODE (*inner)) && CONSTANT_P (XEXP (*inner, 1)))
6562     inner = strip_address_mutations (&XEXP (*inner, 0));
6563   if (REG_P (*inner)
6564       || MEM_P (*inner)
6565       || GET_CODE (*inner) == SUBREG
6566       || GET_CODE (*inner) == SCRATCH)
6567     return inner;
6568   return 0;
6569 }
6570 
6571 /* Set the segment part of address INFO to LOC, given that INNER is the
6572    unmutated value.  */
6573 
6574 static void
6575 set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
6576 {
6577   gcc_assert (!info->segment);
6578   info->segment = loc;
6579   info->segment_term = inner;
6580 }
6581 
6582 /* Set the base part of address INFO to LOC, given that INNER is the
6583    unmutated value.  */
6584 
6585 static void
6586 set_address_base (struct address_info *info, rtx *loc, rtx *inner)
6587 {
6588   gcc_assert (!info->base);
6589   info->base = loc;
6590   info->base_term = inner;
6591 }
6592 
6593 /* Set the index part of address INFO to LOC, given that INNER is the
6594    unmutated value.  */
6595 
6596 static void
6597 set_address_index (struct address_info *info, rtx *loc, rtx *inner)
6598 {
6599   gcc_assert (!info->index);
6600   info->index = loc;
6601   info->index_term = inner;
6602 }
6603 
6604 /* Set the displacement part of address INFO to LOC, given that INNER
6605    is the constant term.  */
6606 
6607 static void
6608 set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
6609 {
6610   gcc_assert (!info->disp);
6611   info->disp = loc;
6612   info->disp_term = inner;
6613 }
6614 
6615 /* INFO->INNER describes a {PRE,POST}_{INC,DEC} address.  Set up the
6616    rest of INFO accordingly.  */
6617 
6618 static void
6619 decompose_incdec_address (struct address_info *info)
6620 {
6621   info->autoinc_p = true;
6622 
6623   rtx *base = &XEXP (*info->inner, 0);
6624   set_address_base (info, base, base);
6625   gcc_checking_assert (info->base == info->base_term);
6626 
6627   /* These addresses are only valid when the size of the addressed
6628      value is known.  */
6629   gcc_checking_assert (info->mode != VOIDmode);
6630 }
6631 
6632 /* INFO->INNER describes a {PRE,POST}_MODIFY address.  Set up the rest
6633    of INFO accordingly.  */
6634 
6635 static void
6636 decompose_automod_address (struct address_info *info)
6637 {
6638   info->autoinc_p = true;
6639 
6640   rtx *base = &XEXP (*info->inner, 0);
6641   set_address_base (info, base, base);
6642   gcc_checking_assert (info->base == info->base_term);
6643 
6644   rtx plus = XEXP (*info->inner, 1);
6645   gcc_assert (GET_CODE (plus) == PLUS);
6646 
6647   info->base_term2 = &XEXP (plus, 0);
6648   gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));
6649 
6650   rtx *step = &XEXP (plus, 1);
6651   rtx *inner_step = strip_address_mutations (step);
6652   if (CONSTANT_P (*inner_step))
6653     set_address_disp (info, step, inner_step);
6654   else
6655     set_address_index (info, step, inner_step);
6656 }
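
/* For example, (pre_modify (reg fp) (plus (reg fp) (const_int -16)))
   gives BASE == BASE_TERM == the first (reg fp), BASE_TERM2 == the
   second (reg fp), and DISP == DISP_TERM == (const_int -16); a
   register step would instead be classified as the index.
   (Illustrative only.)  */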

/* Treat *LOC as a tree of PLUS operands and store pointers to the summed
   values in [PTR, END).  Return a pointer to the end of the used array.  */

static rtx **
extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
{
  rtx x = *loc;
  if (GET_CODE (x) == PLUS)
    {
      ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
      ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
    }
  else
    {
      gcc_assert (ptr != end);
      *ptr++ = loc;
    }
  return ptr;
}
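
/* For example, flattening (plus (plus (reg A) (reg B)) (const_int 8))
   fills the array with pointers to (reg A), (reg B) and (const_int 8),
   in that order, and returns a pointer just past the last entry used.
   (Illustrative only.)  */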

/* Evaluate the likelihood of X being a base or index value, returning
   positive if it is likely to be a base, negative if it is likely to be
   an index, and 0 if we can't tell.  Make the magnitude of the return
   value reflect the amount of confidence we have in the answer.

   MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1.  */

static int
baseness (rtx x, machine_mode mode, addr_space_t as,
	  enum rtx_code outer_code, enum rtx_code index_code)
{
  /* Believe *_POINTER unless the address shape requires otherwise.  */
  if (REG_P (x) && REG_POINTER (x))
    return 2;
  if (MEM_P (x) && MEM_POINTER (x))
    return 2;

  if (REG_P (x) && HARD_REGISTER_P (x))
    {
      /* X is a hard register.  If it only fits one of the base
	 or index classes, choose that interpretation.  */
      int regno = REGNO (x);
      bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
      bool index_p = REGNO_OK_FOR_INDEX_P (regno);
      if (base_p != index_p)
	return base_p ? 1 : -1;
    }
  return 0;
}
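
/* For example, a register marked REG_POINTER scores 2 (strongly a base),
   a hard register that is valid only as an index scores -1, and an
   unmarked pseudo scores 0 (no preference).  (Illustrative only.)  */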

/* INFO->INNER describes a normal, non-automodified address.
   Fill in the rest of INFO accordingly.  */

static void
decompose_normal_address (struct address_info *info)
{
  /* Treat the address as the sum of up to four values.  */
  rtx *ops[4];
  size_t n_ops = extract_plus_operands (info->inner, ops,
					ops + ARRAY_SIZE (ops)) - ops;

  /* If there is more than one component, any base component is in a PLUS.  */
  if (n_ops > 1)
    info->base_outer_code = PLUS;

  /* Try to classify each sum operand now.  Leave those that could be
     either a base or an index in OPS.  */
  rtx *inner_ops[4];
  size_t out = 0;
  for (size_t in = 0; in < n_ops; ++in)
    {
      rtx *loc = ops[in];
      rtx *inner = strip_address_mutations (loc);
      if (CONSTANT_P (*inner))
	set_address_disp (info, loc, inner);
      else if (GET_CODE (*inner) == UNSPEC)
	set_address_segment (info, loc, inner);
      else
	{
	  /* The only other possibilities are a base or an index.  */
	  rtx *base_term = get_base_term (inner);
	  rtx *index_term = get_index_term (inner);
	  gcc_assert (base_term || index_term);
	  if (!base_term)
	    set_address_index (info, loc, index_term);
	  else if (!index_term)
	    set_address_base (info, loc, base_term);
	  else
	    {
	      gcc_assert (base_term == index_term);
	      ops[out] = loc;
	      inner_ops[out] = base_term;
	      ++out;
	    }
	}
    }

  /* Classify the remaining OPS members as bases and indexes.  */
  if (out == 1)
    {
      /* If we haven't seen a base or an index yet, assume that this is
	 the base.  If we were confident that another term was the base
	 or index, treat the remaining operand as the other kind.  */
      if (!info->base)
	set_address_base (info, ops[0], inner_ops[0]);
      else
	set_address_index (info, ops[0], inner_ops[0]);
    }
  else if (out == 2)
    {
      /* In the event of a tie, assume the base comes first.  */
      if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
		    GET_CODE (*ops[1]))
	  >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
		       GET_CODE (*ops[0])))
	{
	  set_address_base (info, ops[0], inner_ops[0]);
	  set_address_index (info, ops[1], inner_ops[1]);
	}
      else
	{
	  set_address_base (info, ops[1], inner_ops[1]);
	  set_address_index (info, ops[0], inner_ops[0]);
	}
    }
  else
    gcc_assert (out == 0);
}
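
/* For example, an x86-style address such as
   (plus (plus (reg B) (mult (reg I) (const_int 4))) (const_int 8))
   is flattened into three operands: the (mult ...) term becomes the
   index (it can only be an index), (const_int 8) becomes the
   displacement, and (reg B) is then taken as the base.
   (Illustrative only.)  */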

/* Describe address *LOC in *INFO.  MODE is the mode of the addressed value,
   or VOIDmode if not known.  AS is the address space associated with LOC.
   OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise.  */

void
decompose_address (struct address_info *info, rtx *loc, machine_mode mode,
		   addr_space_t as, enum rtx_code outer_code)
{
  memset (info, 0, sizeof (*info));
  info->mode = mode;
  info->as = as;
  info->addr_outer_code = outer_code;
  info->outer = loc;
  info->inner = strip_address_mutations (loc, &outer_code);
  info->base_outer_code = outer_code;
  switch (GET_CODE (*info->inner))
    {
    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
      decompose_incdec_address (info);
      break;

    case PRE_MODIFY:
    case POST_MODIFY:
      decompose_automod_address (info);
      break;

    default:
      decompose_normal_address (info);
      break;
    }
}

/* Describe address operand LOC in INFO.  */

void
decompose_lea_address (struct address_info *info, rtx *loc)
{
  decompose_address (info, loc, VOIDmode, ADDR_SPACE_GENERIC, ADDRESS);
}

/* Describe the address of MEM X in INFO.  */

void
decompose_mem_address (struct address_info *info, rtx x)
{
  gcc_assert (MEM_P (x));
  decompose_address (info, &XEXP (x, 0), GET_MODE (x),
		     MEM_ADDR_SPACE (x), MEM);
}
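
/* A minimal usage sketch (illustrative only; MEM_RTX stands for any MEM):

     struct address_info info;
     decompose_mem_address (&info, mem_rtx);
     if (info.base)
       ... *info.base_term is the base register or equivalent ...
     if (info.disp)
       ... *info.disp is the constant displacement ...  */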

/* Update INFO after a change to the address it describes.  */

void
update_address (struct address_info *info)
{
  decompose_address (info, info->outer, info->mode, info->as,
		     info->addr_outer_code);
}

/* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
   more complicated than that.  */

HOST_WIDE_INT
get_index_scale (const struct address_info *info)
{
  rtx index = *info->index;
  if (GET_CODE (index) == MULT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return INTVAL (XEXP (index, 1));

  if (GET_CODE (index) == ASHIFT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return HOST_WIDE_INT_1 << INTVAL (XEXP (index, 1));

  if (info->index == info->index_term)
    return 1;

  return 0;
}
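
/* For example, an index of (mult (reg I) (const_int 4)) and an index of
   (ashift (reg I) (const_int 2)) both yield a scale of 4, while a bare
   (reg I) index yields 1.  (Illustrative only.)  */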

/* Return the "index code" of INFO, in the form required by
   ok_for_base_p_1.  */

enum rtx_code
get_index_code (const struct address_info *info)
{
  if (info->index)
    return GET_CODE (*info->index);

  if (info->disp)
    return GET_CODE (*info->disp);

  return SCRATCH;
}

/* Return true if RTL X contains a SYMBOL_REF.  */

bool
contains_symbol_ref_p (const_rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (SYMBOL_REF_P (*iter))
      return true;

  return false;
}

/* Return true if RTL X contains a SYMBOL_REF or LABEL_REF.  */

bool
contains_symbolic_reference_p (const_rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (SYMBOL_REF_P (*iter) || GET_CODE (*iter) == LABEL_REF)
      return true;

  return false;
}

/* Return true if RTL X contains a constant pool address.  */

bool
contains_constant_pool_address_p (const_rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (SYMBOL_REF_P (*iter) && CONSTANT_POOL_ADDRESS_P (*iter))
      return true;

  return false;
}


/* Return true if X contains a thread-local symbol.  */

bool
tls_referenced_p (const_rtx x)
{
  if (!targetm.have_tls)
    return false;

  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (GET_CODE (*iter) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (*iter) != 0)
      return true;
  return false;
}

/* Recursively process X, which is part of INSN, and add REG_INC notes
   to INSN where necessary.  */
void
add_auto_inc_notes (rtx_insn *insn, rtx x)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  if (code == MEM && auto_inc_p (XEXP (x, 0)))
    {
      add_reg_note (insn, REG_INC, XEXP (XEXP (x, 0), 0));
      return;
    }

  /* Scan all X sub-expressions.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	add_auto_inc_notes (insn, XEXP (x, i));
      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  add_auto_inc_notes (insn, XVECEXP (x, i, j));
    }
}
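
/* For example, if X contains (mem (post_inc (reg sp))), INSN gains a
   (REG_INC (reg sp)) note recording that the insn modifies sp.
   (Illustrative only.)  */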

/* Return true if X is a register asm, i.e. a register that holds a user
   variable declared with a `register ... asm ("...")' specification.  */

bool
register_asm_p (const_rtx x)
{
  return (REG_P (x)
	  && REG_EXPR (x) != NULL_TREE
	  && HAS_DECL_ASSEMBLER_NAME_P (REG_EXPR (x))
	  && DECL_ASSEMBLER_NAME_SET_P (REG_EXPR (x))
	  && DECL_REGISTER (REG_EXPR (x)));
}

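/* For example, given the user code

     register int counter asm ("r10");

   the REG rtx assigned to COUNTER satisfies register_asm_p, whereas an
   ordinary pseudo or a compiler-allocated hard register does not.
   (Illustrative only; the register name is target-specific.)  */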