xref: /dragonfly/contrib/gcc-8.0/gcc/rtlanal.c (revision eea5ad68)
1 /* Analyze RTL for GNU compiler.
2    Copyright (C) 1987-2018 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "df.h"
30 #include "memmodel.h"
31 #include "tm_p.h"
32 #include "insn-config.h"
33 #include "regs.h"
34 #include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
35 #include "recog.h"
36 #include "addresses.h"
37 #include "rtl-iter.h"
38 
39 /* Forward declarations */
40 static void set_of_1 (rtx, const_rtx, void *);
41 static bool covers_regno_p (const_rtx, unsigned int);
42 static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
43 static int computed_jump_p_1 (const_rtx);
44 static void parms_set (rtx, const_rtx, void *);
45 
46 static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, scalar_int_mode,
47                                                    const_rtx, machine_mode,
48                                                    unsigned HOST_WIDE_INT);
49 static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, scalar_int_mode,
50 					     const_rtx, machine_mode,
51                                              unsigned HOST_WIDE_INT);
52 static unsigned int cached_num_sign_bit_copies (const_rtx, scalar_int_mode,
53 						const_rtx, machine_mode,
54                                                 unsigned int);
55 static unsigned int num_sign_bit_copies1 (const_rtx, scalar_int_mode,
56 					  const_rtx, machine_mode,
57 					  unsigned int);
58 
59 rtx_subrtx_bound_info rtx_all_subrtx_bounds[NUM_RTX_CODE];
60 rtx_subrtx_bound_info rtx_nonconst_subrtx_bounds[NUM_RTX_CODE];
61 
62 /* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
63    If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
64    SIGN_EXTEND then while narrowing we also have to enforce the
65    representation and sign-extend the value to mode DESTINATION_REP.
66 
67    If the value is already sign-extended to DESTINATION_REP mode we
68    can just switch to DESTINATION mode on it.  For each pair of
69    integral modes SOURCE and DESTINATION, when truncating from SOURCE
70    to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
71    contains the number of high-order bits in SOURCE that have to be
72    copies of the sign-bit so that we can do this mode-switch to
73    DESTINATION.  */
74 
75 static unsigned int
76 num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
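
/* A concrete illustration (an assumption for exposition, not taken from this
   file): on a target such as 64-bit MIPS where
   TARGET_MODE_REP_EXTENDED (SImode, DImode) is SIGN_EXTEND, an SImode value
   held in a DImode register must have its upper 32 bits equal to bit 31, so
   num_sign_bit_copies_in_rep[DImode][SImode] would be 32.  */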
77 
78 /* Store X into index I of ARRAY.  ARRAY is known to have at least I
79    elements.  Return the new base of ARRAY.  */
80 
81 template <typename T>
82 typename T::value_type *
83 generic_subrtx_iterator <T>::add_single_to_queue (array_type &array,
84 						  value_type *base,
85 						  size_t i, value_type x)
86 {
87   if (base == array.stack)
88     {
89       if (i < LOCAL_ELEMS)
90 	{
91 	  base[i] = x;
92 	  return base;
93 	}
94       gcc_checking_assert (i == LOCAL_ELEMS);
95       /* A previous iteration might also have moved from the stack to the
96 	 heap, in which case the heap array will already be big enough.  */
97       if (vec_safe_length (array.heap) <= i)
98 	vec_safe_grow (array.heap, i + 1);
99       base = array.heap->address ();
100       memcpy (base, array.stack, sizeof (array.stack));
101       base[LOCAL_ELEMS] = x;
102       return base;
103     }
104   unsigned int length = array.heap->length ();
105   if (length > i)
106     {
107       gcc_checking_assert (base == array.heap->address ());
108       base[i] = x;
109       return base;
110     }
111   else
112     {
113       gcc_checking_assert (i == length);
114       vec_safe_push (array.heap, x);
115       return array.heap->address ();
116     }
117 }
118 
119 /* Add the subrtxes of X to worklist ARRAY, starting at END.  Return the
120    number of elements added to the worklist.  */
121 
122 template <typename T>
123 size_t
124 generic_subrtx_iterator <T>::add_subrtxes_to_queue (array_type &array,
125 						    value_type *base,
126 						    size_t end, rtx_type x)
127 {
128   enum rtx_code code = GET_CODE (x);
129   const char *format = GET_RTX_FORMAT (code);
130   size_t orig_end = end;
131   if (__builtin_expect (INSN_P (x), false))
132     {
133       /* Put the pattern at the top of the queue, since that's what
134 	 we're likely to want most.  It also allows for the SEQUENCE
135 	 code below.  */
136       for (int i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; --i)
137 	if (format[i] == 'e')
138 	  {
139 	    value_type subx = T::get_value (x->u.fld[i].rt_rtx);
140 	    if (__builtin_expect (end < LOCAL_ELEMS, true))
141 	      base[end++] = subx;
142 	    else
143 	      base = add_single_to_queue (array, base, end++, subx);
144 	  }
145     }
146   else
147     for (int i = 0; format[i]; ++i)
148       if (format[i] == 'e')
149 	{
150 	  value_type subx = T::get_value (x->u.fld[i].rt_rtx);
151 	  if (__builtin_expect (end < LOCAL_ELEMS, true))
152 	    base[end++] = subx;
153 	  else
154 	    base = add_single_to_queue (array, base, end++, subx);
155 	}
156       else if (format[i] == 'E')
157 	{
158 	  unsigned int length = GET_NUM_ELEM (x->u.fld[i].rt_rtvec);
159 	  rtx *vec = x->u.fld[i].rt_rtvec->elem;
160 	  if (__builtin_expect (end + length <= LOCAL_ELEMS, true))
161 	    for (unsigned int j = 0; j < length; j++)
162 	      base[end++] = T::get_value (vec[j]);
163 	  else
164 	    for (unsigned int j = 0; j < length; j++)
165 	      base = add_single_to_queue (array, base, end++,
166 					  T::get_value (vec[j]));
167 	  if (code == SEQUENCE && end == length)
168 	    /* If the subrtxes of the sequence fill the entire array then
169 	       we know that no other parts of a containing insn are queued.
170 	       The caller is therefore iterating over the sequence as a
171 	       PATTERN (...), so we also want the patterns of the
172 	       subinstructions.  */
173 	    for (unsigned int j = 0; j < length; j++)
174 	      {
175 		typename T::rtx_type x = T::get_rtx (base[j]);
176 		if (INSN_P (x))
177 		  base[j] = T::get_value (PATTERN (x));
178 	      }
179 	}
180   return end - orig_end;
181 }
182 
183 template <typename T>
184 void
185 generic_subrtx_iterator <T>::free_array (array_type &array)
186 {
187   vec_free (array.heap);
188 }
189 
190 template <typename T>
191 const size_t generic_subrtx_iterator <T>::LOCAL_ELEMS;
192 
193 template class generic_subrtx_iterator <const_rtx_accessor>;
194 template class generic_subrtx_iterator <rtx_var_accessor>;
195 template class generic_subrtx_iterator <rtx_ptr_accessor>;
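
/* A minimal usage sketch of the iterators instantiated above: the
   FOR_EACH_SUBRTX family of macros in rtl-iter.h walks an rtx and all of
   its subrtxes, e.g.

     subrtx_iterator::array_type array;
     FOR_EACH_SUBRTX (iter, array, x, ALL)
       if (MEM_P (*iter))
	 return true;

   which is essentially the body of contains_mem_rtx_p further down.  */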
196 
197 /* Return 1 if the value of X is unstable
198    (would be different at a different point in the program).
199    The frame pointer, arg pointer, etc. are considered stable
200    (within one function) and so is anything marked `unchanging'.  */
201 
202 int
203 rtx_unstable_p (const_rtx x)
204 {
205   const RTX_CODE code = GET_CODE (x);
206   int i;
207   const char *fmt;
208 
209   switch (code)
210     {
211     case MEM:
212       return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));
213 
214     case CONST:
215     CASE_CONST_ANY:
216     case SYMBOL_REF:
217     case LABEL_REF:
218       return 0;
219 
220     case REG:
221       /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
222       if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
223 	  /* The arg pointer varies if it is not a fixed register.  */
224 	  || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
225 	return 0;
226       /* ??? When call-clobbered, the value is stable modulo the restore
227 	 that must happen after a call.  This currently screws up local-alloc
228 	 into believing that the restore is not needed.  */
229       if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
230 	return 0;
231       return 1;
232 
233     case ASM_OPERANDS:
234       if (MEM_VOLATILE_P (x))
235 	return 1;
236 
237       /* Fall through.  */
238 
239     default:
240       break;
241     }
242 
243   fmt = GET_RTX_FORMAT (code);
244   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
245     if (fmt[i] == 'e')
246       {
247 	if (rtx_unstable_p (XEXP (x, i)))
248 	  return 1;
249       }
250     else if (fmt[i] == 'E')
251       {
252 	int j;
253 	for (j = 0; j < XVECLEN (x, i); j++)
254 	  if (rtx_unstable_p (XVECEXP (x, i, j)))
255 	    return 1;
256       }
257 
258   return 0;
259 }
260 
261 /* Return 1 if X has a value that can vary even between two
262    executions of the program.  0 means X can be compared reliably
263    against certain constants or near-constants.
264    FOR_ALIAS is nonzero if we are called from alias analysis; if it is
265    zero, we are slightly more conservative.
266    The frame pointer and the arg pointer are considered constant.  */
267 
268 bool
269 rtx_varies_p (const_rtx x, bool for_alias)
270 {
271   RTX_CODE code;
272   int i;
273   const char *fmt;
274 
275   if (!x)
276     return 0;
277 
278   code = GET_CODE (x);
279   switch (code)
280     {
281     case MEM:
282       return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);
283 
284     case CONST:
285     CASE_CONST_ANY:
286     case SYMBOL_REF:
287     case LABEL_REF:
288       return 0;
289 
290     case REG:
291       /* Note that we have to test for the actual rtx used for the frame
292 	 and arg pointers and not just the register number in case we have
293 	 eliminated the frame and/or arg pointer and are using it
294 	 for pseudos.  */
295       if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
296 	  /* The arg pointer varies if it is not a fixed register.  */
297 	  || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
298 	return 0;
299       if (x == pic_offset_table_rtx
300 	  /* ??? When call-clobbered, the value is stable modulo the restore
301 	     that must happen after a call.  This currently screws up
302 	     local-alloc into believing that the restore is not needed, so we
303 	     must return 0 only if we are called from alias analysis.  */
304 	  && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
305 	return 0;
306       return 1;
307 
308     case LO_SUM:
309       /* The operand 0 of a LO_SUM is considered constant
310 	 (in fact it is related specifically to operand 1)
311 	 during alias analysis.  */
312       return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
313 	     || rtx_varies_p (XEXP (x, 1), for_alias);
314 
315     case ASM_OPERANDS:
316       if (MEM_VOLATILE_P (x))
317 	return 1;
318 
319       /* Fall through.  */
320 
321     default:
322       break;
323     }
324 
325   fmt = GET_RTX_FORMAT (code);
326   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
327     if (fmt[i] == 'e')
328       {
329 	if (rtx_varies_p (XEXP (x, i), for_alias))
330 	  return 1;
331       }
332     else if (fmt[i] == 'E')
333       {
334 	int j;
335 	for (j = 0; j < XVECLEN (x, i); j++)
336 	  if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
337 	    return 1;
338       }
339 
340   return 0;
341 }
342 
343 /* Compute an approximation for the offset between the register
344    FROM and TO for the current function, as it was at the start
345    of the routine.  */
346 
347 static poly_int64
348 get_initial_register_offset (int from, int to)
349 {
350   static const struct elim_table_t
351   {
352     const int from;
353     const int to;
354   } table[] = ELIMINABLE_REGS;
355   poly_int64 offset1, offset2;
356   unsigned int i, j;
357 
358   if (to == from)
359     return 0;
360 
361   /* It is not safe to call INITIAL_ELIMINATION_OFFSET
362      before the reload pass.  We need to give at least
363      an estimate of the resulting frame size.  */
364   if (! reload_completed)
365     {
366       offset1 = crtl->outgoing_args_size + get_frame_size ();
367 #if !STACK_GROWS_DOWNWARD
368       offset1 = - offset1;
369 #endif
370       if (to == STACK_POINTER_REGNUM)
371 	return offset1;
372       else if (from == STACK_POINTER_REGNUM)
373 	return - offset1;
374       else
375 	return 0;
376      }
377 
378   for (i = 0; i < ARRAY_SIZE (table); i++)
379       if (table[i].from == from)
380 	{
381 	  if (table[i].to == to)
382 	    {
383 	      INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
384 					  offset1);
385 	      return offset1;
386 	    }
387 	  for (j = 0; j < ARRAY_SIZE (table); j++)
388 	    {
389 	      if (table[j].to == to
390 		  && table[j].from == table[i].to)
391 		{
392 		  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
393 					      offset1);
394 		  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
395 					      offset2);
396 		  return offset1 + offset2;
397 		}
398 	      if (table[j].from == to
399 		  && table[j].to == table[i].to)
400 		{
401 		  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
402 					      offset1);
403 		  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
404 					      offset2);
405 		  return offset1 - offset2;
406 		}
407 	    }
408 	}
409       else if (table[i].to == from)
410 	{
411 	  if (table[i].from == to)
412 	    {
413 	      INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
414 					  offset1);
415 	      return - offset1;
416 	    }
417 	  for (j = 0; j < ARRAY_SIZE (table); j++)
418 	    {
419 	      if (table[j].to == to
420 		  && table[j].from == table[i].from)
421 		{
422 		  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
423 					      offset1);
424 		  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
425 					      offset2);
426 		  return - offset1 + offset2;
427 		}
428 	      if (table[j].from == to
429 		  && table[j].to == table[i].from)
430 		{
431 		  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
432 					      offset1);
433 		  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
434 					      offset2);
435 		  return - offset1 - offset2;
436 		}
437 	    }
438 	}
439 
440   /* If the requested register combination was not found,
441      try a simpler combination.  */
442   if (from == ARG_POINTER_REGNUM)
443     return get_initial_register_offset (HARD_FRAME_POINTER_REGNUM, to);
444   else if (to == ARG_POINTER_REGNUM)
445     return get_initial_register_offset (from, HARD_FRAME_POINTER_REGNUM);
446   else if (from == HARD_FRAME_POINTER_REGNUM)
447     return get_initial_register_offset (FRAME_POINTER_REGNUM, to);
448   else if (to == HARD_FRAME_POINTER_REGNUM)
449     return get_initial_register_offset (from, FRAME_POINTER_REGNUM);
450   else
451     return 0;
452 }
453 
454 /* Return nonzero if the use of X+OFFSET as an address in a MEM with SIZE
455    bytes can cause a trap.  MODE is the mode of the MEM (not that of X) and
456    UNALIGNED_MEMS controls whether nonzero is returned for unaligned memory
457    references on strict alignment machines.  */
458 
459 static int
460 rtx_addr_can_trap_p_1 (const_rtx x, poly_int64 offset, poly_int64 size,
461 		       machine_mode mode, bool unaligned_mems)
462 {
463   enum rtx_code code = GET_CODE (x);
464   gcc_checking_assert (mode == BLKmode || known_size_p (size));
465 
466   /* The offset must be a multiple of the mode size if we are considering
467      unaligned memory references on strict alignment machines.  */
468   if (STRICT_ALIGNMENT && unaligned_mems && mode != BLKmode)
469     {
470       poly_int64 actual_offset = offset;
471 
472 #ifdef SPARC_STACK_BOUNDARY_HACK
473       /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
474 	     the real alignment of %sp.  However, when it does this, the
475 	     alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY.  */
476       if (SPARC_STACK_BOUNDARY_HACK
477 	  && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
478 	actual_offset -= STACK_POINTER_OFFSET;
479 #endif
480 
481       if (!multiple_p (actual_offset, GET_MODE_SIZE (mode)))
482 	return 1;
483     }
484 
485   switch (code)
486     {
487     case SYMBOL_REF:
488       if (SYMBOL_REF_WEAK (x))
489 	return 1;
490       if (!CONSTANT_POOL_ADDRESS_P (x) && !SYMBOL_REF_FUNCTION_P (x))
491 	{
492 	  tree decl;
493 	  poly_int64 decl_size;
494 
495 	  if (maybe_lt (offset, 0))
496 	    return 1;
497 	  if (!known_size_p (size))
498 	    return maybe_ne (offset, 0);
499 
500 	  /* If the size of the access or of the symbol is unknown,
501 	     assume the worst.  */
502 	  decl = SYMBOL_REF_DECL (x);
503 
504 	  /* Else check that the access is in bounds.  TODO: restructure
505 	     expr_size/tree_expr_size/int_expr_size and just use the latter.  */
506 	  if (!decl)
507 	    decl_size = -1;
508 	  else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
509 	    {
510 	      if (!poly_int_tree_p (DECL_SIZE_UNIT (decl), &decl_size))
511 		decl_size = -1;
512 	    }
513 	  else if (TREE_CODE (decl) == STRING_CST)
514 	    decl_size = TREE_STRING_LENGTH (decl);
515 	  else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
516 	    decl_size = int_size_in_bytes (TREE_TYPE (decl));
517 	  else
518 	    decl_size = -1;
519 
520 	  return (!known_size_p (decl_size) || known_eq (decl_size, 0)
521 		  ? maybe_ne (offset, 0)
522 		  : maybe_gt (offset + size, decl_size));
523         }
524 
525       return 0;
526 
527     case LABEL_REF:
528       return 0;
529 
530     case REG:
531       /* Stack references are assumed not to trap, but we need to deal with
532 	 nonsensical offsets.  */
533       if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
534 	 || x == stack_pointer_rtx
535 	 /* The arg pointer varies if it is not a fixed register.  */
536 	 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
537 	{
538 #ifdef RED_ZONE_SIZE
539 	  poly_int64 red_zone_size = RED_ZONE_SIZE;
540 #else
541 	  poly_int64 red_zone_size = 0;
542 #endif
543 	  poly_int64 stack_boundary = PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT;
544 	  poly_int64 low_bound, high_bound;
545 
546 	  if (!known_size_p (size))
547 	    return 1;
548 
549 	  if (x == frame_pointer_rtx)
550 	    {
551 	      if (FRAME_GROWS_DOWNWARD)
552 		{
553 		  high_bound = targetm.starting_frame_offset ();
554 		  low_bound  = high_bound - get_frame_size ();
555 		}
556 	      else
557 		{
558 		  low_bound  = targetm.starting_frame_offset ();
559 		  high_bound = low_bound + get_frame_size ();
560 		}
561 	    }
562 	  else if (x == hard_frame_pointer_rtx)
563 	    {
564 	      poly_int64 sp_offset
565 		= get_initial_register_offset (STACK_POINTER_REGNUM,
566 					       HARD_FRAME_POINTER_REGNUM);
567 	      poly_int64 ap_offset
568 		= get_initial_register_offset (ARG_POINTER_REGNUM,
569 					       HARD_FRAME_POINTER_REGNUM);
570 
571 #if STACK_GROWS_DOWNWARD
572 	      low_bound  = sp_offset - red_zone_size - stack_boundary;
573 	      high_bound = ap_offset
574 			   + FIRST_PARM_OFFSET (current_function_decl)
575 #if !ARGS_GROW_DOWNWARD
576 			   + crtl->args.size
577 #endif
578 			   + stack_boundary;
579 #else
580 	      high_bound = sp_offset + red_zone_size + stack_boundary;
581 	      low_bound  = ap_offset
582 			   + FIRST_PARM_OFFSET (current_function_decl)
583 #if ARGS_GROW_DOWNWARD
584 			   - crtl->args.size
585 #endif
586 			   - stack_boundary;
587 #endif
588 	    }
589 	  else if (x == stack_pointer_rtx)
590 	    {
591 	      poly_int64 ap_offset
592 		= get_initial_register_offset (ARG_POINTER_REGNUM,
593 					       STACK_POINTER_REGNUM);
594 
595 #if STACK_GROWS_DOWNWARD
596 	      low_bound  = - red_zone_size - stack_boundary;
597 	      high_bound = ap_offset
598 			   + FIRST_PARM_OFFSET (current_function_decl)
599 #if !ARGS_GROW_DOWNWARD
600 			   + crtl->args.size
601 #endif
602 			   + stack_boundary;
603 #else
604 	      high_bound = red_zone_size + stack_boundary;
605 	      low_bound  = ap_offset
606 			   + FIRST_PARM_OFFSET (current_function_decl)
607 #if ARGS_GROW_DOWNWARD
608 			   - crtl->args.size
609 #endif
610 			   - stack_boundary;
611 #endif
612 	    }
613 	  else
614 	    {
615 	      /* We assume that accesses are safe to at least the
616 		 next stack boundary.
617 		 Examples are varargs and __builtin_return_address.  */
618 #if ARGS_GROW_DOWNWARD
619 	      high_bound = FIRST_PARM_OFFSET (current_function_decl)
620 			   + stack_boundary;
621 	      low_bound  = FIRST_PARM_OFFSET (current_function_decl)
622 			   - crtl->args.size - stack_boundary;
623 #else
624 	      low_bound  = FIRST_PARM_OFFSET (current_function_decl)
625 			   - stack_boundary;
626 	      high_bound = FIRST_PARM_OFFSET (current_function_decl)
627 			   + crtl->args.size + stack_boundary;
628 #endif
629 	    }
630 
631 	  if (known_ge (offset, low_bound)
632 	      && known_le (offset, high_bound - size))
633 	    return 0;
634 	  return 1;
635 	}
636       /* All of the virtual frame registers are stack references.  */
637       if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
638 	  && REGNO (x) <= LAST_VIRTUAL_REGISTER)
639 	return 0;
640       return 1;
641 
642     case CONST:
643       return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
644 				    mode, unaligned_mems);
645 
646     case PLUS:
647       /* An address is assumed not to trap if:
648 	 - it is the pic register plus a const unspec without offset.  */
649       if (XEXP (x, 0) == pic_offset_table_rtx
650 	  && GET_CODE (XEXP (x, 1)) == CONST
651 	  && GET_CODE (XEXP (XEXP (x, 1), 0)) == UNSPEC
652 	  && known_eq (offset, 0))
653 	return 0;
654 
655       /* - or it is an address that can't trap plus a constant integer.  */
656       if (CONST_INT_P (XEXP (x, 1))
657 	  && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + INTVAL (XEXP (x, 1)),
658 				     size, mode, unaligned_mems))
659 	return 0;
660 
661       return 1;
662 
663     case LO_SUM:
664     case PRE_MODIFY:
665       return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
666 				    mode, unaligned_mems);
667 
668     case PRE_DEC:
669     case PRE_INC:
670     case POST_DEC:
671     case POST_INC:
672     case POST_MODIFY:
673       return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
674 				    mode, unaligned_mems);
675 
676     default:
677       break;
678     }
679 
680   /* If it isn't one of the cases above, it can cause a trap.  */
681   return 1;
682 }
683 
684 /* Return nonzero if the use of X as an address in a MEM can cause a trap.  */
685 
686 int
687 rtx_addr_can_trap_p (const_rtx x)
688 {
689   return rtx_addr_can_trap_p_1 (x, 0, -1, BLKmode, false);
690 }
691 
692 /* Return true if X contains a MEM subrtx.  */
693 
694 bool
695 contains_mem_rtx_p (rtx x)
696 {
697   subrtx_iterator::array_type array;
698   FOR_EACH_SUBRTX (iter, array, x, ALL)
699     if (MEM_P (*iter))
700       return true;
701 
702   return false;
703 }
704 
705 /* Return true if X is an address that is known to not be zero.  */
706 
707 bool
708 nonzero_address_p (const_rtx x)
709 {
710   const enum rtx_code code = GET_CODE (x);
711 
712   switch (code)
713     {
714     case SYMBOL_REF:
715       return flag_delete_null_pointer_checks && !SYMBOL_REF_WEAK (x);
716 
717     case LABEL_REF:
718       return true;
719 
720     case REG:
721       /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
722       if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
723 	  || x == stack_pointer_rtx
724 	  || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
725 	return true;
726       /* All of the virtual frame registers are stack references.  */
727       if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
728 	  && REGNO (x) <= LAST_VIRTUAL_REGISTER)
729 	return true;
730       return false;
731 
732     case CONST:
733       return nonzero_address_p (XEXP (x, 0));
734 
735     case PLUS:
736       /* Handle PIC references.  */
737       if (XEXP (x, 0) == pic_offset_table_rtx
738 	       && CONSTANT_P (XEXP (x, 1)))
739 	return true;
740       return false;
741 
742     case PRE_MODIFY:
743       /* Similar to the above; allow positive offsets.  Further, since
744 	 auto-inc is only allowed in memories, the register must be a
745 	 pointer.  */
746       if (CONST_INT_P (XEXP (x, 1))
747 	  && INTVAL (XEXP (x, 1)) > 0)
748 	return true;
749       return nonzero_address_p (XEXP (x, 0));
750 
751     case PRE_INC:
752       /* Similarly.  Further, the offset is always positive.  */
753       return true;
754 
755     case PRE_DEC:
756     case POST_DEC:
757     case POST_INC:
758     case POST_MODIFY:
759       return nonzero_address_p (XEXP (x, 0));
760 
761     case LO_SUM:
762       return nonzero_address_p (XEXP (x, 1));
763 
764     default:
765       break;
766     }
767 
768   /* If it isn't one of the cases above, it might be zero.  */
769   return false;
770 }
771 
772 /* Return 1 if X refers to a memory location whose address
773    cannot be compared reliably with constant addresses,
774    or if X refers to a BLKmode memory object.
775    FOR_ALIAS is nonzero if we are called from alias analysis; if it is
776    zero, we are slightly more conservative.  */
777 
778 bool
779 rtx_addr_varies_p (const_rtx x, bool for_alias)
780 {
781   enum rtx_code code;
782   int i;
783   const char *fmt;
784 
785   if (x == 0)
786     return 0;
787 
788   code = GET_CODE (x);
789   if (code == MEM)
790     return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);
791 
792   fmt = GET_RTX_FORMAT (code);
793   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
794     if (fmt[i] == 'e')
795       {
796 	if (rtx_addr_varies_p (XEXP (x, i), for_alias))
797 	  return 1;
798       }
799     else if (fmt[i] == 'E')
800       {
801 	int j;
802 	for (j = 0; j < XVECLEN (x, i); j++)
803 	  if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
804 	    return 1;
805       }
806   return 0;
807 }
808 
809 /* Return the CALL in X if there is one.  */
810 
811 rtx
812 get_call_rtx_from (rtx x)
813 {
814   if (INSN_P (x))
815     x = PATTERN (x);
816   if (GET_CODE (x) == PARALLEL)
817     x = XVECEXP (x, 0, 0);
818   if (GET_CODE (x) == SET)
819     x = SET_SRC (x);
820   if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
821     return x;
822   return NULL_RTX;
823 }
824 
825 /* Return the value of the integer term in X, if one is apparent;
826    otherwise return 0.
827    Only obvious integer terms are detected.
828    This is used in cse.c with the `related_value' field.  */
829 
830 HOST_WIDE_INT
831 get_integer_term (const_rtx x)
832 {
833   if (GET_CODE (x) == CONST)
834     x = XEXP (x, 0);
835 
836   if (GET_CODE (x) == MINUS
837       && CONST_INT_P (XEXP (x, 1)))
838     return - INTVAL (XEXP (x, 1));
839   if (GET_CODE (x) == PLUS
840       && CONST_INT_P (XEXP (x, 1)))
841     return INTVAL (XEXP (x, 1));
842   return 0;
843 }
844 
845 /* If X is a constant, return the value sans apparent integer term;
846    otherwise return 0.
847    Only obvious integer terms are detected.  */
848 
849 rtx
850 get_related_value (const_rtx x)
851 {
852   if (GET_CODE (x) != CONST)
853     return 0;
854   x = XEXP (x, 0);
855   if (GET_CODE (x) == PLUS
856       && CONST_INT_P (XEXP (x, 1)))
857     return XEXP (x, 0);
858   else if (GET_CODE (x) == MINUS
859 	   && CONST_INT_P (XEXP (x, 1)))
860     return XEXP (x, 0);
861   return 0;
862 }
863 
864 /* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
865    to somewhere in the same object or object_block as SYMBOL.  */
866 
867 bool
868 offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
869 {
870   tree decl;
871 
872   if (GET_CODE (symbol) != SYMBOL_REF)
873     return false;
874 
875   if (offset == 0)
876     return true;
877 
878   if (offset > 0)
879     {
880       if (CONSTANT_POOL_ADDRESS_P (symbol)
881 	  && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
882 	return true;
883 
884       decl = SYMBOL_REF_DECL (symbol);
885       if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
886 	return true;
887     }
888 
889   if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
890       && SYMBOL_REF_BLOCK (symbol)
891       && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
892       && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
893 	  < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
894     return true;
895 
896   return false;
897 }
898 
899 /* Split X into a base and a constant offset, storing them in *BASE_OUT
900    and *OFFSET_OUT respectively.  */
901 
902 void
903 split_const (rtx x, rtx *base_out, rtx *offset_out)
904 {
905   if (GET_CODE (x) == CONST)
906     {
907       x = XEXP (x, 0);
908       if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
909 	{
910 	  *base_out = XEXP (x, 0);
911 	  *offset_out = XEXP (x, 1);
912 	  return;
913 	}
914     }
915   *base_out = x;
916   *offset_out = const0_rtx;
917 }
918 
919 /* Express integer value X as some value Y plus a polynomial offset,
920    where Y is either const0_rtx, X or something within X (as opposed
921    to a new rtx).  Return the Y and store the offset in *OFFSET_OUT.  */
922 
923 rtx
924 strip_offset (rtx x, poly_int64_pod *offset_out)
925 {
926   rtx base = const0_rtx;
927   rtx test = x;
928   if (GET_CODE (test) == CONST)
929     test = XEXP (test, 0);
930   if (GET_CODE (test) == PLUS)
931     {
932       base = XEXP (test, 0);
933       test = XEXP (test, 1);
934     }
935   if (poly_int_rtx_p (test, offset_out))
936     return base;
937   *offset_out = 0;
938   return x;
939 }
940 
941 /* Return the argument size in REG_ARGS_SIZE note X.  */
942 
943 poly_int64
944 get_args_size (const_rtx x)
945 {
946   gcc_checking_assert (REG_NOTE_KIND (x) == REG_ARGS_SIZE);
947   return rtx_to_poly_int64 (XEXP (x, 0));
948 }
949 
950 /* Return the number of places FIND appears within X.  If COUNT_DEST is
951    zero, we do not count occurrences inside the destination of a SET.  */
952 
953 int
954 count_occurrences (const_rtx x, const_rtx find, int count_dest)
955 {
956   int i, j;
957   enum rtx_code code;
958   const char *format_ptr;
959   int count;
960 
961   if (x == find)
962     return 1;
963 
964   code = GET_CODE (x);
965 
966   switch (code)
967     {
968     case REG:
969     CASE_CONST_ANY:
970     case SYMBOL_REF:
971     case CODE_LABEL:
972     case PC:
973     case CC0:
974       return 0;
975 
976     case EXPR_LIST:
977       count = count_occurrences (XEXP (x, 0), find, count_dest);
978       if (XEXP (x, 1))
979 	count += count_occurrences (XEXP (x, 1), find, count_dest);
980       return count;
981 
982     case MEM:
983       if (MEM_P (find) && rtx_equal_p (x, find))
984 	return 1;
985       break;
986 
987     case SET:
988       if (SET_DEST (x) == find && ! count_dest)
989 	return count_occurrences (SET_SRC (x), find, count_dest);
990       break;
991 
992     default:
993       break;
994     }
995 
996   format_ptr = GET_RTX_FORMAT (code);
997   count = 0;
998 
999   for (i = 0; i < GET_RTX_LENGTH (code); i++)
1000     {
1001       switch (*format_ptr++)
1002 	{
1003 	case 'e':
1004 	  count += count_occurrences (XEXP (x, i), find, count_dest);
1005 	  break;
1006 
1007 	case 'E':
1008 	  for (j = 0; j < XVECLEN (x, i); j++)
1009 	    count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
1010 	  break;
1011 	}
1012     }
1013   return count;
1014 }
1015 
1016 
1017 /* Return TRUE if OP is a register or subreg of a register that
1018    holds an unsigned quantity.  Otherwise, return FALSE.  */
1019 
1020 bool
1021 unsigned_reg_p (rtx op)
1022 {
1023   if (REG_P (op)
1024       && REG_EXPR (op)
1025       && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
1026     return true;
1027 
1028   if (GET_CODE (op) == SUBREG
1029       && SUBREG_PROMOTED_SIGN (op))
1030     return true;
1031 
1032   return false;
1033 }
1034 
1035 
1036 /* Nonzero if register REG appears somewhere within IN.
1037    Also works if REG is not a register; in this case it checks
1038    for a subexpression of IN that is Lisp "equal" to REG.  */
1039 
1040 int
1041 reg_mentioned_p (const_rtx reg, const_rtx in)
1042 {
1043   const char *fmt;
1044   int i;
1045   enum rtx_code code;
1046 
1047   if (in == 0)
1048     return 0;
1049 
1050   if (reg == in)
1051     return 1;
1052 
1053   if (GET_CODE (in) == LABEL_REF)
1054     return reg == label_ref_label (in);
1055 
1056   code = GET_CODE (in);
1057 
1058   switch (code)
1059     {
1060       /* Compare registers by number.  */
1061     case REG:
1062       return REG_P (reg) && REGNO (in) == REGNO (reg);
1063 
1064       /* These codes have no constituent expressions
1065 	 and are unique.  */
1066     case SCRATCH:
1067     case CC0:
1068     case PC:
1069       return 0;
1070 
1071     CASE_CONST_ANY:
1072       /* These are kept unique for a given value.  */
1073       return 0;
1074 
1075     default:
1076       break;
1077     }
1078 
1079   if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
1080     return 1;
1081 
1082   fmt = GET_RTX_FORMAT (code);
1083 
1084   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1085     {
1086       if (fmt[i] == 'E')
1087 	{
1088 	  int j;
1089 	  for (j = XVECLEN (in, i) - 1; j >= 0; j--)
1090 	    if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
1091 	      return 1;
1092 	}
1093       else if (fmt[i] == 'e'
1094 	       && reg_mentioned_p (reg, XEXP (in, i)))
1095 	return 1;
1096     }
1097   return 0;
1098 }
1099 
1100 /* Return 1 if in between BEG and END, exclusive of BEG and END, there is
1101    no CODE_LABEL insn.  */
1102 
1103 int
1104 no_labels_between_p (const rtx_insn *beg, const rtx_insn *end)
1105 {
1106   rtx_insn *p;
1107   if (beg == end)
1108     return 0;
1109   for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
1110     if (LABEL_P (p))
1111       return 0;
1112   return 1;
1113 }
1114 
1115 /* Nonzero if register REG is used in an insn between
1116    FROM_INSN and TO_INSN (exclusive of those two).  */
1117 
1118 int
1119 reg_used_between_p (const_rtx reg, const rtx_insn *from_insn,
1120 		    const rtx_insn *to_insn)
1121 {
1122   rtx_insn *insn;
1123 
1124   if (from_insn == to_insn)
1125     return 0;
1126 
1127   for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
1128     if (NONDEBUG_INSN_P (insn)
1129 	&& (reg_overlap_mentioned_p (reg, PATTERN (insn))
1130 	   || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
1131       return 1;
1132   return 0;
1133 }
1134 
1135 /* Nonzero if the old value of X, a register, is referenced in BODY.  If X
1136    is entirely replaced by a new value and the only use is as a SET_DEST,
1137    we do not consider it a reference.  */
1138 
1139 int
1140 reg_referenced_p (const_rtx x, const_rtx body)
1141 {
1142   int i;
1143 
1144   switch (GET_CODE (body))
1145     {
1146     case SET:
1147       if (reg_overlap_mentioned_p (x, SET_SRC (body)))
1148 	return 1;
1149 
1150       /* If the destination is anything other than CC0, PC, a REG or a SUBREG
1151 	 of a REG that occupies all of the REG, the insn references X if
1152 	 it is mentioned in the destination.  */
1153       if (GET_CODE (SET_DEST (body)) != CC0
1154 	  && GET_CODE (SET_DEST (body)) != PC
1155 	  && !REG_P (SET_DEST (body))
1156 	  && ! (GET_CODE (SET_DEST (body)) == SUBREG
1157 		&& REG_P (SUBREG_REG (SET_DEST (body)))
1158 		&& !read_modify_subreg_p (SET_DEST (body)))
1159 	  && reg_overlap_mentioned_p (x, SET_DEST (body)))
1160 	return 1;
1161       return 0;
1162 
1163     case ASM_OPERANDS:
1164       for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
1165 	if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
1166 	  return 1;
1167       return 0;
1168 
1169     case CALL:
1170     case USE:
1171     case IF_THEN_ELSE:
1172       return reg_overlap_mentioned_p (x, body);
1173 
1174     case TRAP_IF:
1175       return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));
1176 
1177     case PREFETCH:
1178       return reg_overlap_mentioned_p (x, XEXP (body, 0));
1179 
1180     case UNSPEC:
1181     case UNSPEC_VOLATILE:
1182       for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1183 	if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
1184 	  return 1;
1185       return 0;
1186 
1187     case PARALLEL:
1188       for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1189 	if (reg_referenced_p (x, XVECEXP (body, 0, i)))
1190 	  return 1;
1191       return 0;
1192 
1193     case CLOBBER:
1194       if (MEM_P (XEXP (body, 0)))
1195 	if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
1196 	  return 1;
1197       return 0;
1198 
1199     case COND_EXEC:
1200       if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
1201 	return 1;
1202       return reg_referenced_p (x, COND_EXEC_CODE (body));
1203 
1204     default:
1205       return 0;
1206     }
1207 }
1208 
1209 /* Nonzero if register REG is set or clobbered in an insn between
1210    FROM_INSN and TO_INSN (exclusive of those two).  */
1211 
1212 int
1213 reg_set_between_p (const_rtx reg, const rtx_insn *from_insn,
1214 		   const rtx_insn *to_insn)
1215 {
1216   const rtx_insn *insn;
1217 
1218   if (from_insn == to_insn)
1219     return 0;
1220 
1221   for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
1222     if (INSN_P (insn) && reg_set_p (reg, insn))
1223       return 1;
1224   return 0;
1225 }
1226 
1227 /* Return true if REG is set or clobbered inside INSN.  */
1228 
1229 int
1230 reg_set_p (const_rtx reg, const_rtx insn)
1231 {
1232   /* After delay slot handling, call and branch insns might be in a
1233      sequence.  Check all the elements there.  */
1234   if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
1235     {
1236       for (int i = 0; i < XVECLEN (PATTERN (insn), 0); ++i)
1237 	if (reg_set_p (reg, XVECEXP (PATTERN (insn), 0, i)))
1238 	  return true;
1239 
1240       return false;
1241     }
1242 
1243   /* We can be passed an insn or part of one.  If we are passed an insn,
1244      check if a side-effect of the insn clobbers REG.  */
1245   if (INSN_P (insn)
1246       && (FIND_REG_INC_NOTE (insn, reg)
1247 	  || (CALL_P (insn)
1248 	      && ((REG_P (reg)
1249 		   && REGNO (reg) < FIRST_PSEUDO_REGISTER
1250 		   && overlaps_hard_reg_set_p (regs_invalidated_by_call,
1251 					       GET_MODE (reg), REGNO (reg)))
1252 		  || MEM_P (reg)
1253 		  || find_reg_fusage (insn, CLOBBER, reg)))))
1254     return true;
1255 
1256   /* There are no REG_INC notes for SP autoinc.  */
1257   if (reg == stack_pointer_rtx && INSN_P (insn))
1258     {
1259       subrtx_var_iterator::array_type array;
1260       FOR_EACH_SUBRTX_VAR (iter, array, PATTERN (insn), NONCONST)
1261 	{
1262 	  rtx mem = *iter;
1263 	  if (mem
1264 	      && MEM_P (mem)
1265 	      && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
1266 	    {
1267 	      if (XEXP (XEXP (mem, 0), 0) == stack_pointer_rtx)
1268 		return true;
1269 	      iter.skip_subrtxes ();
1270 	    }
1271 	}
1272     }
1273 
1274   return set_of (reg, insn) != NULL_RTX;
1275 }
1276 
1277 /* Similar to reg_set_between_p, but check all registers in X.  Return 0
1278    only if none of them are modified between START and END.  Return 1 if
1279    X contains a MEM; this routine does use memory aliasing.  */
1280 
1281 int
1282 modified_between_p (const_rtx x, const rtx_insn *start, const rtx_insn *end)
1283 {
1284   const enum rtx_code code = GET_CODE (x);
1285   const char *fmt;
1286   int i, j;
1287   rtx_insn *insn;
1288 
1289   if (start == end)
1290     return 0;
1291 
1292   switch (code)
1293     {
1294     CASE_CONST_ANY:
1295     case CONST:
1296     case SYMBOL_REF:
1297     case LABEL_REF:
1298       return 0;
1299 
1300     case PC:
1301     case CC0:
1302       return 1;
1303 
1304     case MEM:
1305       if (modified_between_p (XEXP (x, 0), start, end))
1306 	return 1;
1307       if (MEM_READONLY_P (x))
1308 	return 0;
1309       for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
1310 	if (memory_modified_in_insn_p (x, insn))
1311 	  return 1;
1312       return 0;
1313 
1314     case REG:
1315       return reg_set_between_p (x, start, end);
1316 
1317     default:
1318       break;
1319     }
1320 
1321   fmt = GET_RTX_FORMAT (code);
1322   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1323     {
1324       if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
1325 	return 1;
1326 
1327       else if (fmt[i] == 'E')
1328 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1329 	  if (modified_between_p (XVECEXP (x, i, j), start, end))
1330 	    return 1;
1331     }
1332 
1333   return 0;
1334 }
1335 
1336 /* Similar to reg_set_p, but check all registers in X.  Return 0 only if none
1337    of them are modified in INSN.  Return 1 if X contains a MEM; this routine
1338    does use memory aliasing.  */
1339 
1340 int
1341 modified_in_p (const_rtx x, const_rtx insn)
1342 {
1343   const enum rtx_code code = GET_CODE (x);
1344   const char *fmt;
1345   int i, j;
1346 
1347   switch (code)
1348     {
1349     CASE_CONST_ANY:
1350     case CONST:
1351     case SYMBOL_REF:
1352     case LABEL_REF:
1353       return 0;
1354 
1355     case PC:
1356     case CC0:
1357       return 1;
1358 
1359     case MEM:
1360       if (modified_in_p (XEXP (x, 0), insn))
1361 	return 1;
1362       if (MEM_READONLY_P (x))
1363 	return 0;
1364       if (memory_modified_in_insn_p (x, insn))
1365 	return 1;
1366       return 0;
1367 
1368     case REG:
1369       return reg_set_p (x, insn);
1370 
1371     default:
1372       break;
1373     }
1374 
1375   fmt = GET_RTX_FORMAT (code);
1376   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1377     {
1378       if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
1379 	return 1;
1380 
1381       else if (fmt[i] == 'E')
1382 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1383 	  if (modified_in_p (XVECEXP (x, i, j), insn))
1384 	    return 1;
1385     }
1386 
1387   return 0;
1388 }
1389 
1390 /* Return true if X is a SUBREG and if storing a value to X would
1391    preserve some of its SUBREG_REG.  For example, on a normal 32-bit
1392    target, using a SUBREG to store to one half of a DImode REG would
1393    preserve the other half.  */
1394 
1395 bool
1396 read_modify_subreg_p (const_rtx x)
1397 {
1398   if (GET_CODE (x) != SUBREG)
1399     return false;
1400   poly_uint64 isize = GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)));
1401   poly_uint64 osize = GET_MODE_SIZE (GET_MODE (x));
1402   poly_uint64 regsize = REGMODE_NATURAL_SIZE (GET_MODE (SUBREG_REG (x)));
1403   /* The inner and outer modes of a subreg must be ordered, so that we
1404      can tell whether they're paradoxical or partial.  */
1405   gcc_checking_assert (ordered_p (isize, osize));
1406   return (maybe_gt (isize, osize) && maybe_gt (isize, regsize));
1407 }
1408 
1409 /* Helper function for set_of.  */
1410 struct set_of_data
1411   {
1412     const_rtx found;
1413     const_rtx pat;
1414   };
1415 
1416 static void
1417 set_of_1 (rtx x, const_rtx pat, void *data1)
1418 {
1419   struct set_of_data *const data = (struct set_of_data *) (data1);
1420   if (rtx_equal_p (x, data->pat)
1421       || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
1422     data->found = pat;
1423 }
1424 
1425 /* Given an INSN, return a SET or CLOBBER expression that does modify PAT
1426    (either directly or via STRICT_LOW_PART and similar modifiers).  */
1427 const_rtx
1428 set_of (const_rtx pat, const_rtx insn)
1429 {
1430   struct set_of_data data;
1431   data.found = NULL_RTX;
1432   data.pat = pat;
1433   note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
1434   return data.found;
1435 }
1436 
1437 /* Add all hard registers in X to *PSET.  */
1438 void
1439 find_all_hard_regs (const_rtx x, HARD_REG_SET *pset)
1440 {
1441   subrtx_iterator::array_type array;
1442   FOR_EACH_SUBRTX (iter, array, x, NONCONST)
1443     {
1444       const_rtx x = *iter;
1445       if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
1446 	add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
1447     }
1448 }
1449 
1450 /* This function, called through note_stores, collects sets and
1451    clobbers of hard registers in a HARD_REG_SET, which is pointed to
1452    by DATA.  */
1453 void
1454 record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
1455 {
1456   HARD_REG_SET *pset = (HARD_REG_SET *)data;
1457   if (REG_P (x) && HARD_REGISTER_P (x))
1458     add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
1459 }
1460 
1461 /* Examine INSN, and compute the set of hard registers written by it.
1462    Store it in *PSET.  Should only be called after reload.  */
1463 void
1464 find_all_hard_reg_sets (const rtx_insn *insn, HARD_REG_SET *pset, bool implicit)
1465 {
1466   rtx link;
1467 
1468   CLEAR_HARD_REG_SET (*pset);
1469   note_stores (PATTERN (insn), record_hard_reg_sets, pset);
1470   if (CALL_P (insn))
1471     {
1472       if (implicit)
1473 	IOR_HARD_REG_SET (*pset, call_used_reg_set);
1474 
1475       for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
1476 	record_hard_reg_sets (XEXP (link, 0), NULL, pset);
1477     }
1478   for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1479     if (REG_NOTE_KIND (link) == REG_INC)
1480       record_hard_reg_sets (XEXP (link, 0), NULL, pset);
1481 }
1482 
1483 /* Like record_hard_reg_sets, but called through note_uses.  */
1484 void
1485 record_hard_reg_uses (rtx *px, void *data)
1486 {
1487   find_all_hard_regs (*px, (HARD_REG_SET *) data);
1488 }
1489 
1490 /* Given an INSN, return a SET expression if this insn has only a single SET.
1491    It may also have CLOBBERs, USEs, or SETs whose output
1492    will not be used, which we ignore.  */
1493 
1494 rtx
1495 single_set_2 (const rtx_insn *insn, const_rtx pat)
1496 {
1497   rtx set = NULL;
1498   int set_verified = 1;
1499   int i;
1500 
1501   if (GET_CODE (pat) == PARALLEL)
1502     {
1503       for (i = 0; i < XVECLEN (pat, 0); i++)
1504 	{
1505 	  rtx sub = XVECEXP (pat, 0, i);
1506 	  switch (GET_CODE (sub))
1507 	    {
1508 	    case USE:
1509 	    case CLOBBER:
1510 	      break;
1511 
1512 	    case SET:
1513 	      /* We can consider insns having multiple sets, where all
1514 	      /* We can consider insns having multiple sets, where all
1515 		 but one are dead, as single set insns.  In the common case
1516 		 only a single set is present in the pattern, so we want
1517 		 to avoid checking for REG_UNUSED notes unless necessary.
1518 
1519 		 When we reach a set for the first time, we just expect it to
1520 		 be the single set we are looking for; only when more sets
1521 		 are found in the insn do we check them.  */
1522 		{
1523 		  if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
1524 		      && !side_effects_p (set))
1525 		    set = NULL;
1526 		  else
1527 		    set_verified = 1;
1528 		}
1529 	      if (!set)
1530 		set = sub, set_verified = 0;
1531 	      else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
1532 		       || side_effects_p (sub))
1533 		return NULL_RTX;
1534 	      break;
1535 
1536 	    default:
1537 	      return NULL_RTX;
1538 	    }
1539 	}
1540     }
1541   return set;
1542 }
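
/* A minimal usage sketch: most callers reach the code above through the
   single_set inline wrapper in rtl.h, which fast-paths a bare SET pattern
   and only falls back to single_set_2 for PARALLELs, e.g.

     rtx set = single_set (insn);
     if (set && REG_P (SET_DEST (set)))
       ... operate on SET_SRC (set) ...

   */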
1543 
1544 /* Given an INSN, return nonzero if it has more than one SET, else return
1545    zero.  */
1546 
1547 int
1548 multiple_sets (const_rtx insn)
1549 {
1550   int found;
1551   int i;
1552 
1553   /* INSN must be an insn.  */
1554   if (! INSN_P (insn))
1555     return 0;
1556 
1557   /* Only a PARALLEL can have multiple SETs.  */
1558   if (GET_CODE (PATTERN (insn)) == PARALLEL)
1559     {
1560       for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1561 	if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
1562 	  {
1563 	    /* If we have already found a SET, then return now.  */
1564 	    if (found)
1565 	      return 1;
1566 	    else
1567 	      found = 1;
1568 	  }
1569     }
1570 
1571   /* Either zero or one SET.  */
1572   return 0;
1573 }
1574 
1575 /* Return nonzero if the destination of SET equals the source
1576    and there are no side effects.  */
1577 
1578 int
1579 set_noop_p (const_rtx set)
1580 {
1581   rtx src = SET_SRC (set);
1582   rtx dst = SET_DEST (set);
1583 
1584   if (dst == pc_rtx && src == pc_rtx)
1585     return 1;
1586 
1587   if (MEM_P (dst) && MEM_P (src))
1588     return rtx_equal_p (dst, src) && !side_effects_p (dst);
1589 
1590   if (GET_CODE (dst) == ZERO_EXTRACT)
1591     return rtx_equal_p (XEXP (dst, 0), src)
1592 	   && !BITS_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
1593 	   && !side_effects_p (src);
1594 
1595   if (GET_CODE (dst) == STRICT_LOW_PART)
1596     dst = XEXP (dst, 0);
1597 
1598   if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
1599     {
1600       if (maybe_ne (SUBREG_BYTE (src), SUBREG_BYTE (dst)))
1601 	return 0;
1602       src = SUBREG_REG (src);
1603       dst = SUBREG_REG (dst);
1604     }
1605 
1606   /* It is a NOOP if destination overlaps with selected src vector
1607      elements.  */
1608   if (GET_CODE (src) == VEC_SELECT
1609       && REG_P (XEXP (src, 0)) && REG_P (dst)
1610       && HARD_REGISTER_P (XEXP (src, 0))
1611       && HARD_REGISTER_P (dst))
1612     {
1613       int i;
1614       rtx par = XEXP (src, 1);
1615       rtx src0 = XEXP (src, 0);
1616       int c0 = INTVAL (XVECEXP (par, 0, 0));
1617       HOST_WIDE_INT offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;
1618 
1619       for (i = 1; i < XVECLEN (par, 0); i++)
1620 	if (INTVAL (XVECEXP (par, 0, i)) != c0 + i)
1621 	  return 0;
1622       return
1623 	simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
1624 			       offset, GET_MODE (dst)) == (int) REGNO (dst);
1625     }
1626 
1627   return (REG_P (src) && REG_P (dst)
1628 	  && REGNO (src) == REGNO (dst));
1629 }
1630 
1631 /* Return nonzero if an insn consists only of SETs, each of which only sets a
1632    value to itself.  */
1633 
1634 int
1635 noop_move_p (const rtx_insn *insn)
1636 {
1637   rtx pat = PATTERN (insn);
1638 
1639   if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
1640     return 1;
1641 
1642   /* Insns carrying these notes are useful later on.  */
1643   if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
1644     return 0;
1645 
1646   /* Check the code to be executed for COND_EXEC.  */
1647   if (GET_CODE (pat) == COND_EXEC)
1648     pat = COND_EXEC_CODE (pat);
1649 
1650   if (GET_CODE (pat) == SET && set_noop_p (pat))
1651     return 1;
1652 
1653   if (GET_CODE (pat) == PARALLEL)
1654     {
1655       int i;
1656       /* If nothing but SETs of registers to themselves,
1657 	 this insn can also be deleted.  */
1658       for (i = 0; i < XVECLEN (pat, 0); i++)
1659 	{
1660 	  rtx tem = XVECEXP (pat, 0, i);
1661 
1662 	  if (GET_CODE (tem) == USE
1663 	      || GET_CODE (tem) == CLOBBER)
1664 	    continue;
1665 
1666 	  if (GET_CODE (tem) != SET || ! set_noop_p (tem))
1667 	    return 0;
1668 	}
1669 
1670       return 1;
1671     }
1672   return 0;
1673 }
1674 
1675 
1676 /* Return nonzero if a register in the range [REGNO, ENDREGNO)
1677    appears either explicitly or implicitly in X
1678    other than being stored into.
1679 
1680    References contained within the substructure at LOC do not count.
1681    LOC may be zero, meaning don't ignore anything.  */
1682 
1683 bool
1684 refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
1685 		   rtx *loc)
1686 {
1687   int i;
1688   unsigned int x_regno;
1689   RTX_CODE code;
1690   const char *fmt;
1691 
1692  repeat:
1693   /* The contents of a REG_NONNEG note are always zero, so we must come here
1694      upon repeat in case the last REG_NOTE is a REG_NONNEG note.  */
1695   if (x == 0)
1696     return false;
1697 
1698   code = GET_CODE (x);
1699 
1700   switch (code)
1701     {
1702     case REG:
1703       x_regno = REGNO (x);
1704 
1705       /* If we are modifying the stack, frame, or argument pointer, it will
1706 	 clobber a virtual register.  In fact, we could be more precise,
1707 	 but it isn't worth it.  */
1708       if ((x_regno == STACK_POINTER_REGNUM
1709 	   || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1710 	       && x_regno == ARG_POINTER_REGNUM)
1711 	   || x_regno == FRAME_POINTER_REGNUM)
1712 	  && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
1713 	return true;
1714 
1715       return endregno > x_regno && regno < END_REGNO (x);
1716 
1717     case SUBREG:
1718       /* If this is a SUBREG of a hard reg, we can see exactly which
1719 	 registers are being modified.  Otherwise, handle normally.  */
1720       if (REG_P (SUBREG_REG (x))
1721 	  && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
1722 	{
1723 	  unsigned int inner_regno = subreg_regno (x);
1724 	  unsigned int inner_endregno
1725 	    = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
1726 			     ? subreg_nregs (x) : 1);
1727 
1728 	  return endregno > inner_regno && regno < inner_endregno;
1729 	}
1730       break;
1731 
1732     case CLOBBER:
1733     case SET:
1734       if (&SET_DEST (x) != loc
1735 	  /* Note setting a SUBREG counts as referring to the REG it is in for
1736 	     a pseudo but not for hard registers since we can
1737 	     treat each word individually.  */
1738 	  && ((GET_CODE (SET_DEST (x)) == SUBREG
1739 	       && loc != &SUBREG_REG (SET_DEST (x))
1740 	       && REG_P (SUBREG_REG (SET_DEST (x)))
1741 	       && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
1742 	       && refers_to_regno_p (regno, endregno,
1743 				     SUBREG_REG (SET_DEST (x)), loc))
1744 	      || (!REG_P (SET_DEST (x))
1745 		  && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
1746 	return true;
1747 
1748       if (code == CLOBBER || loc == &SET_SRC (x))
1749 	return false;
1750       x = SET_SRC (x);
1751       goto repeat;
1752 
1753     default:
1754       break;
1755     }
1756 
1757   /* X does not match, so try its subexpressions.  */
1758 
1759   fmt = GET_RTX_FORMAT (code);
1760   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1761     {
1762       if (fmt[i] == 'e' && loc != &XEXP (x, i))
1763 	{
1764 	  if (i == 0)
1765 	    {
1766 	      x = XEXP (x, 0);
1767 	      goto repeat;
1768 	    }
1769 	  else
1770 	    if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
1771 	      return true;
1772 	}
1773       else if (fmt[i] == 'E')
1774 	{
1775 	  int j;
1776 	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1777 	    if (loc != &XVECEXP (x, i, j)
1778 		&& refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
1779 	      return true;
1780 	}
1781     }
1782   return false;
1783 }
1784 
1785 /* Nonzero if modifying X will affect IN.  If X is a register or a SUBREG,
1786    we check if any register number in X conflicts with the relevant register
1787    numbers.  If X is a constant, return 0.  If X is a MEM, return 1 iff IN
1788    contains a MEM (we don't bother checking for memory addresses that can't
1789    conflict because we expect this to be a rare case).  */
1790 
1791 int
1792 reg_overlap_mentioned_p (const_rtx x, const_rtx in)
1793 {
1794   unsigned int regno, endregno;
1795 
1796   /* If either argument is a constant, then modifying X cannot
1797      affect IN.  Here we look at IN; we can profitably combine
1798      CONSTANT_P (x) with the switch statement below.  */
1799   if (CONSTANT_P (in))
1800     return 0;
1801 
1802  recurse:
1803   switch (GET_CODE (x))
1804     {
1805     case STRICT_LOW_PART:
1806     case ZERO_EXTRACT:
1807     case SIGN_EXTRACT:
1808       /* Overly conservative.  */
1809       x = XEXP (x, 0);
1810       goto recurse;
1811 
1812     case SUBREG:
1813       regno = REGNO (SUBREG_REG (x));
1814       if (regno < FIRST_PSEUDO_REGISTER)
1815 	regno = subreg_regno (x);
1816       endregno = regno + (regno < FIRST_PSEUDO_REGISTER
1817 			  ? subreg_nregs (x) : 1);
1818       goto do_reg;
1819 
1820     case REG:
1821       regno = REGNO (x);
1822       endregno = END_REGNO (x);
1823     do_reg:
1824       return refers_to_regno_p (regno, endregno, in, (rtx*) 0);
1825 
1826     case MEM:
1827       {
1828 	const char *fmt;
1829 	int i;
1830 
1831 	if (MEM_P (in))
1832 	  return 1;
1833 
1834 	fmt = GET_RTX_FORMAT (GET_CODE (in));
1835 	for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
1836 	  if (fmt[i] == 'e')
1837 	    {
1838 	      if (reg_overlap_mentioned_p (x, XEXP (in, i)))
1839 		return 1;
1840 	    }
1841 	  else if (fmt[i] == 'E')
1842 	    {
1843 	      int j;
1844 	      for (j = XVECLEN (in, i) - 1; j >= 0; --j)
1845 		if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
1846 		  return 1;
1847 	    }
1848 
1849 	return 0;
1850       }
1851 
1852     case SCRATCH:
1853     case PC:
1854     case CC0:
1855       return reg_mentioned_p (x, in);
1856 
1857     case PARALLEL:
1858       {
1859 	int i;
1860 
1861 	/* If any register in the PARALLEL overlaps IN, we return 1.  */
1862 	for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1863 	  if (XEXP (XVECEXP (x, 0, i), 0) != 0
1864 	      && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
1865 	    return 1;
1866 	return 0;
1867       }
1868 
1869     default:
1870       gcc_assert (CONSTANT_P (x));
1871       return 0;
1872     }
1873 }
1874 
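/* A typical query sketch (hypothetical caller and variable names, not part
   of this file): refuse to interchange two insns when the destination of
   one overlaps anything mentioned in the other.

     if (reg_overlap_mentioned_p (SET_DEST (set), PATTERN (other_insn)))
       ... the two insns must not be reordered ...  */
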
1875 /* Call FUN on each register or MEM that is stored into or clobbered by X.
1876    (X would be the pattern of an insn).  DATA is an arbitrary pointer,
1877    ignored by note_stores, but passed to FUN.
1878 
1879    FUN receives three arguments:
1880    1. the REG, MEM, CC0 or PC being stored in or clobbered,
1881    2. the SET or CLOBBER rtx that does the store,
1882    3. the pointer DATA provided to note_stores.
1883 
1884   If the item being stored in or clobbered is a SUBREG of a hard register,
1885   the SUBREG will be passed.  */
1886 
1887 void
1888 note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
1889 {
1890   int i;
1891 
1892   if (GET_CODE (x) == COND_EXEC)
1893     x = COND_EXEC_CODE (x);
1894 
1895   if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
1896     {
1897       rtx dest = SET_DEST (x);
1898 
1899       while ((GET_CODE (dest) == SUBREG
1900 	      && (!REG_P (SUBREG_REG (dest))
1901 		  || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
1902 	     || GET_CODE (dest) == ZERO_EXTRACT
1903 	     || GET_CODE (dest) == STRICT_LOW_PART)
1904 	dest = XEXP (dest, 0);
1905 
1906       /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
1907 	 each of whose first operand is a register.  */
1908       if (GET_CODE (dest) == PARALLEL)
1909 	{
1910 	  for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
1911 	    if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
1912 	      (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
1913 	}
1914       else
1915 	(*fun) (dest, x, data);
1916     }
1917 
1918   else if (GET_CODE (x) == PARALLEL)
1919     for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1920       note_stores (XVECEXP (x, 0, i), fun, data);
1921 }
1922 
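/* A minimal usage sketch (hypothetical callback and caller, not part of
   this file): collect every hard register stored into or clobbered by one
   insn.

     static void
     record_hard_reg_stores (rtx dest, const_rtx setter ATTRIBUTE_UNUSED,
			     void *data)
     {
       HARD_REG_SET *set = (HARD_REG_SET *) data;
       if (REG_P (dest) && HARD_REGISTER_P (dest))
	 add_to_hard_reg_set (set, GET_MODE (dest), REGNO (dest));
     }

     ...
     note_stores (PATTERN (insn), record_hard_reg_stores, &stores);  */
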
1923 /* Like note_stores, but call FUN for each expression that is being
1924    referenced in PBODY, a pointer to the PATTERN of an insn.  We only call
1925    FUN for each expression, not any interior subexpressions.  FUN receives a
1926    pointer to the expression and the DATA passed to this function.
1927 
1928    Note that this is not quite the same test as that done in reg_referenced_p
1929    since that considers something as being referenced if it is being
1930    partially set, while we do not.  */
1931 
1932 void
1933 note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
1934 {
1935   rtx body = *pbody;
1936   int i;
1937 
1938   switch (GET_CODE (body))
1939     {
1940     case COND_EXEC:
1941       (*fun) (&COND_EXEC_TEST (body), data);
1942       note_uses (&COND_EXEC_CODE (body), fun, data);
1943       return;
1944 
1945     case PARALLEL:
1946       for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1947 	note_uses (&XVECEXP (body, 0, i), fun, data);
1948       return;
1949 
1950     case SEQUENCE:
1951       for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1952 	note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
1953       return;
1954 
1955     case USE:
1956       (*fun) (&XEXP (body, 0), data);
1957       return;
1958 
1959     case ASM_OPERANDS:
1960       for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
1961 	(*fun) (&ASM_OPERANDS_INPUT (body, i), data);
1962       return;
1963 
1964     case TRAP_IF:
1965       (*fun) (&TRAP_CONDITION (body), data);
1966       return;
1967 
1968     case PREFETCH:
1969       (*fun) (&XEXP (body, 0), data);
1970       return;
1971 
1972     case UNSPEC:
1973     case UNSPEC_VOLATILE:
1974       for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1975 	(*fun) (&XVECEXP (body, 0, i), data);
1976       return;
1977 
1978     case CLOBBER:
1979       if (MEM_P (XEXP (body, 0)))
1980 	(*fun) (&XEXP (XEXP (body, 0), 0), data);
1981       return;
1982 
1983     case SET:
1984       {
1985 	rtx dest = SET_DEST (body);
1986 
1987 	/* For sets we visit everything in the source, plus the address of any
1988 	   memory destination and the size and position operands of a ZERO_EXTRACT.  */
1989 	(*fun) (&SET_SRC (body), data);
1990 
1991 	if (GET_CODE (dest) == ZERO_EXTRACT)
1992 	  {
1993 	    (*fun) (&XEXP (dest, 1), data);
1994 	    (*fun) (&XEXP (dest, 2), data);
1995 	  }
1996 
1997 	while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
1998 	  dest = XEXP (dest, 0);
1999 
2000 	if (MEM_P (dest))
2001 	  (*fun) (&XEXP (dest, 0), data);
2002       }
2003       return;
2004 
2005     default:
2006       /* All the other possibilities never store.  */
2007       (*fun) (pbody, data);
2008       return;
2009     }
2010 }
2011 
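/* A minimal usage sketch (hypothetical callback and caller, not part of
   this file).  Because FUN receives a pointer to each expression, a caller
   can rewrite the uses in place, for example substituting one register
   for another.

     static void
     substitute_use (rtx *xp, void *data)
     {
       rtx *pair = (rtx *) data;
       if (reg_mentioned_p (pair[0], *xp))
	 *xp = replace_rtx (copy_rtx (*xp), pair[0], pair[1], false);
     }

     ...
     rtx pair[2] = { old_reg, new_reg };
     note_uses (&PATTERN (insn), substitute_use, pair);  */
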
2012 /* Return nonzero if X's old contents don't survive after INSN.
2013    This will be true if X is (cc0), or if X is a register and
2014    either X dies in INSN or INSN entirely sets X.
2015 
2016    "Entirely set" means set directly and not through a SUBREG or
2017    ZERO_EXTRACT, so no trace of the old contents remains.
2018    Likewise, REG_INC does not count.
2019 
2020    REG may be a hard or pseudo reg.  Renumbering is not taken into account,
2021    but for this use that makes no difference, since regs don't overlap
2022    during their lifetimes.  Therefore, this function may be used
2023    at any time after deaths have been computed.
2024 
2025    If REG is a hard reg that occupies multiple machine registers, this
2026    function will only return 1 if each of those registers will be replaced
2027    by INSN.  */
2028 
2029 int
2030 dead_or_set_p (const rtx_insn *insn, const_rtx x)
2031 {
2032   unsigned int regno, end_regno;
2033   unsigned int i;
2034 
2035   /* Can't use cc0_rtx below since this file is used by genattrtab.c.  */
2036   if (GET_CODE (x) == CC0)
2037     return 1;
2038 
2039   gcc_assert (REG_P (x));
2040 
2041   regno = REGNO (x);
2042   end_regno = END_REGNO (x);
2043   for (i = regno; i < end_regno; i++)
2044     if (! dead_or_set_regno_p (insn, i))
2045       return 0;
2046 
2047   return 1;
2048 }
2049 
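/* A typical query sketch (hypothetical caller, not part of this file):
   once REG's old contents are known not to survive INSN, an earlier
   computation whose only purpose was to set REG can be removed.

     if (dead_or_set_p (insn, reg))
       ... the value REG held before INSN is no longer needed ...  */
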
2050 /* Return TRUE iff DEST is a register or subreg of a register, is a
2051    complete rather than read-modify-write destination, and contains
2052    register TEST_REGNO.  */
2053 
2054 static bool
2055 covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
2056 {
2057   unsigned int regno, endregno;
2058 
2059   if (GET_CODE (dest) == SUBREG && !read_modify_subreg_p (dest))
2060     dest = SUBREG_REG (dest);
2061 
2062   if (!REG_P (dest))
2063     return false;
2064 
2065   regno = REGNO (dest);
2066   endregno = END_REGNO (dest);
2067   return (test_regno >= regno && test_regno < endregno);
2068 }
2069 
2070 /* Like covers_regno_no_parallel_p, but also handles PARALLELs where
2071    any member matches the covers_regno_no_parallel_p criteria.  */
2072 
2073 static bool
2074 covers_regno_p (const_rtx dest, unsigned int test_regno)
2075 {
2076   if (GET_CODE (dest) == PARALLEL)
2077     {
2078       /* Some targets place small structures in registers for return
2079 	 values of functions, and those registers are wrapped in
2080 	 PARALLELs that we may see as the destination of a SET.  */
2081       int i;
2082 
2083       for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2084 	{
2085 	  rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
2086 	  if (inner != NULL_RTX
2087 	      && covers_regno_no_parallel_p (inner, test_regno))
2088 	    return true;
2089 	}
2090 
2091       return false;
2092     }
2093   else
2094     return covers_regno_no_parallel_p (dest, test_regno);
2095 }
2096 
2097 /* Utility function for dead_or_set_p to check an individual register.  */
2098 
2099 int
2100 dead_or_set_regno_p (const rtx_insn *insn, unsigned int test_regno)
2101 {
2102   const_rtx pattern;
2103 
2104   /* See if there is a death note for something that includes TEST_REGNO.  */
2105   if (find_regno_note (insn, REG_DEAD, test_regno))
2106     return 1;
2107 
2108   if (CALL_P (insn)
2109       && find_regno_fusage (insn, CLOBBER, test_regno))
2110     return 1;
2111 
2112   pattern = PATTERN (insn);
2113 
2114   /* If a COND_EXEC is not executed, the value survives.  */
2115   if (GET_CODE (pattern) == COND_EXEC)
2116     return 0;
2117 
2118   if (GET_CODE (pattern) == SET || GET_CODE (pattern) == CLOBBER)
2119     return covers_regno_p (SET_DEST (pattern), test_regno);
2120   else if (GET_CODE (pattern) == PARALLEL)
2121     {
2122       int i;
2123 
2124       for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
2125 	{
2126 	  rtx body = XVECEXP (pattern, 0, i);
2127 
2128 	  if (GET_CODE (body) == COND_EXEC)
2129 	    body = COND_EXEC_CODE (body);
2130 
2131 	  if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
2132 	      && covers_regno_p (SET_DEST (body), test_regno))
2133 	    return 1;
2134 	}
2135     }
2136 
2137   return 0;
2138 }
2139 
2140 /* Return the reg-note of kind KIND in insn INSN, if there is one.
2141    If DATUM is nonzero, look for one whose datum is DATUM.  */
2142 
2143 rtx
2144 find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
2145 {
2146   rtx link;
2147 
2148   gcc_checking_assert (insn);
2149 
2150   /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
2151   if (! INSN_P (insn))
2152     return 0;
2153   if (datum == 0)
2154     {
2155       for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2156 	if (REG_NOTE_KIND (link) == kind)
2157 	  return link;
2158       return 0;
2159     }
2160 
2161   for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2162     if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
2163       return link;
2164   return 0;
2165 }
2166 
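/* A typical lookup sketch (hypothetical caller, not part of this file):

     rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
     if (note)
       ... XEXP (note, 0) is the value the insn's set is known to equal ...  */
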
2167 /* Return the reg-note of kind KIND in insn INSN which applies to register
2168    number REGNO, if any.  Return 0 if there is no such reg-note.  Note that
2169    the register mentioned in the note need not be exactly REGNO if REGNO is
2170    a hard register; it is enough for the note to overlap REGNO.  */
2171 
2172 rtx
2173 find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
2174 {
2175   rtx link;
2176 
2177   /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
2178   if (! INSN_P (insn))
2179     return 0;
2180 
2181   for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2182     if (REG_NOTE_KIND (link) == kind
2183 	/* Verify that it is a register, so that scratch and MEM won't cause a
2184 	   problem here.  */
2185 	&& REG_P (XEXP (link, 0))
2186 	&& REGNO (XEXP (link, 0)) <= regno
2187 	&& END_REGNO (XEXP (link, 0)) > regno)
2188       return link;
2189   return 0;
2190 }
2191 
2192 /* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
2193    has such a note.  */
2194 
2195 rtx
2196 find_reg_equal_equiv_note (const_rtx insn)
2197 {
2198   rtx link;
2199 
2200   if (!INSN_P (insn))
2201     return 0;
2202 
2203   for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2204     if (REG_NOTE_KIND (link) == REG_EQUAL
2205 	|| REG_NOTE_KIND (link) == REG_EQUIV)
2206       {
2207 	/* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
2208 	   insns that have multiple sets.  Checking single_set to
2209 	   make sure of this is not the proper check, as explained
2210 	   in the comment in set_unique_reg_note.
2211 
2212 	   This should be changed into an assert.  */
2213 	if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
2214 	  return 0;
2215 	return link;
2216       }
2217   return NULL;
2218 }
2219 
2220 /* Check whether INSN is a single_set whose source is known to be
2221    equivalent to a constant.  Return that constant if so, otherwise
2222    return null.  */
2223 
2224 rtx
2225 find_constant_src (const rtx_insn *insn)
2226 {
2227   rtx note, set, x;
2228 
2229   set = single_set (insn);
2230   if (set)
2231     {
2232       x = avoid_constant_pool_reference (SET_SRC (set));
2233       if (CONSTANT_P (x))
2234 	return x;
2235     }
2236 
2237   note = find_reg_equal_equiv_note (insn);
2238   if (note && CONSTANT_P (XEXP (note, 0)))
2239     return XEXP (note, 0);
2240 
2241   return NULL_RTX;
2242 }
2243 
2244 /* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
2245    in the CALL_INSN_FUNCTION_USAGE information of INSN.  */
2246 
2247 int
2248 find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
2249 {
2250   /* If it's not a CALL_INSN, it can't possibly have a
2251      CALL_INSN_FUNCTION_USAGE field, so don't bother checking.  */
2252   if (!CALL_P (insn))
2253     return 0;
2254 
2255   gcc_assert (datum);
2256 
2257   if (!REG_P (datum))
2258     {
2259       rtx link;
2260 
2261       for (link = CALL_INSN_FUNCTION_USAGE (insn);
2262 	   link;
2263 	   link = XEXP (link, 1))
2264 	if (GET_CODE (XEXP (link, 0)) == code
2265 	    && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
2266 	  return 1;
2267     }
2268   else
2269     {
2270       unsigned int regno = REGNO (datum);
2271 
2272       /* CALL_INSN_FUNCTION_USAGE information cannot contain references
2273 	 to pseudo registers, so don't bother checking.  */
2274 
2275       if (regno < FIRST_PSEUDO_REGISTER)
2276 	{
2277 	  unsigned int end_regno = END_REGNO (datum);
2278 	  unsigned int i;
2279 
2280 	  for (i = regno; i < end_regno; i++)
2281 	    if (find_regno_fusage (insn, code, i))
2282 	      return 1;
2283 	}
2284     }
2285 
2286   return 0;
2287 }
2288 
2289 /* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
2290    in the CALL_INSN_FUNCTION_USAGE information of INSN.  */
2291 
2292 int
2293 find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
2294 {
2295   rtx link;
2296 
2297   /* CALL_INSN_FUNCTION_USAGE information cannot contain references
2298      to pseudo registers, so don't bother checking.  */
2299 
2300   if (regno >= FIRST_PSEUDO_REGISTER
2301       || !CALL_P (insn) )
2302     return 0;
2303 
2304   for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2305     {
2306       rtx op, reg;
2307 
2308       if (GET_CODE (op = XEXP (link, 0)) == code
2309 	  && REG_P (reg = XEXP (op, 0))
2310 	  && REGNO (reg) <= regno
2311 	  && END_REGNO (reg) > regno)
2312 	return 1;
2313     }
2314 
2315   return 0;
2316 }
2317 
2318 
2319 /* Return true if KIND is an integer REG_NOTE.  */
2320 
2321 static bool
2322 int_reg_note_p (enum reg_note kind)
2323 {
2324   return kind == REG_BR_PROB;
2325 }
2326 
2327 /* Allocate a register note with kind KIND and datum DATUM.  LIST is
2328    stored as the pointer to the next register note.  */
2329 
2330 rtx
2331 alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
2332 {
2333   rtx note;
2334 
2335   gcc_checking_assert (!int_reg_note_p (kind));
2336   switch (kind)
2337     {
2338     case REG_CC_SETTER:
2339     case REG_CC_USER:
2340     case REG_LABEL_TARGET:
2341     case REG_LABEL_OPERAND:
2342     case REG_TM:
2343       /* These types of register notes use an INSN_LIST rather than an
2344 	 EXPR_LIST, so that copying is done right and dumps look
2345 	 better.  */
2346       note = alloc_INSN_LIST (datum, list);
2347       PUT_REG_NOTE_KIND (note, kind);
2348       break;
2349 
2350     default:
2351       note = alloc_EXPR_LIST (kind, datum, list);
2352       break;
2353     }
2354 
2355   return note;
2356 }
2357 
2358 /* Add register note with kind KIND and datum DATUM to INSN.  */
2359 
2360 void
2361 add_reg_note (rtx insn, enum reg_note kind, rtx datum)
2362 {
2363   REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
2364 }
2365 
2366 /* Add an integer register note with kind KIND and datum DATUM to INSN.  */
2367 
2368 void
2369 add_int_reg_note (rtx_insn *insn, enum reg_note kind, int datum)
2370 {
2371   gcc_checking_assert (int_reg_note_p (kind));
2372   REG_NOTES (insn) = gen_rtx_INT_LIST ((machine_mode) kind,
2373 				       datum, REG_NOTES (insn));
2374 }
2375 
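/* Usage sketches (hypothetical callers and variables, not part of this
   file):

     add_reg_note (insn, REG_UNUSED, reg);
     add_int_reg_note (jump, REG_BR_PROB, prob_as_int);

   The first allocates an EXPR_LIST (or INSN_LIST) note; the second stores
   the integer PROB_AS_INT directly in an INT_LIST note, which is why
   integer-valued kinds such as REG_BR_PROB must use add_int_reg_note.  */
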
2376 /* Add a REG_ARGS_SIZE note to INSN with value VALUE.  */
2377 
2378 void
2379 add_args_size_note (rtx_insn *insn, poly_int64 value)
2380 {
2381   gcc_checking_assert (!find_reg_note (insn, REG_ARGS_SIZE, NULL_RTX));
2382   add_reg_note (insn, REG_ARGS_SIZE, gen_int_mode (value, Pmode));
2383 }
2384 
2385 /* Add a register note like NOTE to INSN.  */
2386 
2387 void
2388 add_shallow_copy_of_reg_note (rtx_insn *insn, rtx note)
2389 {
2390   if (GET_CODE (note) == INT_LIST)
2391     add_int_reg_note (insn, REG_NOTE_KIND (note), XINT (note, 0));
2392   else
2393     add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
2394 }
2395 
2396 /* Duplicate NOTE and return the copy.  */
2397 rtx
2398 duplicate_reg_note (rtx note)
2399 {
2400   reg_note kind = REG_NOTE_KIND (note);
2401 
2402   if (GET_CODE (note) == INT_LIST)
2403     return gen_rtx_INT_LIST ((machine_mode) kind, XINT (note, 0), NULL_RTX);
2404   else if (GET_CODE (note) == EXPR_LIST)
2405     return alloc_reg_note (kind, copy_insn_1 (XEXP (note, 0)), NULL_RTX);
2406   else
2407     return alloc_reg_note (kind, XEXP (note, 0), NULL_RTX);
2408 }
2409 
2410 /* Remove register note NOTE from the REG_NOTES of INSN.  */
2411 
2412 void
2413 remove_note (rtx_insn *insn, const_rtx note)
2414 {
2415   rtx link;
2416 
2417   if (note == NULL_RTX)
2418     return;
2419 
2420   if (REG_NOTES (insn) == note)
2421     REG_NOTES (insn) = XEXP (note, 1);
2422   else
2423     for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2424       if (XEXP (link, 1) == note)
2425 	{
2426 	  XEXP (link, 1) = XEXP (note, 1);
2427 	  break;
2428 	}
2429 
2430   switch (REG_NOTE_KIND (note))
2431     {
2432     case REG_EQUAL:
2433     case REG_EQUIV:
2434       df_notes_rescan (insn);
2435       break;
2436     default:
2437       break;
2438     }
2439 }
2440 
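/* A typical removal sketch (hypothetical caller, not part of this file):
   drop an equivalence note that is no longer valid after a transformation.

     rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
     if (note)
       remove_note (insn, note);  */
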
2441 /* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes.
2442    Return true if any note has been removed.  */
2443 
2444 bool
2445 remove_reg_equal_equiv_notes (rtx_insn *insn)
2446 {
2447   rtx *loc;
2448   bool ret = false;
2449 
2450   loc = &REG_NOTES (insn);
2451   while (*loc)
2452     {
2453       enum reg_note kind = REG_NOTE_KIND (*loc);
2454       if (kind == REG_EQUAL || kind == REG_EQUIV)
2455 	{
2456 	  *loc = XEXP (*loc, 1);
2457 	  ret = true;
2458 	}
2459       else
2460 	loc = &XEXP (*loc, 1);
2461     }
2462   return ret;
2463 }
2464 
2465 /* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO.  */
2466 
2467 void
2468 remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
2469 {
2470   df_ref eq_use;
2471 
2472   if (!df)
2473     return;
2474 
2475   /* This loop is a little tricky.  We cannot just go down the chain because
2476      it is being modified by some actions in the loop.  So we just iterate
2477      over the head.  We plan to drain the list anyway.  */
2478   while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
2479     {
2480       rtx_insn *insn = DF_REF_INSN (eq_use);
2481       rtx note = find_reg_equal_equiv_note (insn);
2482 
2483       /* This assert is generally triggered when someone deletes a REG_EQUAL
2484 	 or REG_EQUIV note by hacking the list manually rather than calling
2485 	 remove_note.  */
2486       gcc_assert (note);
2487 
2488       remove_note (insn, note);
2489     }
2490 }
2491 
2492 /* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
2493    return true if it is found.  A simple equality test is used to determine
2494    if NODE matches.  */
2495 
2496 bool
2497 in_insn_list_p (const rtx_insn_list *listp, const rtx_insn *node)
2498 {
2499   const_rtx x;
2500 
2501   for (x = listp; x; x = XEXP (x, 1))
2502     if (node == XEXP (x, 0))
2503       return true;
2504 
2505   return false;
2506 }
2507 
2508 /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
2509    remove that entry from the list if it is found.
2510 
2511    A simple equality test is used to determine if NODE matches.  */
2512 
2513 void
2514 remove_node_from_expr_list (const_rtx node, rtx_expr_list **listp)
2515 {
2516   rtx_expr_list *temp = *listp;
2517   rtx_expr_list *prev = NULL;
2518 
2519   while (temp)
2520     {
2521       if (node == temp->element ())
2522 	{
2523 	  /* Splice the node out of the list.  */
2524 	  if (prev)
2525 	    XEXP (prev, 1) = temp->next ();
2526 	  else
2527 	    *listp = temp->next ();
2528 
2529 	  return;
2530 	}
2531 
2532       prev = temp;
2533       temp = temp->next ();
2534     }
2535 }
2536 
2537 /* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
2538    remove that entry from the list if it is found.
2539 
2540    A simple equality test is used to determine if NODE matches.  */
2541 
2542 void
2543 remove_node_from_insn_list (const rtx_insn *node, rtx_insn_list **listp)
2544 {
2545   rtx_insn_list *temp = *listp;
2546   rtx_insn_list *prev = NULL;
2547 
2548   while (temp)
2549     {
2550       if (node == temp->insn ())
2551 	{
2552 	  /* Splice the node out of the list.  */
2553 	  if (prev)
2554 	    XEXP (prev, 1) = temp->next ();
2555 	  else
2556 	    *listp = temp->next ();
2557 
2558 	  return;
2559 	}
2560 
2561       prev = temp;
2562       temp = temp->next ();
2563     }
2564 }
2565 
2566 /* Nonzero if X contains any volatile instructions.  These are instructions
2567    which may cause unpredictable machine state, and thus no other
2568    instructions or register uses should be moved or combined across them.
2569    This includes only volatile asms and UNSPEC_VOLATILE instructions.  */
2570 
2571 int
2572 volatile_insn_p (const_rtx x)
2573 {
2574   const RTX_CODE code = GET_CODE (x);
2575   switch (code)
2576     {
2577     case LABEL_REF:
2578     case SYMBOL_REF:
2579     case CONST:
2580     CASE_CONST_ANY:
2581     case CC0:
2582     case PC:
2583     case REG:
2584     case SCRATCH:
2585     case CLOBBER:
2586     case ADDR_VEC:
2587     case ADDR_DIFF_VEC:
2588     case CALL:
2589     case MEM:
2590       return 0;
2591 
2592     case UNSPEC_VOLATILE:
2593       return 1;
2594 
2595     case ASM_INPUT:
2596     case ASM_OPERANDS:
2597       if (MEM_VOLATILE_P (x))
2598 	return 1;
2599 
2600     default:
2601       break;
2602     }
2603 
2604   /* Recursively scan the operands of this expression.  */
2605 
2606   {
2607     const char *const fmt = GET_RTX_FORMAT (code);
2608     int i;
2609 
2610     for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2611       {
2612 	if (fmt[i] == 'e')
2613 	  {
2614 	    if (volatile_insn_p (XEXP (x, i)))
2615 	      return 1;
2616 	  }
2617 	else if (fmt[i] == 'E')
2618 	  {
2619 	    int j;
2620 	    for (j = 0; j < XVECLEN (x, i); j++)
2621 	      if (volatile_insn_p (XVECEXP (x, i, j)))
2622 		return 1;
2623 	  }
2624       }
2625   }
2626   return 0;
2627 }
2628 
2629 /* Nonzero if X contains any volatile memory references,
2630    UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions.  */
2631 
2632 int
2633 volatile_refs_p (const_rtx x)
2634 {
2635   const RTX_CODE code = GET_CODE (x);
2636   switch (code)
2637     {
2638     case LABEL_REF:
2639     case SYMBOL_REF:
2640     case CONST:
2641     CASE_CONST_ANY:
2642     case CC0:
2643     case PC:
2644     case REG:
2645     case SCRATCH:
2646     case CLOBBER:
2647     case ADDR_VEC:
2648     case ADDR_DIFF_VEC:
2649       return 0;
2650 
2651     case UNSPEC_VOLATILE:
2652       return 1;
2653 
2654     case MEM:
2655     case ASM_INPUT:
2656     case ASM_OPERANDS:
2657       if (MEM_VOLATILE_P (x))
2658 	return 1;
2659 
2660     default:
2661       break;
2662     }
2663 
2664   /* Recursively scan the operands of this expression.  */
2665 
2666   {
2667     const char *const fmt = GET_RTX_FORMAT (code);
2668     int i;
2669 
2670     for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2671       {
2672 	if (fmt[i] == 'e')
2673 	  {
2674 	    if (volatile_refs_p (XEXP (x, i)))
2675 	      return 1;
2676 	  }
2677 	else if (fmt[i] == 'E')
2678 	  {
2679 	    int j;
2680 	    for (j = 0; j < XVECLEN (x, i); j++)
2681 	      if (volatile_refs_p (XVECEXP (x, i, j)))
2682 		return 1;
2683 	  }
2684       }
2685   }
2686   return 0;
2687 }
2688 
2689 /* Similar to above, except that it also rejects register pre- and post-
2690    incrementing.  */
2691 
2692 int
2693 side_effects_p (const_rtx x)
2694 {
2695   const RTX_CODE code = GET_CODE (x);
2696   switch (code)
2697     {
2698     case LABEL_REF:
2699     case SYMBOL_REF:
2700     case CONST:
2701     CASE_CONST_ANY:
2702     case CC0:
2703     case PC:
2704     case REG:
2705     case SCRATCH:
2706     case ADDR_VEC:
2707     case ADDR_DIFF_VEC:
2708     case VAR_LOCATION:
2709       return 0;
2710 
2711     case CLOBBER:
2712       /* Reject CLOBBER with a non-VOID mode.  These are made by combine.c
2713 	 when some combination can't be done.  If we see one, don't think
2714 	 that we can simplify the expression.  */
2715       return (GET_MODE (x) != VOIDmode);
2716 
2717     case PRE_INC:
2718     case PRE_DEC:
2719     case POST_INC:
2720     case POST_DEC:
2721     case PRE_MODIFY:
2722     case POST_MODIFY:
2723     case CALL:
2724     case UNSPEC_VOLATILE:
2725       return 1;
2726 
2727     case MEM:
2728     case ASM_INPUT:
2729     case ASM_OPERANDS:
2730       if (MEM_VOLATILE_P (x))
2731 	return 1;
2732 
2733     default:
2734       break;
2735     }
2736 
2737   /* Recursively scan the operands of this expression.  */
2738 
2739   {
2740     const char *fmt = GET_RTX_FORMAT (code);
2741     int i;
2742 
2743     for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2744       {
2745 	if (fmt[i] == 'e')
2746 	  {
2747 	    if (side_effects_p (XEXP (x, i)))
2748 	      return 1;
2749 	  }
2750 	else if (fmt[i] == 'E')
2751 	  {
2752 	    int j;
2753 	    for (j = 0; j < XVECLEN (x, i); j++)
2754 	      if (side_effects_p (XVECEXP (x, i, j)))
2755 		return 1;
2756 	  }
2757       }
2758   }
2759   return 0;
2760 }
2761 
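/* A typical guard sketch (hypothetical caller and variables, not part of
   this file): an expression may only be deleted or duplicated freely when
   it has no side effects.

     if (!side_effects_p (SET_SRC (set)) && !side_effects_p (SET_DEST (set)))
       ... the insn computing SET can be removed if its result is unused ...  */
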
2762 /* Return nonzero if evaluating rtx X might cause a trap.
2763    FLAGS controls how to consider MEMs.  A nonzero value means the context
2764    of the access may have changed from the original, such that the
2765    address may have become invalid.  */
2766 
2767 int
2768 may_trap_p_1 (const_rtx x, unsigned flags)
2769 {
2770   int i;
2771   enum rtx_code code;
2772   const char *fmt;
2773 
2774   /* We make no distinction currently, but this function is part of
2775      the internal target-hooks ABI so we keep the parameter as
2776      "unsigned flags".  */
2777   bool code_changed = flags != 0;
2778 
2779   if (x == 0)
2780     return 0;
2781   code = GET_CODE (x);
2782   switch (code)
2783     {
2784       /* Handle these cases quickly.  */
2785     CASE_CONST_ANY:
2786     case SYMBOL_REF:
2787     case LABEL_REF:
2788     case CONST:
2789     case PC:
2790     case CC0:
2791     case REG:
2792     case SCRATCH:
2793       return 0;
2794 
2795     case UNSPEC:
2796       return targetm.unspec_may_trap_p (x, flags);
2797 
2798     case UNSPEC_VOLATILE:
2799     case ASM_INPUT:
2800     case TRAP_IF:
2801       return 1;
2802 
2803     case ASM_OPERANDS:
2804       return MEM_VOLATILE_P (x);
2805 
2806       /* Memory ref can trap unless it's a static var or a stack slot.  */
2807     case MEM:
2808       /* Recognize specific pattern of stack checking probes.  */
2809       if (flag_stack_check
2810 	  && MEM_VOLATILE_P (x)
2811 	  && XEXP (x, 0) == stack_pointer_rtx)
2812 	return 1;
2813       if (/* MEM_NOTRAP_P only relates to the actual position of the memory
2814 	     reference; moving it out of context such as when moving code
2815 	     when optimizing, might cause its address to become invalid.  */
2816 	  code_changed
2817 	  || !MEM_NOTRAP_P (x))
2818 	{
2819 	  poly_int64 size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : -1;
2820 	  return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
2821 					GET_MODE (x), code_changed);
2822 	}
2823 
2824       return 0;
2825 
2826       /* Division by a non-constant might trap.  */
2827     case DIV:
2828     case MOD:
2829     case UDIV:
2830     case UMOD:
2831       if (HONOR_SNANS (x))
2832 	return 1;
2833       if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
2834 	return flag_trapping_math;
2835       if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
2836 	return 1;
2837       break;
2838 
2839     case EXPR_LIST:
2840       /* An EXPR_LIST is used to represent a function call.  This
2841 	 certainly may trap.  */
2842       return 1;
2843 
2844     case GE:
2845     case GT:
2846     case LE:
2847     case LT:
2848     case LTGT:
2849     case COMPARE:
2850       /* Some floating point comparisons may trap.  */
2851       if (!flag_trapping_math)
2852 	break;
2853       /* ??? There is no machine independent way to check for tests that trap
2854 	 when COMPARE is used, though many targets do make this distinction.
2855 	 For instance, sparc uses CCFPE for compares which generate exceptions
2856 	 and CCFP for compares which do not generate exceptions.  */
2857       if (HONOR_NANS (x))
2858 	return 1;
2859       /* But often the compare has some CC mode, so check operand
2860 	 modes as well.  */
2861       if (HONOR_NANS (XEXP (x, 0))
2862 	  || HONOR_NANS (XEXP (x, 1)))
2863 	return 1;
2864       break;
2865 
2866     case EQ:
2867     case NE:
2868       if (HONOR_SNANS (x))
2869 	return 1;
2870       /* Often comparison is CC mode, so check operand modes.  */
2871       if (HONOR_SNANS (XEXP (x, 0))
2872 	  || HONOR_SNANS (XEXP (x, 1)))
2873 	return 1;
2874       break;
2875 
2876     case FIX:
2877       /* Conversion of floating point might trap.  */
2878       if (flag_trapping_math && HONOR_NANS (XEXP (x, 0)))
2879 	return 1;
2880       break;
2881 
2882     case NEG:
2883     case ABS:
2884     case SUBREG:
2885       /* These operations don't trap even with floating point.  */
2886       break;
2887 
2888     default:
2889       /* Any floating arithmetic may trap.  */
2890       if (SCALAR_FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
2891 	return 1;
2892     }
2893 
2894   fmt = GET_RTX_FORMAT (code);
2895   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2896     {
2897       if (fmt[i] == 'e')
2898 	{
2899 	  if (may_trap_p_1 (XEXP (x, i), flags))
2900 	    return 1;
2901 	}
2902       else if (fmt[i] == 'E')
2903 	{
2904 	  int j;
2905 	  for (j = 0; j < XVECLEN (x, i); j++)
2906 	    if (may_trap_p_1 (XVECEXP (x, i, j), flags))
2907 	      return 1;
2908 	}
2909     }
2910   return 0;
2911 }
2912 
2913 /* Return nonzero if evaluating rtx X might cause a trap.  */
2914 
2915 int
2916 may_trap_p (const_rtx x)
2917 {
2918   return may_trap_p_1 (x, 0);
2919 }
2920 
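/* A typical guard sketch (hypothetical caller and variables, not part of
   this file): an expression may only be evaluated speculatively, e.g.
   hoisted above a branch, when it can neither trap nor have side effects.

     if (!may_trap_p (SET_SRC (set)) && !side_effects_p (SET_SRC (set)))
       ... the computation may be moved above the enclosing condition ...  */
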
2921 /* Same as above, but additionally return nonzero if evaluating rtx X might
2922    cause a fault.  We define a fault for the purpose of this function as an
2923    erroneous execution condition that cannot be encountered during the normal
2924    execution of a valid program; the typical example is an unaligned memory
2925    access on a strict alignment machine.  The compiler guarantees that it
2926    doesn't generate code that will fault from a valid program, but this
2927    guarantee doesn't mean anything for individual instructions.  Consider
2928    the following example:
2929 
2930       struct S { int d; union { char *cp; int *ip; }; };
2931 
2932       int foo(struct S *s)
2933       {
2934 	if (s->d == 1)
2935 	  return *s->ip;
2936 	else
2937 	  return *s->cp;
2938       }
2939 
2940    on a strict alignment machine.  In a valid program, foo will never be
2941    invoked on a structure for which d is equal to 1 and the underlying
2942    unique field of the union not aligned on a 4-byte boundary, but the
2943    expression *s->ip might cause a fault if considered individually.
2944 
2945    At the RTL level, potentially problematic expressions will almost always
2946    satisfy may_trap_p; for example, the above dereference can be emitted as
2947    (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
2948    However, suppose that foo is inlined in a caller that causes s->cp to
2949    point to a local character variable and guarantees that s->d is not set
2950    to 1; foo may have been effectively translated into pseudo-RTL as:
2951 
2952       if ((reg:SI) == 1)
2953 	(set (reg:SI) (mem:SI (%fp - 7)))
2954       else
2955 	(set (reg:QI) (mem:QI (%fp - 7)))
2956 
2957    Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
2958    memory reference to a stack slot, but it will certainly cause a fault
2959    on a strict alignment machine.  */
2960 
2961 int
2962 may_trap_or_fault_p (const_rtx x)
2963 {
2964   return may_trap_p_1 (x, 1);
2965 }
2966 
2967 /* Return nonzero if X contains a comparison that is not either EQ or NE,
2968    i.e., an inequality.  */
2969 
2970 int
2971 inequality_comparisons_p (const_rtx x)
2972 {
2973   const char *fmt;
2974   int len, i;
2975   const enum rtx_code code = GET_CODE (x);
2976 
2977   switch (code)
2978     {
2979     case REG:
2980     case SCRATCH:
2981     case PC:
2982     case CC0:
2983     CASE_CONST_ANY:
2984     case CONST:
2985     case LABEL_REF:
2986     case SYMBOL_REF:
2987       return 0;
2988 
2989     case LT:
2990     case LTU:
2991     case GT:
2992     case GTU:
2993     case LE:
2994     case LEU:
2995     case GE:
2996     case GEU:
2997       return 1;
2998 
2999     default:
3000       break;
3001     }
3002 
3003   len = GET_RTX_LENGTH (code);
3004   fmt = GET_RTX_FORMAT (code);
3005 
3006   for (i = 0; i < len; i++)
3007     {
3008       if (fmt[i] == 'e')
3009 	{
3010 	  if (inequality_comparisons_p (XEXP (x, i)))
3011 	    return 1;
3012 	}
3013       else if (fmt[i] == 'E')
3014 	{
3015 	  int j;
3016 	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3017 	    if (inequality_comparisons_p (XVECEXP (x, i, j)))
3018 	      return 1;
3019 	}
3020     }
3021 
3022   return 0;
3023 }
3024 
3025 /* Replace any occurrence of FROM in X with TO.  The function does
3026    not recurse into CONST_DOUBLE expressions when replacing.
3027 
3028    Note that copying is not done so X must not be shared unless all copies
3029    are to be modified.
3030 
3031    ALL_REGS is true if we want to replace all REGs equal to FROM, not just
3032    those pointer-equal ones.  */
3033 
3034 rtx
3035 replace_rtx (rtx x, rtx from, rtx to, bool all_regs)
3036 {
3037   int i, j;
3038   const char *fmt;
3039 
3040   if (x == from)
3041     return to;
3042 
3043   /* Allow this function to make replacements in EXPR_LISTs.  */
3044   if (x == 0)
3045     return 0;
3046 
3047   if (all_regs
3048       && REG_P (x)
3049       && REG_P (from)
3050       && REGNO (x) == REGNO (from))
3051     {
3052       gcc_assert (GET_MODE (x) == GET_MODE (from));
3053       return to;
3054     }
3055   else if (GET_CODE (x) == SUBREG)
3056     {
3057       rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to, all_regs);
3058 
3059       if (CONST_INT_P (new_rtx))
3060 	{
3061 	  x = simplify_subreg (GET_MODE (x), new_rtx,
3062 			       GET_MODE (SUBREG_REG (x)),
3063 			       SUBREG_BYTE (x));
3064 	  gcc_assert (x);
3065 	}
3066       else
3067 	SUBREG_REG (x) = new_rtx;
3068 
3069       return x;
3070     }
3071   else if (GET_CODE (x) == ZERO_EXTEND)
3072     {
3073       rtx new_rtx = replace_rtx (XEXP (x, 0), from, to, all_regs);
3074 
3075       if (CONST_INT_P (new_rtx))
3076 	{
3077 	  x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
3078 					new_rtx, GET_MODE (XEXP (x, 0)));
3079 	  gcc_assert (x);
3080 	}
3081       else
3082 	XEXP (x, 0) = new_rtx;
3083 
3084       return x;
3085     }
3086 
3087   fmt = GET_RTX_FORMAT (GET_CODE (x));
3088   for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3089     {
3090       if (fmt[i] == 'e')
3091 	XEXP (x, i) = replace_rtx (XEXP (x, i), from, to, all_regs);
3092       else if (fmt[i] == 'E')
3093 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3094 	  XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j),
3095 					   from, to, all_regs);
3096     }
3097 
3098   return x;
3099 }
3100 
3101 /* Replace occurrences of the OLD_LABEL in *LOC with NEW_LABEL.  Also track
3102    the change in LABEL_NUSES if UPDATE_LABEL_NUSES.  */
3103 
3104 void
3105 replace_label (rtx *loc, rtx old_label, rtx new_label, bool update_label_nuses)
3106 {
3107   /* Handle jump tables specially, since ADDR_{DIFF_,}VECs can be long.  */
3108   rtx x = *loc;
3109   if (JUMP_TABLE_DATA_P (x))
3110     {
3111       x = PATTERN (x);
3112       rtvec vec = XVEC (x, GET_CODE (x) == ADDR_DIFF_VEC);
3113       int len = GET_NUM_ELEM (vec);
3114       for (int i = 0; i < len; ++i)
3115 	{
3116 	  rtx ref = RTVEC_ELT (vec, i);
3117 	  if (XEXP (ref, 0) == old_label)
3118 	    {
3119 	      XEXP (ref, 0) = new_label;
3120 	      if (update_label_nuses)
3121 		{
3122 		  ++LABEL_NUSES (new_label);
3123 		  --LABEL_NUSES (old_label);
3124 		}
3125 	    }
3126 	}
3127       return;
3128     }
3129 
3130   /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
3131      field.  This is not handled by the iterator because it doesn't
3132      handle unprinted ('0') fields.  */
3133   if (JUMP_P (x) && JUMP_LABEL (x) == old_label)
3134     JUMP_LABEL (x) = new_label;
3135 
3136   subrtx_ptr_iterator::array_type array;
3137   FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL)
3138     {
3139       rtx *loc = *iter;
3140       if (rtx x = *loc)
3141 	{
3142 	  if (GET_CODE (x) == SYMBOL_REF
3143 	      && CONSTANT_POOL_ADDRESS_P (x))
3144 	    {
3145 	      rtx c = get_pool_constant (x);
3146 	      if (rtx_referenced_p (old_label, c))
3147 		{
3148 		  /* Create a copy of constant C; replace the label inside
3149 		     but do not update LABEL_NUSES because uses in constant pool
3150 		     are not counted.  */
3151 		  rtx new_c = copy_rtx (c);
3152 		  replace_label (&new_c, old_label, new_label, false);
3153 
3154 		  /* Add the new constant NEW_C to constant pool and replace
3155 		     the old reference to constant by new reference.  */
3156 		  rtx new_mem = force_const_mem (get_pool_mode (x), new_c);
3157 		  *loc = replace_rtx (x, x, XEXP (new_mem, 0));
3158 		}
3159 	    }
3160 
3161 	  if ((GET_CODE (x) == LABEL_REF
3162 	       || GET_CODE (x) == INSN_LIST)
3163 	      && XEXP (x, 0) == old_label)
3164 	    {
3165 	      XEXP (x, 0) = new_label;
3166 	      if (update_label_nuses)
3167 		{
3168 		  ++LABEL_NUSES (new_label);
3169 		  --LABEL_NUSES (old_label);
3170 		}
3171 	    }
3172 	}
3173     }
3174 }
3175 
3176 void
3177 replace_label_in_insn (rtx_insn *insn, rtx_insn *old_label,
3178 		       rtx_insn *new_label, bool update_label_nuses)
3179 {
3180   rtx insn_as_rtx = insn;
3181   replace_label (&insn_as_rtx, old_label, new_label, update_label_nuses);
3182   gcc_checking_assert (insn_as_rtx == insn);
3183 }
3184 
3185 /* Return true if X is referenced in BODY.  */
3186 
3187 bool
3188 rtx_referenced_p (const_rtx x, const_rtx body)
3189 {
3190   subrtx_iterator::array_type array;
3191   FOR_EACH_SUBRTX (iter, array, body, ALL)
3192     if (const_rtx y = *iter)
3193       {
3194 	/* Check if a label_ref Y refers to label X.  */
3195 	if (GET_CODE (y) == LABEL_REF
3196 	    && LABEL_P (x)
3197 	    && label_ref_label (y) == x)
3198 	  return true;
3199 
3200 	if (rtx_equal_p (x, y))
3201 	  return true;
3202 
3203 	/* If Y is a reference to pool constant traverse the constant.  */
3204 	if (GET_CODE (y) == SYMBOL_REF
3205 	    && CONSTANT_POOL_ADDRESS_P (y))
3206 	  iter.substitute (get_pool_constant (y));
3207       }
3208   return false;
3209 }
3210 
3211 /* If INSN is a tablejump, return true and store the label (which precedes the
3212    jump table) in *LABELP and the jump table in *TABLEP.  Both may be NULL.  */
3213 
3214 bool
3215 tablejump_p (const rtx_insn *insn, rtx_insn **labelp,
3216 	     rtx_jump_table_data **tablep)
3217 {
3218   if (!JUMP_P (insn))
3219     return false;
3220 
3221   rtx target = JUMP_LABEL (insn);
3222   if (target == NULL_RTX || ANY_RETURN_P (target))
3223     return false;
3224 
3225   rtx_insn *label = as_a<rtx_insn *> (target);
3226   rtx_insn *table = next_insn (label);
3227   if (table == NULL_RTX || !JUMP_TABLE_DATA_P (table))
3228     return false;
3229 
3230   if (labelp)
3231     *labelp = label;
3232   if (tablep)
3233     *tablep = as_a <rtx_jump_table_data *> (table);
3234   return true;
3235 }
3236 
3237 /* A subroutine of computed_jump_p, return 1 if X contains a REG or MEM or
3238    constant that is not in the constant pool and not in the condition
3239    of an IF_THEN_ELSE.  */
3240 
3241 static int
3242 computed_jump_p_1 (const_rtx x)
3243 {
3244   const enum rtx_code code = GET_CODE (x);
3245   int i, j;
3246   const char *fmt;
3247 
3248   switch (code)
3249     {
3250     case LABEL_REF:
3251     case PC:
3252       return 0;
3253 
3254     case CONST:
3255     CASE_CONST_ANY:
3256     case SYMBOL_REF:
3257     case REG:
3258       return 1;
3259 
3260     case MEM:
3261       return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
3262 		&& CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));
3263 
3264     case IF_THEN_ELSE:
3265       return (computed_jump_p_1 (XEXP (x, 1))
3266 	      || computed_jump_p_1 (XEXP (x, 2)));
3267 
3268     default:
3269       break;
3270     }
3271 
3272   fmt = GET_RTX_FORMAT (code);
3273   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3274     {
3275       if (fmt[i] == 'e'
3276 	  && computed_jump_p_1 (XEXP (x, i)))
3277 	return 1;
3278 
3279       else if (fmt[i] == 'E')
3280 	for (j = 0; j < XVECLEN (x, i); j++)
3281 	  if (computed_jump_p_1 (XVECEXP (x, i, j)))
3282 	    return 1;
3283     }
3284 
3285   return 0;
3286 }
3287 
3288 /* Return nonzero if INSN is an indirect jump (aka computed jump).
3289 
3290    Tablejumps and casesi insns are not considered indirect jumps;
3291    we can recognize them by a (use (label_ref)).  */
3292 
3293 int
3294 computed_jump_p (const rtx_insn *insn)
3295 {
3296   int i;
3297   if (JUMP_P (insn))
3298     {
3299       rtx pat = PATTERN (insn);
3300 
3301       /* If we have a JUMP_LABEL set, we're not a computed jump.  */
3302       if (JUMP_LABEL (insn) != NULL)
3303 	return 0;
3304 
3305       if (GET_CODE (pat) == PARALLEL)
3306 	{
3307 	  int len = XVECLEN (pat, 0);
3308 	  int has_use_labelref = 0;
3309 
3310 	  for (i = len - 1; i >= 0; i--)
3311 	    if (GET_CODE (XVECEXP (pat, 0, i)) == USE
3312 		&& (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
3313 		    == LABEL_REF))
3314 	      {
3315 	        has_use_labelref = 1;
3316 	        break;
3317 	      }
3318 
3319 	  if (! has_use_labelref)
3320 	    for (i = len - 1; i >= 0; i--)
3321 	      if (GET_CODE (XVECEXP (pat, 0, i)) == SET
3322 		  && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
3323 		  && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
3324 		return 1;
3325 	}
3326       else if (GET_CODE (pat) == SET
3327 	       && SET_DEST (pat) == pc_rtx
3328 	       && computed_jump_p_1 (SET_SRC (pat)))
3329 	return 1;
3330     }
3331   return 0;
3332 }
3333 
3334 
3335 
3336 /* MEM has a PRE/POST-INC/DEC/MODIFY address X.  Extract the operands of
3337    the equivalent add insn and pass the result to FN, using DATA as the
3338    final argument.  */
3339 
3340 static int
3341 for_each_inc_dec_find_inc_dec (rtx mem, for_each_inc_dec_fn fn, void *data)
3342 {
3343   rtx x = XEXP (mem, 0);
3344   switch (GET_CODE (x))
3345     {
3346     case PRE_INC:
3347     case POST_INC:
3348       {
3349 	poly_int64 size = GET_MODE_SIZE (GET_MODE (mem));
3350 	rtx r1 = XEXP (x, 0);
3351 	rtx c = gen_int_mode (size, GET_MODE (r1));
3352 	return fn (mem, x, r1, r1, c, data);
3353       }
3354 
3355     case PRE_DEC:
3356     case POST_DEC:
3357       {
3358 	poly_int64 size = GET_MODE_SIZE (GET_MODE (mem));
3359 	rtx r1 = XEXP (x, 0);
3360 	rtx c = gen_int_mode (-size, GET_MODE (r1));
3361 	return fn (mem, x, r1, r1, c, data);
3362       }
3363 
3364     case PRE_MODIFY:
3365     case POST_MODIFY:
3366       {
3367 	rtx r1 = XEXP (x, 0);
3368 	rtx add = XEXP (x, 1);
3369 	return fn (mem, x, r1, add, NULL, data);
3370       }
3371 
3372     default:
3373       gcc_unreachable ();
3374     }
3375 }
3376 
3377 /* Traverse X looking for MEMs that have autoinc addresses.
3378    For each such autoinc operation found, call FN, passing it
3379    the innermost enclosing MEM, the operation itself, the RTX modified
3380    by the operation, two RTXs (the second may be NULL) that, once
3381    added, represent the value to be held by the modified RTX
3382    afterwards, and DATA.  FN is to return 0 to continue the
3383    traversal or any other value to have it returned to the caller of
3384    for_each_inc_dec.  */
3385 
3386 int
3387 for_each_inc_dec (rtx x,
3388 		  for_each_inc_dec_fn fn,
3389 		  void *data)
3390 {
3391   subrtx_var_iterator::array_type array;
3392   FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
3393     {
3394       rtx mem = *iter;
3395       if (mem
3396 	  && MEM_P (mem)
3397 	  && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
3398 	{
3399 	  int res = for_each_inc_dec_find_inc_dec (mem, fn, data);
3400 	  if (res != 0)
3401 	    return res;
3402 	  iter.skip_subrtxes ();
3403 	}
3404     }
3405   return 0;
3406 }
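
/* A callback sketch (hypothetical, not part of this file): count the
   auto-increment operations hidden inside one insn pattern.

     static int
     count_autoinc (rtx mem ATTRIBUTE_UNUSED, rtx op ATTRIBUTE_UNUSED,
		    rtx dest ATTRIBUTE_UNUSED, rtx src ATTRIBUTE_UNUSED,
		    rtx srcoff ATTRIBUTE_UNUSED, void *data)
     {
       ++*(int *) data;
       return 0;
     }

     ...
     int count = 0;
     for_each_inc_dec (PATTERN (insn), count_autoinc, &count);  */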
3407 
3408 
3409 /* Searches X for any reference to REGNO, returning the rtx of the
3410    reference found if any.  Otherwise, returns NULL_RTX.  */
3411 
3412 rtx
3413 regno_use_in (unsigned int regno, rtx x)
3414 {
3415   const char *fmt;
3416   int i, j;
3417   rtx tem;
3418 
3419   if (REG_P (x) && REGNO (x) == regno)
3420     return x;
3421 
3422   fmt = GET_RTX_FORMAT (GET_CODE (x));
3423   for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3424     {
3425       if (fmt[i] == 'e')
3426 	{
3427 	  if ((tem = regno_use_in (regno, XEXP (x, i))))
3428 	    return tem;
3429 	}
3430       else if (fmt[i] == 'E')
3431 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3432 	  if ((tem = regno_use_in (regno , XVECEXP (x, i, j))))
3433 	    return tem;
3434     }
3435 
3436   return NULL_RTX;
3437 }
3438 
3439 /* Return a value indicating whether OP, an operand of a commutative
3440    operation, is preferred as the first or second operand.  The more
3441    positive the value, the stronger the preference for being the first
3442    operand.  */
3443 
3444 int
3445 commutative_operand_precedence (rtx op)
3446 {
3447   enum rtx_code code = GET_CODE (op);
3448 
3449   /* Constants always become the second operand.  Prefer "nice" constants.  */
3450   if (code == CONST_INT)
3451     return -10;
3452   if (code == CONST_WIDE_INT)
3453     return -9;
3454   if (code == CONST_POLY_INT)
3455     return -8;
3456   if (code == CONST_DOUBLE)
3457     return -8;
3458   if (code == CONST_FIXED)
3459     return -8;
3460   op = avoid_constant_pool_reference (op);
3461   code = GET_CODE (op);
3462 
3463   switch (GET_RTX_CLASS (code))
3464     {
3465     case RTX_CONST_OBJ:
3466       if (code == CONST_INT)
3467 	return -7;
3468       if (code == CONST_WIDE_INT)
3469 	return -6;
3470       if (code == CONST_POLY_INT)
3471 	return -5;
3472       if (code == CONST_DOUBLE)
3473 	return -5;
3474       if (code == CONST_FIXED)
3475 	return -5;
3476       return -4;
3477 
3478     case RTX_EXTRA:
3479       /* SUBREGs of objects should come second.  */
3480       if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
3481         return -3;
3482       return 0;
3483 
3484     case RTX_OBJ:
3485       /* Complex expressions should come first, so decrease the priority
3486          of objects.  Prefer pointer objects over non-pointer objects.  */
3487       if ((REG_P (op) && REG_POINTER (op))
3488 	  || (MEM_P (op) && MEM_POINTER (op)))
3489 	return -1;
3490       return -2;
3491 
3492     case RTX_COMM_ARITH:
3493       /* Prefer operands that are themselves commutative to be first.
3494          This helps to make things linear.  In particular,
3495          (and (and (reg) (reg)) (not (reg))) is canonical.  */
3496       return 4;
3497 
3498     case RTX_BIN_ARITH:
3499       /* If only one operand is a binary expression, it will be the first
3500          operand.  In particular,  (plus (minus (reg) (reg)) (neg (reg)))
3501          is canonical, although it will usually be further simplified.  */
3502       return 2;
3503 
3504     case RTX_UNARY:
3505       /* Then prefer NEG and NOT.  */
3506       if (code == NEG || code == NOT)
3507         return 1;
3508       /* FALLTHRU */
3509 
3510     default:
3511       return 0;
3512     }
3513 }
3514 
3515 /* Return 1 iff it is necessary to swap the operands of a commutative
3516    operation in order to canonicalize the expression.  */
3517 
3518 bool
3519 swap_commutative_operands_p (rtx x, rtx y)
3520 {
3521   return (commutative_operand_precedence (x)
3522 	  < commutative_operand_precedence (y));
3523 }
3524 
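/* A typical canonicalization sketch (hypothetical caller, not part of this
   file), ensuring the operand with higher precedence ends up first:

     if (swap_commutative_operands_p (op0, op1))
       std::swap (op0, op1);  */
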
3525 /* Return 1 if X is an autoincrement side effect and the register is
3526    not the stack pointer.  */
3527 int
3528 auto_inc_p (const_rtx x)
3529 {
3530   switch (GET_CODE (x))
3531     {
3532     case PRE_INC:
3533     case POST_INC:
3534     case PRE_DEC:
3535     case POST_DEC:
3536     case PRE_MODIFY:
3537     case POST_MODIFY:
3538       /* There are no REG_INC notes for SP.  */
3539       if (XEXP (x, 0) != stack_pointer_rtx)
3540 	return 1;
3541     default:
3542       break;
3543     }
3544   return 0;
3545 }
3546 
3547 /* Return nonzero if IN contains a piece of rtl that has the address LOC.  */
3548 int
3549 loc_mentioned_in_p (rtx *loc, const_rtx in)
3550 {
3551   enum rtx_code code;
3552   const char *fmt;
3553   int i, j;
3554 
3555   if (!in)
3556     return 0;
3557 
3558   code = GET_CODE (in);
3559   fmt = GET_RTX_FORMAT (code);
3560   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3561     {
3562       if (fmt[i] == 'e')
3563 	{
3564 	  if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
3565 	    return 1;
3566 	}
3567       else if (fmt[i] == 'E')
3568 	for (j = XVECLEN (in, i) - 1; j >= 0; j--)
3569 	  if (loc == &XVECEXP (in, i, j)
3570 	      || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
3571 	    return 1;
3572     }
3573   return 0;
3574 }
3575 
3576 /* Helper function for subreg_lsb.  Given a subreg's OUTER_MODE, INNER_MODE,
3577    and SUBREG_BYTE, return the bit offset where the subreg begins
3578    (counting from the least significant bit of the operand).  */
3579 
3580 poly_uint64
3581 subreg_lsb_1 (machine_mode outer_mode,
3582 	      machine_mode inner_mode,
3583 	      poly_uint64 subreg_byte)
3584 {
3585   poly_uint64 subreg_end, trailing_bytes, byte_pos;
3586 
3587   /* A paradoxical subreg begins at bit position 0.  */
3588   if (paradoxical_subreg_p (outer_mode, inner_mode))
3589     return 0;
3590 
3591   subreg_end = subreg_byte + GET_MODE_SIZE (outer_mode);
3592   trailing_bytes = GET_MODE_SIZE (inner_mode) - subreg_end;
3593   if (WORDS_BIG_ENDIAN && BYTES_BIG_ENDIAN)
3594     byte_pos = trailing_bytes;
3595   else if (!WORDS_BIG_ENDIAN && !BYTES_BIG_ENDIAN)
3596     byte_pos = subreg_byte;
3597   else
3598     {
3599       /* When bytes and words have opposite endianness, we must be able
3600 	 to split offsets into words and bytes at compile time.  */
3601       poly_uint64 leading_word_part
3602 	= force_align_down (subreg_byte, UNITS_PER_WORD);
3603       poly_uint64 trailing_word_part
3604 	= force_align_down (trailing_bytes, UNITS_PER_WORD);
3605       /* If the subreg crosses a word boundary ensure that
3606 	 it also begins and ends on a word boundary.  */
3607       gcc_assert (known_le (subreg_end - leading_word_part,
3608 			    (unsigned int) UNITS_PER_WORD)
3609 		  || (known_eq (leading_word_part, subreg_byte)
3610 		      && known_eq (trailing_word_part, trailing_bytes)));
3611       if (WORDS_BIG_ENDIAN)
3612 	byte_pos = trailing_word_part + (subreg_byte - leading_word_part);
3613       else
3614 	byte_pos = leading_word_part + (trailing_bytes - trailing_word_part);
3615     }
3616 
3617   return byte_pos * BITS_PER_UNIT;
3618 }
3619 
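/* A worked example (values chosen for illustration): for
   (subreg:SI (reg:DI R) 4) the call is subreg_lsb_1 (SImode, DImode, 4).
   On a little-endian target the subreg starts at byte 4, i.e. bit 32.
   On a target where both bytes and words are big-endian, the trailing
   byte count is 8 - (4 + 4) = 0, so the subreg starts at bit 0.  */
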
3620 /* Given a subreg X, return the bit offset where the subreg begins
3621    (counting from the least significant bit of the reg).  */
3622 
3623 poly_uint64
3624 subreg_lsb (const_rtx x)
3625 {
3626   return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
3627 		       SUBREG_BYTE (x));
3628 }
3629 
3630 /* Return the subreg byte offset for a subreg whose outer value has
3631    OUTER_BYTES bytes, whose inner value has INNER_BYTES bytes, and where
3632    there are LSB_SHIFT *bits* between the lsb of the outer value and the
3633    lsb of the inner value.  This is the inverse of the calculation
3634    performed by subreg_lsb_1 (which converts byte offsets to bit shifts).  */
3635 
3636 poly_uint64
3637 subreg_size_offset_from_lsb (poly_uint64 outer_bytes, poly_uint64 inner_bytes,
3638 			     poly_uint64 lsb_shift)
3639 {
3640   /* A paradoxical subreg begins at bit position 0.  */
3641   gcc_checking_assert (ordered_p (outer_bytes, inner_bytes));
3642   if (maybe_gt (outer_bytes, inner_bytes))
3643     {
3644       gcc_checking_assert (known_eq (lsb_shift, 0U));
3645       return 0;
3646     }
3647 
3648   poly_uint64 lower_bytes = exact_div (lsb_shift, BITS_PER_UNIT);
3649   poly_uint64 upper_bytes = inner_bytes - (lower_bytes + outer_bytes);
3650   if (WORDS_BIG_ENDIAN && BYTES_BIG_ENDIAN)
3651     return upper_bytes;
3652   else if (!WORDS_BIG_ENDIAN && !BYTES_BIG_ENDIAN)
3653     return lower_bytes;
3654   else
3655     {
3656       /* When bytes and words have opposite endianness, we must be able
3657 	 to split offsets into words and bytes at compile time.  */
3658       poly_uint64 lower_word_part = force_align_down (lower_bytes,
3659 						      UNITS_PER_WORD);
3660       poly_uint64 upper_word_part = force_align_down (upper_bytes,
3661 						      UNITS_PER_WORD);
3662       if (WORDS_BIG_ENDIAN)
3663 	return upper_word_part + (lower_bytes - lower_word_part);
3664       else
3665 	return lower_word_part + (upper_bytes - upper_word_part);
3666     }
3667 }
3668 
3669 /* Fill in information about a subreg of a hard register.
3670    xregno - A regno of an inner hard subreg_reg (or what will become one).
3671    xmode  - The mode of xregno.
3672    offset - The byte offset.
3673    ymode  - The mode of a top level SUBREG (or what may become one).
3674    info   - Pointer to structure to fill in.
3675 
3676    Rather than considering one particular inner register (and thus one
3677    particular "outer" register) in isolation, this function really uses
3678    XREGNO as a model for a sequence of isomorphic hard registers.  Thus the
3679    function does not check whether adding INFO->offset to XREGNO gives
3680    a valid hard register; even if INFO->offset + XREGNO is out of range,
3681    there might be another register of the same type that is in range.
3682    Likewise it doesn't check whether targetm.hard_regno_mode_ok accepts
3683    the new register, since that can depend on things like whether the final
3684    register number is even or odd.  Callers that want to check whether
3685    this particular subreg can be replaced by a simple (reg ...) should
3686    use simplify_subreg_regno.  */
3687 
3688 void
3689 subreg_get_info (unsigned int xregno, machine_mode xmode,
3690 		 poly_uint64 offset, machine_mode ymode,
3691 		 struct subreg_info *info)
3692 {
3693   unsigned int nregs_xmode, nregs_ymode;
3694 
3695   gcc_assert (xregno < FIRST_PSEUDO_REGISTER);
3696 
3697   poly_uint64 xsize = GET_MODE_SIZE (xmode);
3698   poly_uint64 ysize = GET_MODE_SIZE (ymode);
3699 
3700   bool rknown = false;
3701 
3702   /* If the register representation of a non-scalar mode has holes in it,
3703      we expect the scalar units to be concatenated together, with the holes
3704      distributed evenly among the scalar units.  Each scalar unit must occupy
3705      at least one register.  */
3706   if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
3707     {
3708       /* As a consequence, we must be dealing with a constant number of
3709 	 scalars, and thus a constant offset and number of units.  */
3710       HOST_WIDE_INT coffset = offset.to_constant ();
3711       HOST_WIDE_INT cysize = ysize.to_constant ();
3712       nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
3713       unsigned int nunits = GET_MODE_NUNITS (xmode).to_constant ();
3714       scalar_mode xmode_unit = GET_MODE_INNER (xmode);
3715       gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
3716       gcc_assert (nregs_xmode
3717 		  == (nunits
3718 		      * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
3719       gcc_assert (hard_regno_nregs (xregno, xmode)
3720 		  == hard_regno_nregs (xregno, xmode_unit) * nunits);
3721 
3722       /* You can only ask for a SUBREG of a value with holes in the middle
3723 	 if you don't cross the holes.  (Such a SUBREG should be done by
3724 	 picking a different register class, or doing it in memory if
3725 	 necessary.)  An example of a value with holes is XCmode on 32-bit
3726 	 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
3727 	 3 for each part, but in memory it's two 128-bit parts.
3728 	 Padding is assumed to be at the end (not necessarily the 'high part')
3729 	 of each unit.  */
3730       if ((coffset / GET_MODE_SIZE (xmode_unit) + 1 < nunits)
3731 	  && (coffset / GET_MODE_SIZE (xmode_unit)
3732 	      != ((coffset + cysize - 1) / GET_MODE_SIZE (xmode_unit))))
3733 	{
3734 	  info->representable_p = false;
3735 	  rknown = true;
3736 	}
3737     }
3738   else
3739     nregs_xmode = hard_regno_nregs (xregno, xmode);
3740 
3741   nregs_ymode = hard_regno_nregs (xregno, ymode);
3742 
3743   /* Subreg sizes must be ordered, so that we can tell whether they are
3744      partial, paradoxical or complete.  */
3745   gcc_checking_assert (ordered_p (xsize, ysize));
3746 
3747   /* Paradoxical subregs are otherwise valid.  */
3748   if (!rknown && known_eq (offset, 0U) && maybe_gt (ysize, xsize))
3749     {
3750       info->representable_p = true;
3751       /* If this is a big endian paradoxical subreg, which uses more
3752 	 actual hard registers than the original register, we must
3753 	 return a negative offset so that we find the proper highpart
3754 	 of the register.
3755 
3756 	 We assume that the ordering of registers within a multi-register
3757 	 value has a consistent endianness: if bytes and register words
3758 	 have different endianness, the hard registers that make up a
3759 	 multi-register value must be at least word-sized.  */
3760       if (REG_WORDS_BIG_ENDIAN)
3761 	info->offset = (int) nregs_xmode - (int) nregs_ymode;
3762       else
3763 	info->offset = 0;
3764       info->nregs = nregs_ymode;
3765       return;
3766     }
3767 
3768   /* If registers store different numbers of bits in the different
3769      modes, we cannot generally form this subreg.  */
3770   poly_uint64 regsize_xmode, regsize_ymode;
3771   if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
3772       && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
3773       && multiple_p (xsize, nregs_xmode, &regsize_xmode)
3774       && multiple_p (ysize, nregs_ymode, &regsize_ymode))
3775     {
3776       if (!rknown
3777 	  && ((nregs_ymode > 1 && maybe_gt (regsize_xmode, regsize_ymode))
3778 	      || (nregs_xmode > 1 && maybe_gt (regsize_ymode, regsize_xmode))))
3779 	{
3780 	  info->representable_p = false;
3781 	  if (!can_div_away_from_zero_p (ysize, regsize_xmode, &info->nregs)
3782 	      || !can_div_trunc_p (offset, regsize_xmode, &info->offset))
3783 	    /* Checked by validate_subreg.  We must know at compile time
3784 	       which inner registers are being accessed.  */
3785 	    gcc_unreachable ();
3786 	  return;
3787 	}
3788       /* It's not valid to extract a subreg of mode YMODE at OFFSET that
3789 	 would go outside of XMODE.  */
3790       if (!rknown && maybe_gt (ysize + offset, xsize))
3791 	{
3792 	  info->representable_p = false;
3793 	  info->nregs = nregs_ymode;
3794 	  if (!can_div_trunc_p (offset, regsize_xmode, &info->offset))
3795 	    /* Checked by validate_subreg.  We must know at compile time
3796 	       which inner registers are being accessed.  */
3797 	    gcc_unreachable ();
3798 	  return;
3799 	}
3800       /* Quick exit for the simple and common case of extracting whole
3801 	 subregisters from a multiregister value.  */
3802       /* ??? It would be better to integrate this into the code below,
3803 	 if we can generalize the concept enough and figure out how
3804 	 odd-sized modes can coexist with the other weird cases we support.  */
3805       HOST_WIDE_INT count;
3806       if (!rknown
3807 	  && WORDS_BIG_ENDIAN == REG_WORDS_BIG_ENDIAN
3808 	  && known_eq (regsize_xmode, regsize_ymode)
3809 	  && constant_multiple_p (offset, regsize_ymode, &count))
3810 	{
3811 	  info->representable_p = true;
3812 	  info->nregs = nregs_ymode;
3813 	  info->offset = count;
3814 	  gcc_assert (info->offset + info->nregs <= (int) nregs_xmode);
3815 	  return;
3816 	}
3817     }
3818 
3819   /* Lowpart subregs are otherwise valid.  */
3820   if (!rknown && known_eq (offset, subreg_lowpart_offset (ymode, xmode)))
3821     {
3822       info->representable_p = true;
3823       rknown = true;
3824 
3825       if (known_eq (offset, 0U) || nregs_xmode == nregs_ymode)
3826 	{
3827 	  info->offset = 0;
3828 	  info->nregs = nregs_ymode;
3829 	  return;
3830 	}
3831     }
3832 
3833   /* Set NUM_BLOCKS to the number of independently-representable YMODE
3834      values there are in (reg:XMODE XREGNO).  We can view the register
3835      as consisting of this number of independent "blocks", where each
3836      block occupies NREGS_YMODE registers and contains exactly one
3837      representable YMODE value.  */
3838   gcc_assert ((nregs_xmode % nregs_ymode) == 0);
3839   unsigned int num_blocks = nregs_xmode / nregs_ymode;
3840 
3841   /* Calculate the number of bytes in each block.  This must always
3842      be exact, otherwise we don't know how to verify the constraint.
3843      These conditions may be relaxed but subreg_regno_offset would
3844      need to be redesigned.  */
3845   poly_uint64 bytes_per_block = exact_div (xsize, num_blocks);
3846 
3847   /* Get the number of the first block that contains the subreg and the byte
3848      offset of the subreg from the start of that block.  */
3849   unsigned int block_number;
3850   poly_uint64 subblock_offset;
3851   if (!can_div_trunc_p (offset, bytes_per_block, &block_number,
3852 			&subblock_offset))
3853     /* Checked by validate_subreg.  We must know at compile time which
3854        inner registers are being accessed.  */
3855     gcc_unreachable ();
3856 
3857   if (!rknown)
3858     {
3859       /* Only the lowpart of each block is representable.  */
3860       info->representable_p
3861 	= known_eq (subblock_offset,
3862 		    subreg_size_lowpart_offset (ysize, bytes_per_block));
3863       rknown = true;
3864     }
3865 
3866   /* We assume that the ordering of registers within a multi-register
3867      value has a consistent endianness: if bytes and register words
3868      have different endianness, the hard registers that make up a
3869      multi-register value must be at least word-sized.  */
3870   if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN)
3871     /* The block number we calculated above followed memory endianness.
3872        Convert it to register endianness by counting back from the end.
3873        (Note that, because of the assumption above, each block must be
3874        at least word-sized.)  */
3875     info->offset = (num_blocks - block_number - 1) * nregs_ymode;
3876   else
3877     info->offset = block_number * nregs_ymode;
3878   info->nregs = nregs_ymode;
3879 }
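
/* A worked example of the above (illustrative numbers only): suppose hard
   registers are 4 bytes wide, XMODE is a 16-byte mode occupying 4 registers,
   YMODE is an 8-byte mode occupying 2, OFFSET is 8, and memory word
   endianness matches register word endianness.  Then REGSIZE_XMODE ==
   REGSIZE_YMODE == 4 and the "quick exit" case above applies with COUNT == 2,
   so the function records representable_p = true, offset = 2 and nregs = 2:
   the subreg names the last two of the four inner registers.  */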
3880 
3881 /* This function returns the regno offset of a subreg expression.
3882    xregno - A regno of an inner hard subreg_reg (or what will become one).
3883    xmode  - The mode of xregno.
3884    offset - The byte offset.
3885    ymode  - The mode of a top level SUBREG (or what may become one).
3886    RETURN - The regno offset which would be used.  */
3887 unsigned int
3888 subreg_regno_offset (unsigned int xregno, machine_mode xmode,
3889 		     poly_uint64 offset, machine_mode ymode)
3890 {
3891   struct subreg_info info;
3892   subreg_get_info (xregno, xmode, offset, ymode, &info);
3893   return info.offset;
3894 }
3895 
3896 /* This function returns true when the offset is representable via
3897    subreg_offset in the given regno.
3898    xregno - A regno of an inner hard subreg_reg (or what will become one).
3899    xmode  - The mode of xregno.
3900    offset - The byte offset.
3901    ymode  - The mode of a top level SUBREG (or what may become one).
3902    RETURN - Whether the offset is representable.  */
3903 bool
3904 subreg_offset_representable_p (unsigned int xregno, machine_mode xmode,
3905 			       poly_uint64 offset, machine_mode ymode)
3906 {
3907   struct subreg_info info;
3908   subreg_get_info (xregno, xmode, offset, ymode, &info);
3909   return info.representable_p;
3910 }
3911 
3912 /* Return the number of a YMODE register to which
3913 
3914        (subreg:YMODE (reg:XMODE XREGNO) OFFSET)
3915 
3916    can be simplified.  Return -1 if the subreg can't be simplified.
3917 
3918    XREGNO is a hard register number.  */
3919 
3920 int
3921 simplify_subreg_regno (unsigned int xregno, machine_mode xmode,
3922 		       poly_uint64 offset, machine_mode ymode)
3923 {
3924   struct subreg_info info;
3925   unsigned int yregno;
3926 
3927   /* Give the backend a chance to disallow the mode change.  */
3928   if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
3929       && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
3930       && !REG_CAN_CHANGE_MODE_P (xregno, xmode, ymode)
3931       /* We can use mode change in LRA for some transformations.  */
3932       && ! lra_in_progress)
3933     return -1;
3934 
3935   /* We shouldn't simplify stack-related registers.  */
3936   if ((!reload_completed || frame_pointer_needed)
3937       && xregno == FRAME_POINTER_REGNUM)
3938     return -1;
3939 
3940   if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3941       && xregno == ARG_POINTER_REGNUM)
3942     return -1;
3943 
3944   if (xregno == STACK_POINTER_REGNUM
3945       /* We should convert the hard stack register in LRA if
3946 	 possible.  */
3947       && ! lra_in_progress)
3948     return -1;
3949 
3950   /* Try to get the register offset.  */
3951   subreg_get_info (xregno, xmode, offset, ymode, &info);
3952   if (!info.representable_p)
3953     return -1;
3954 
3955   /* Make sure that the offsetted register value is in range.  */
3956   yregno = xregno + info.offset;
3957   if (!HARD_REGISTER_NUM_P (yregno))
3958     return -1;
3959 
3960   /* See whether (reg:YMODE YREGNO) is valid.
3961 
3962      ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
3963      This is a kludge to work around how complex FP arguments are passed
3964      on IA-64 and should be fixed.  See PR target/49226.  */
3965   if (!targetm.hard_regno_mode_ok (yregno, ymode)
3966       && targetm.hard_regno_mode_ok (xregno, xmode))
3967     return -1;
3968 
3969   return (int) yregno;
3970 }
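
/* Usage sketch (hypothetical hard register R, not one of the stack-related
   registers, on a little-endian target with word-sized hard registers where
   the backend allows the mode change): for (subreg:SI (reg:DI R) 4) one
   would call

     simplify_subreg_regno (R, DImode, 4, SImode);

   which, via subreg_get_info, finds an offset of 1 and so returns R + 1,
   provided R + 1 is a valid hard register that can hold SImode.  */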
3971 
3972 /* Return the final regno that a subreg expression refers to.  */
3973 unsigned int
3974 subreg_regno (const_rtx x)
3975 {
3976   unsigned int ret;
3977   rtx subreg = SUBREG_REG (x);
3978   int regno = REGNO (subreg);
3979 
3980   ret = regno + subreg_regno_offset (regno,
3981 				     GET_MODE (subreg),
3982 				     SUBREG_BYTE (x),
3983 				     GET_MODE (x));
3984   return ret;
3985 
3986 }
3987 
3988 /* Return the number of registers that a subreg expression refers
3989    to.  */
3990 unsigned int
3991 subreg_nregs (const_rtx x)
3992 {
3993   return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
3994 }
3995 
3996 /* Return the number of registers that the subreg expression X, taken
3997    with inner register number REGNO, refers to.  This is a copy of
3998    subreg_nregs, changed so that the regno can be passed in.  */
3999 
4000 unsigned int
4001 subreg_nregs_with_regno (unsigned int regno, const_rtx x)
4002 {
4003   struct subreg_info info;
4004   rtx subreg = SUBREG_REG (x);
4005 
4006   subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
4007 		   &info);
4008   return info.nregs;
4009 }
4010 
4011 struct parms_set_data
4012 {
4013   int nregs;
4014   HARD_REG_SET regs;
4015 };
4016 
4017 /* Helper function for noticing stores to parameter registers.  */
4018 static void
4019 parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
4020 {
4021   struct parms_set_data *const d = (struct parms_set_data *) data;
4022   if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
4023       && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
4024     {
4025       CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
4026       d->nregs--;
4027     }
4028 }
4029 
4030 /* Look backward for the first parameter to be loaded.
4031    Note that loads of all parameters will not necessarily be
4032    found if CSE has eliminated some of them (e.g., an argument
4033    to the outer function is passed down as a parameter).
4034    Do not skip BOUNDARY.  */
4035 rtx_insn *
4036 find_first_parameter_load (rtx_insn *call_insn, rtx_insn *boundary)
4037 {
4038   struct parms_set_data parm;
4039   rtx p;
4040   rtx_insn *before, *first_set;
4041 
4042   /* Since different machines initialize their parameter registers
4043      in different orders, assume nothing.  Collect the set of all
4044      parameter registers.  */
4045   CLEAR_HARD_REG_SET (parm.regs);
4046   parm.nregs = 0;
4047   for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
4048     if (GET_CODE (XEXP (p, 0)) == USE
4049 	&& REG_P (XEXP (XEXP (p, 0), 0))
4050 	&& !STATIC_CHAIN_REG_P (XEXP (XEXP (p, 0), 0)))
4051       {
4052 	gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);
4053 
4054 	/* We only care about registers which can hold function
4055 	   arguments.  */
4056 	if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
4057 	  continue;
4058 
4059 	SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
4060 	parm.nregs++;
4061       }
4062   before = call_insn;
4063   first_set = call_insn;
4064 
4065   /* Search backward for the first set of a register in this set.  */
4066   while (parm.nregs && before != boundary)
4067     {
4068       before = PREV_INSN (before);
4069 
4070       /* It is possible that some loads got CSEed from one call to
4071          another.  Stop in that case.  */
4072       if (CALL_P (before))
4073 	break;
4074 
4075       /* Our caller must either ensure that we will find all sets
4076          (in case the code has not been optimized yet), or guard
4077          against possible labels by setting BOUNDARY to the preceding
4078          CODE_LABEL.  */
4079       if (LABEL_P (before))
4080 	{
4081 	  gcc_assert (before == boundary);
4082 	  break;
4083 	}
4084 
4085       if (INSN_P (before))
4086 	{
4087 	  int nregs_old = parm.nregs;
4088 	  note_stores (PATTERN (before), parms_set, &parm);
4089 	  /* If we found something that did not set a parameter reg,
4090 	     we're done.  Do not keep going, as that might result
4091 	     in hoisting an insn before the setting of a pseudo
4092 	     that is used by the hoisted insn. */
4093 	  if (nregs_old != parm.nregs)
4094 	    first_set = before;
4095 	  else
4096 	    break;
4097 	}
4098     }
4099   return first_set;
4100 }
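
/* Illustration (hypothetical insn stream): if CALL_INSN_FUNCTION_USAGE of
   CALL_INSN lists (use (reg:SI 4)) and (use (reg:SI 5)), the loop above
   collects hard registers 4 and 5 with PARM.NREGS == 2, and the backward
   walk then returns the earliest insn in the unbroken run of insns just
   before the call that each set one of those registers.  */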
4101 
4102 /* Return true if we should avoid inserting code between INSN and preceding
4103    call instruction.  */
4104 
4105 bool
4106 keep_with_call_p (const rtx_insn *insn)
4107 {
4108   rtx set;
4109 
4110   if (INSN_P (insn) && (set = single_set (insn)) != NULL)
4111     {
4112       if (REG_P (SET_DEST (set))
4113 	  && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
4114 	  && fixed_regs[REGNO (SET_DEST (set))]
4115 	  && general_operand (SET_SRC (set), VOIDmode))
4116 	return true;
4117       if (REG_P (SET_SRC (set))
4118 	  && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
4119 	  && REG_P (SET_DEST (set))
4120 	  && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
4121 	return true;
4122       /* There may be a stack pop just after the call and before the store
4123 	 of the return register.  Search for the actual store when deciding
4124 	 if we can break or not.  */
4125       if (SET_DEST (set) == stack_pointer_rtx)
4126 	{
4127 	  /* This CONST_CAST is okay because next_nonnote_insn just
4128 	     returns its argument and we assign it to a const_rtx
4129 	     variable.  */
4130 	  const rtx_insn *i2
4131 	    = next_nonnote_insn (const_cast<rtx_insn *> (insn));
4132 	  if (i2 && keep_with_call_p (i2))
4133 	    return true;
4134 	}
4135     }
4136   return false;
4137 }
4138 
4139 /* Return true if LABEL is a target of JUMP_INSN.  This applies only
4140    to non-complex jumps.  That is, direct unconditional, conditional,
4141    and tablejumps, but not computed jumps or returns.  It also does
4142    not apply to the fallthru case of a conditional jump.  */
4143 
4144 bool
4145 label_is_jump_target_p (const_rtx label, const rtx_insn *jump_insn)
4146 {
4147   rtx tmp = JUMP_LABEL (jump_insn);
4148   rtx_jump_table_data *table;
4149 
4150   if (label == tmp)
4151     return true;
4152 
4153   if (tablejump_p (jump_insn, NULL, &table))
4154     {
4155       rtvec vec = table->get_labels ();
4156       int i, veclen = GET_NUM_ELEM (vec);
4157 
4158       for (i = 0; i < veclen; ++i)
4159 	if (XEXP (RTVEC_ELT (vec, i), 0) == label)
4160 	  return true;
4161     }
4162 
4163   if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
4164     return true;
4165 
4166   return false;
4167 }
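
/* Illustration: for a (hypothetical) tablejump whose ADDR_VEC contains
   (label_ref L1) and (label_ref L2), label_is_jump_target_p returns true
   for L2 via the vector scan above even when JUMP_LABEL of the jump does
   not point at L2 itself.  */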
4168 
4169 
4170 /* Return an estimate of the cost of computing rtx X.
4171    One use is in cse, to decide which expression to keep in the hash table.
4172    Another is in rtl generation, to pick the cheapest way to multiply.
4173    Other uses like the latter are expected in the future.
4174 
4175    X appears as operand OPNO in an expression with code OUTER_CODE.
4176    SPEED specifies whether costs optimized for speed or size should
4177    be returned.  */
4178 
4179 int
4180 rtx_cost (rtx x, machine_mode mode, enum rtx_code outer_code,
4181 	  int opno, bool speed)
4182 {
4183   int i, j;
4184   enum rtx_code code;
4185   const char *fmt;
4186   int total;
4187   int factor;
4188 
4189   if (x == 0)
4190     return 0;
4191 
4192   if (GET_MODE (x) != VOIDmode)
4193     mode = GET_MODE (x);
4194 
4195   /* A size N times larger than UNITS_PER_WORD likely needs N times as
4196      many insns, taking N times as long.  */
4197   factor = estimated_poly_value (GET_MODE_SIZE (mode)) / UNITS_PER_WORD;
4198   if (factor == 0)
4199     factor = 1;
4200 
4201   /* Compute the default costs of certain things.
4202      Note that targetm.rtx_costs can override the defaults.  */
4203 
4204   code = GET_CODE (x);
4205   switch (code)
4206     {
4207     case MULT:
4208       /* Multiplication has time-complexity O(N*N), where N is the
4209 	 number of units (translated from digits) when using
4210 	 schoolbook long multiplication.  */
4211       total = factor * factor * COSTS_N_INSNS (5);
4212       break;
4213     case DIV:
4214     case UDIV:
4215     case MOD:
4216     case UMOD:
4217       /* Similarly, complexity for schoolbook long division.  */
4218       total = factor * factor * COSTS_N_INSNS (7);
4219       break;
4220     case USE:
4221       /* Used in combine.c as a marker.  */
4222       total = 0;
4223       break;
4224     case SET:
4225       /* A SET doesn't have a mode, so let's look at the SET_DEST to get
4226 	 the mode for the factor.  */
4227       mode = GET_MODE (SET_DEST (x));
4228       factor = estimated_poly_value (GET_MODE_SIZE (mode)) / UNITS_PER_WORD;
4229       if (factor == 0)
4230 	factor = 1;
4231       /* FALLTHRU */
4232     default:
4233       total = factor * COSTS_N_INSNS (1);
4234     }
4235 
4236   switch (code)
4237     {
4238     case REG:
4239       return 0;
4240 
4241     case SUBREG:
4242       total = 0;
4243       /* If we can't tie these modes, make this expensive.  The larger
4244 	 the mode, the more expensive it is.  */
4245       if (!targetm.modes_tieable_p (mode, GET_MODE (SUBREG_REG (x))))
4246 	return COSTS_N_INSNS (2 + factor);
4247       break;
4248 
4249     case TRUNCATE:
4250       if (targetm.modes_tieable_p (mode, GET_MODE (XEXP (x, 0))))
4251 	{
4252 	  total = 0;
4253 	  break;
4254 	}
4255       /* FALLTHRU */
4256     default:
4257       if (targetm.rtx_costs (x, mode, outer_code, opno, &total, speed))
4258 	return total;
4259       break;
4260     }
4261 
4262   /* Sum the costs of the sub-rtx's, plus cost of this operation,
4263      which is already in total.  */
4264 
4265   fmt = GET_RTX_FORMAT (code);
4266   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4267     if (fmt[i] == 'e')
4268       total += rtx_cost (XEXP (x, i), mode, code, i, speed);
4269     else if (fmt[i] == 'E')
4270       for (j = 0; j < XVECLEN (x, i); j++)
4271 	total += rtx_cost (XVECEXP (x, i, j), mode, code, i, speed);
4272 
4273   return total;
4274 }
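
/* Worked example of the default cost table above (assuming the target's
   rtx_costs hook does not override it and that COSTS_N_INSNS (N) expands
   to N * 4): for (mult:SI (reg:SI a) (reg:SI b)) on a target where SImode
   is one word, FACTOR is 1, the MULT case sets TOTAL to COSTS_N_INSNS (5)
   == 20, and each REG operand contributes 0, so rtx_cost returns 20.  */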
4275 
4276 /* Fill in the structure C with information about both speed and size rtx
4277    costs for X, which is operand OPNO in an expression with code OUTER.  */
4278 
4279 void
4280 get_full_rtx_cost (rtx x, machine_mode mode, enum rtx_code outer, int opno,
4281 		   struct full_rtx_costs *c)
4282 {
4283   c->speed = rtx_cost (x, mode, outer, opno, true);
4284   c->size = rtx_cost (x, mode, outer, opno, false);
4285 }
4286 
4287 
4288 /* Return the cost of address expression X.
4289    Expect that X is a properly formed address reference.
4290 
4291    The SPEED parameter specifies whether costs optimized for speed or for
4292    size should be returned.  */
4293 
4294 int
4295 address_cost (rtx x, machine_mode mode, addr_space_t as, bool speed)
4296 {
4297   /* We may be asked for the cost of various unusual addresses, such as the
4298      operands of a push instruction.  It is not worthwhile to complicate the
4299      target hook with such cases.  */
4300 
4301   if (!memory_address_addr_space_p (mode, x, as))
4302     return 1000;
4303 
4304   return targetm.address_cost (x, mode, as, speed);
4305 }
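
/* Usage sketch (illustrative; BASE stands for some register rtx): a pass
   comparing two addressing forms might call

     address_cost (gen_rtx_PLUS (Pmode, base, GEN_INT (4)), SImode,
		   ADDR_SPACE_GENERIC, true)

   for each candidate and keep the cheaper one; addresses the target cannot
   recognize get the arbitrary high cost of 1000 above.  */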
4306 
4307 /* If the target doesn't override, compute the cost as with arithmetic.  */
4308 
4309 int
4310 default_address_cost (rtx x, machine_mode, addr_space_t, bool speed)
4311 {
4312   return rtx_cost (x, Pmode, MEM, 0, speed);
4313 }
4314 
4315 
4316 unsigned HOST_WIDE_INT
4317 nonzero_bits (const_rtx x, machine_mode mode)
4318 {
4319   if (mode == VOIDmode)
4320     mode = GET_MODE (x);
4321   scalar_int_mode int_mode;
4322   if (!is_a <scalar_int_mode> (mode, &int_mode))
4323     return GET_MODE_MASK (mode);
4324   return cached_nonzero_bits (x, int_mode, NULL_RTX, VOIDmode, 0);
4325 }
4326 
4327 unsigned int
4328 num_sign_bit_copies (const_rtx x, machine_mode mode)
4329 {
4330   if (mode == VOIDmode)
4331     mode = GET_MODE (x);
4332   scalar_int_mode int_mode;
4333   if (!is_a <scalar_int_mode> (mode, &int_mode))
4334     return 1;
4335   return cached_num_sign_bit_copies (x, int_mode, NULL_RTX, VOIDmode, 0);
4336 }
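
/* Illustrative expectations for these two entry points: nonzero_bits on a
   (hypothetical) (and:SI x (const_int 255)) can only return bits within
   0xff, because the AND case below intersects the operands' masks, and
   num_sign_bit_copies on (sign_extend:SI (reg:QI r)) returns at least 25,
   because the SIGN_EXTEND case adds the 24 extended bits to the operand's
   minimum of one copy.  */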
4337 
4338 /* Return true if nonzero_bits1 might recurse into both operands
4339    of X.  */
4340 
4341 static inline bool
4342 nonzero_bits_binary_arith_p (const_rtx x)
4343 {
4344   if (!ARITHMETIC_P (x))
4345     return false;
4346   switch (GET_CODE (x))
4347     {
4348     case AND:
4349     case XOR:
4350     case IOR:
4351     case UMIN:
4352     case UMAX:
4353     case SMIN:
4354     case SMAX:
4355     case PLUS:
4356     case MINUS:
4357     case MULT:
4358     case DIV:
4359     case UDIV:
4360     case MOD:
4361     case UMOD:
4362       return true;
4363     default:
4364       return false;
4365     }
4366 }
4367 
4368 /* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
4369    It avoids exponential behavior in nonzero_bits1 when X has
4370    identical subexpressions on the first or the second level.  */
4371 
4372 static unsigned HOST_WIDE_INT
4373 cached_nonzero_bits (const_rtx x, scalar_int_mode mode, const_rtx known_x,
4374 		     machine_mode known_mode,
4375 		     unsigned HOST_WIDE_INT known_ret)
4376 {
4377   if (x == known_x && mode == known_mode)
4378     return known_ret;
4379 
4380   /* Try to find identical subexpressions.  If found call
4381      nonzero_bits1 on X with the subexpressions as KNOWN_X and the
4382      precomputed value for the subexpression as KNOWN_RET.  */
4383 
4384   if (nonzero_bits_binary_arith_p (x))
4385     {
4386       rtx x0 = XEXP (x, 0);
4387       rtx x1 = XEXP (x, 1);
4388 
4389       /* Check the first level.  */
4390       if (x0 == x1)
4391 	return nonzero_bits1 (x, mode, x0, mode,
4392 			      cached_nonzero_bits (x0, mode, known_x,
4393 						   known_mode, known_ret));
4394 
4395       /* Check the second level.  */
4396       if (nonzero_bits_binary_arith_p (x0)
4397 	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4398 	return nonzero_bits1 (x, mode, x1, mode,
4399 			      cached_nonzero_bits (x1, mode, known_x,
4400 						   known_mode, known_ret));
4401 
4402       if (nonzero_bits_binary_arith_p (x1)
4403 	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4404 	return nonzero_bits1 (x, mode, x0, mode,
4405 			      cached_nonzero_bits (x0, mode, known_x,
4406 						   known_mode, known_ret));
4407     }
4408 
4409   return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
4410 }
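
/* For example, given the (hypothetical) expression
   (plus:SI (reg:SI r) (reg:SI r)), the first-level check above notices
   X0 == X1, computes the nonzero bits of the shared operand once and hands
   them to nonzero_bits1 as KNOWN_X/KNOWN_RET, so the second visit to that
   operand returns immediately.  Without this, nested expressions with
   shared operands would make the recursion cost grow exponentially with
   depth.  */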
4411 
4412 /* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
4413    We don't let nonzero_bits recur into num_sign_bit_copies, because that
4414    is less useful.  We can't allow both, because that results in exponential
4415    run time recursion.  There is a nullstone testcase that triggered
4416    this.  This macro avoids accidental uses of num_sign_bit_copies.  */
4417 #define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
4418 
4419 /* Given an expression, X, compute which bits in X can be nonzero.
4420    We don't care about bits outside of those defined in MODE.
4421 
4422    For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X is
4423    an arithmetic operation, we can do better.  */
4424 
4425 static unsigned HOST_WIDE_INT
4426 nonzero_bits1 (const_rtx x, scalar_int_mode mode, const_rtx known_x,
4427 	       machine_mode known_mode,
4428 	       unsigned HOST_WIDE_INT known_ret)
4429 {
4430   unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
4431   unsigned HOST_WIDE_INT inner_nz;
4432   enum rtx_code code = GET_CODE (x);
4433   machine_mode inner_mode;
4434   unsigned int inner_width;
4435   scalar_int_mode xmode;
4436 
4437   unsigned int mode_width = GET_MODE_PRECISION (mode);
4438 
4439   if (CONST_INT_P (x))
4440     {
4441       if (SHORT_IMMEDIATES_SIGN_EXTEND
4442 	  && INTVAL (x) > 0
4443 	  && mode_width < BITS_PER_WORD
4444 	  && (UINTVAL (x) & (HOST_WIDE_INT_1U << (mode_width - 1))) != 0)
4445 	return UINTVAL (x) | (HOST_WIDE_INT_M1U << mode_width);
4446 
4447       return UINTVAL (x);
4448     }
4449 
4450   if (!is_a <scalar_int_mode> (GET_MODE (x), &xmode))
4451     return nonzero;
4452   unsigned int xmode_width = GET_MODE_PRECISION (xmode);
4453 
4454   /* If X is wider than MODE, use its mode instead.  */
4455   if (xmode_width > mode_width)
4456     {
4457       mode = xmode;
4458       nonzero = GET_MODE_MASK (mode);
4459       mode_width = xmode_width;
4460     }
4461 
4462   if (mode_width > HOST_BITS_PER_WIDE_INT)
4463     /* Our only callers in this case look for single bit values.  So
4464        just return the mode mask.  Those tests will then be false.  */
4465     return nonzero;
4466 
4467   /* If MODE is wider than X, but both are a single word for both the host
4468      and target machines, we can compute this from which bits of the object
4469      might be nonzero in its own mode, taking into account the fact that, on
4470      CISC machines, accessing an object in a wider mode generally causes the
4471      high-order bits to become undefined, so they are not known to be zero.
4472      We extend this reasoning to RISC machines for rotate operations since the
4473      semantics of the operations in the larger mode are not well defined.  */
4474   if (mode_width > xmode_width
4475       && xmode_width <= BITS_PER_WORD
4476       && xmode_width <= HOST_BITS_PER_WIDE_INT
4477       && (!WORD_REGISTER_OPERATIONS || code == ROTATE || code == ROTATERT))
4478     {
4479       nonzero &= cached_nonzero_bits (x, xmode,
4480 				      known_x, known_mode, known_ret);
4481       nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (xmode);
4482       return nonzero;
4483     }
4484 
4485   /* Please keep nonzero_bits_binary_arith_p above in sync with
4486      the code in the switch below.  */
4487   switch (code)
4488     {
4489     case REG:
4490 #if defined(POINTERS_EXTEND_UNSIGNED)
4491       /* If pointers extend unsigned and this is a pointer in Pmode, say that
4492 	 all the bits above ptr_mode are known to be zero.  */
4493       /* As we do not know which address space the pointer is referring to,
4494 	 we can do this only if the target does not support different pointer
4495 	 or address modes depending on the address space.  */
4496       if (target_default_pointer_address_modes_p ()
4497 	  && POINTERS_EXTEND_UNSIGNED
4498 	  && xmode == Pmode
4499 	  && REG_POINTER (x)
4500 	  && !targetm.have_ptr_extend ())
4501 	nonzero &= GET_MODE_MASK (ptr_mode);
4502 #endif
4503 
4504       /* Include declared information about alignment of pointers.  */
4505       /* ??? We don't properly preserve REG_POINTER changes across
4506 	 pointer-to-integer casts, so we can't trust it except for
4507 	 things that we know must be pointers.  See execute/960116-1.c.  */
4508       if ((x == stack_pointer_rtx
4509 	   || x == frame_pointer_rtx
4510 	   || x == arg_pointer_rtx)
4511 	  && REGNO_POINTER_ALIGN (REGNO (x)))
4512 	{
4513 	  unsigned HOST_WIDE_INT alignment
4514 	    = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;
4515 
4516 #ifdef PUSH_ROUNDING
4517 	  /* If PUSH_ROUNDING is defined, it is possible for the
4518 	     stack to be momentarily aligned only to that amount,
4519 	     so we pick the least alignment.  */
4520 	  if (x == stack_pointer_rtx && PUSH_ARGS)
4521 	    {
4522 	      poly_uint64 rounded_1 = PUSH_ROUNDING (poly_int64 (1));
4523 	      alignment = MIN (known_alignment (rounded_1), alignment);
4524 	    }
4525 #endif
4526 
4527 	  nonzero &= ~(alignment - 1);
4528 	}
4529 
4530       {
4531 	unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
4532 	rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, xmode, mode,
4533 						  &nonzero_for_hook);
4534 
4535 	if (new_rtx)
4536 	  nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
4537 						   known_mode, known_ret);
4538 
4539 	return nonzero_for_hook;
4540       }
4541 
4542     case MEM:
4543       /* On many, if not most, RISC machines, reading a byte from memory
4544 	 zeros the rest of the register.  Noticing that fact saves a lot
4545 	 of extra zero-extends.  */
4546       if (load_extend_op (xmode) == ZERO_EXTEND)
4547 	nonzero &= GET_MODE_MASK (xmode);
4548       break;
4549 
4550     case EQ:  case NE:
4551     case UNEQ:  case LTGT:
4552     case GT:  case GTU:  case UNGT:
4553     case LT:  case LTU:  case UNLT:
4554     case GE:  case GEU:  case UNGE:
4555     case LE:  case LEU:  case UNLE:
4556     case UNORDERED: case ORDERED:
4557       /* If this produces an integer result, we know which bits are set.
4558 	 Code here used to clear bits outside the mode of X, but that is
4559 	 now done above.  */
4560       /* Mind that MODE is the mode the caller wants to look at this
4561 	 operation in, and not the actual operation mode.  We can wind
4562 	 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
4563 	 that describes the results of a vector compare.  */
4564       if (GET_MODE_CLASS (xmode) == MODE_INT
4565 	  && mode_width <= HOST_BITS_PER_WIDE_INT)
4566 	nonzero = STORE_FLAG_VALUE;
4567       break;
4568 
4569     case NEG:
4570 #if 0
4571       /* Disabled to avoid exponential mutual recursion between nonzero_bits
4572 	 and num_sign_bit_copies.  */
4573       if (num_sign_bit_copies (XEXP (x, 0), xmode) == xmode_width)
4574 	nonzero = 1;
4575 #endif
4576 
4577       if (xmode_width < mode_width)
4578 	nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (xmode));
4579       break;
4580 
4581     case ABS:
4582 #if 0
4583       /* Disabled to avoid exponential mutual recursion between nonzero_bits
4584 	 and num_sign_bit_copies.  */
4585       if (num_sign_bit_copies (XEXP (x, 0), xmode) == xmode_width)
4586 	nonzero = 1;
4587 #endif
4588       break;
4589 
4590     case TRUNCATE:
4591       nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
4592 				       known_x, known_mode, known_ret)
4593 		  & GET_MODE_MASK (mode));
4594       break;
4595 
4596     case ZERO_EXTEND:
4597       nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4598 				      known_x, known_mode, known_ret);
4599       if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4600 	nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4601       break;
4602 
4603     case SIGN_EXTEND:
4604       /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
4605 	 Otherwise, show all the bits in the outer mode but not the inner
4606 	 may be nonzero.  */
4607       inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
4608 				      known_x, known_mode, known_ret);
4609       if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4610 	{
4611 	  inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4612 	  if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
4613 	    inner_nz |= (GET_MODE_MASK (mode)
4614 			 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
4615 	}
4616 
4617       nonzero &= inner_nz;
4618       break;
4619 
4620     case AND:
4621       nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4622 				       known_x, known_mode, known_ret)
4623       		 & cached_nonzero_bits (XEXP (x, 1), mode,
4624 					known_x, known_mode, known_ret);
4625       break;
4626 
4627     case XOR:   case IOR:
4628     case UMIN:  case UMAX:  case SMIN:  case SMAX:
4629       {
4630 	unsigned HOST_WIDE_INT nonzero0
4631 	   = cached_nonzero_bits (XEXP (x, 0), mode,
4632 				  known_x, known_mode, known_ret);
4633 
4634 	/* Don't call nonzero_bits for the second time if it cannot change
4635 	   anything.  */
4636 	if ((nonzero & nonzero0) != nonzero)
4637 	  nonzero &= nonzero0
4638       		     | cached_nonzero_bits (XEXP (x, 1), mode,
4639 					    known_x, known_mode, known_ret);
4640       }
4641       break;
4642 
4643     case PLUS:  case MINUS:
4644     case MULT:
4645     case DIV:   case UDIV:
4646     case MOD:   case UMOD:
4647       /* We can apply the rules of arithmetic to compute the number of
4648 	 high- and low-order zero bits of these operations.  We start by
4649 	 computing the width (position of the highest-order nonzero bit)
4650 	 and the number of low-order zero bits for each value.  */
4651       {
4652 	unsigned HOST_WIDE_INT nz0
4653 	  = cached_nonzero_bits (XEXP (x, 0), mode,
4654 				 known_x, known_mode, known_ret);
4655 	unsigned HOST_WIDE_INT nz1
4656 	  = cached_nonzero_bits (XEXP (x, 1), mode,
4657 				 known_x, known_mode, known_ret);
4658 	int sign_index = xmode_width - 1;
4659 	int width0 = floor_log2 (nz0) + 1;
4660 	int width1 = floor_log2 (nz1) + 1;
4661 	int low0 = ctz_or_zero (nz0);
4662 	int low1 = ctz_or_zero (nz1);
4663 	unsigned HOST_WIDE_INT op0_maybe_minusp
4664 	  = nz0 & (HOST_WIDE_INT_1U << sign_index);
4665 	unsigned HOST_WIDE_INT op1_maybe_minusp
4666 	  = nz1 & (HOST_WIDE_INT_1U << sign_index);
4667 	unsigned int result_width = mode_width;
4668 	int result_low = 0;
4669 
4670 	switch (code)
4671 	  {
4672 	  case PLUS:
4673 	    result_width = MAX (width0, width1) + 1;
4674 	    result_low = MIN (low0, low1);
4675 	    break;
4676 	  case MINUS:
4677 	    result_low = MIN (low0, low1);
4678 	    break;
4679 	  case MULT:
4680 	    result_width = width0 + width1;
4681 	    result_low = low0 + low1;
4682 	    break;
4683 	  case DIV:
4684 	    if (width1 == 0)
4685 	      break;
4686 	    if (!op0_maybe_minusp && !op1_maybe_minusp)
4687 	      result_width = width0;
4688 	    break;
4689 	  case UDIV:
4690 	    if (width1 == 0)
4691 	      break;
4692 	    result_width = width0;
4693 	    break;
4694 	  case MOD:
4695 	    if (width1 == 0)
4696 	      break;
4697 	    if (!op0_maybe_minusp && !op1_maybe_minusp)
4698 	      result_width = MIN (width0, width1);
4699 	    result_low = MIN (low0, low1);
4700 	    break;
4701 	  case UMOD:
4702 	    if (width1 == 0)
4703 	      break;
4704 	    result_width = MIN (width0, width1);
4705 	    result_low = MIN (low0, low1);
4706 	    break;
4707 	  default:
4708 	    gcc_unreachable ();
4709 	  }
4710 
4711 	if (result_width < mode_width)
4712 	  nonzero &= (HOST_WIDE_INT_1U << result_width) - 1;
4713 
4714 	if (result_low > 0)
4715 	  nonzero &= ~((HOST_WIDE_INT_1U << result_low) - 1);
4716       }
4717       break;
4718 
4719     case ZERO_EXTRACT:
4720       if (CONST_INT_P (XEXP (x, 1))
4721 	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
4722 	nonzero &= (HOST_WIDE_INT_1U << INTVAL (XEXP (x, 1))) - 1;
4723       break;
4724 
4725     case SUBREG:
4726       /* If this is a SUBREG formed for a promoted variable that has
4727 	 been zero-extended, we know that at least the high-order bits
4728 	 are zero, though others might be too.  */
4729       if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x))
4730 	nonzero = GET_MODE_MASK (xmode)
4731 		  & cached_nonzero_bits (SUBREG_REG (x), xmode,
4732 					 known_x, known_mode, known_ret);
4733 
4734       /* If the inner mode is a single word for both the host and target
4735 	 machines, we can compute this from which bits of the inner
4736 	 object might be nonzero.  */
4737       inner_mode = GET_MODE (SUBREG_REG (x));
4738       if (GET_MODE_PRECISION (inner_mode).is_constant (&inner_width)
4739 	  && inner_width <= BITS_PER_WORD
4740 	  && inner_width <= HOST_BITS_PER_WIDE_INT)
4741 	{
4742 	  nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
4743 					  known_x, known_mode, known_ret);
4744 
4745           /* On many CISC machines, accessing an object in a wider mode
4746 	     causes the high-order bits to become undefined.  So they are
4747 	     not known to be zero.  */
4748 	  rtx_code extend_op;
4749 	  if ((!WORD_REGISTER_OPERATIONS
4750 	       /* If this is a typical RISC machine, we only have to worry
4751 		  about the way loads are extended.  */
4752 	       || ((extend_op = load_extend_op (inner_mode)) == SIGN_EXTEND
4753 		   ? val_signbit_known_set_p (inner_mode, nonzero)
4754 		   : extend_op != ZERO_EXTEND)
4755 	       || (!MEM_P (SUBREG_REG (x)) && !REG_P (SUBREG_REG (x))))
4756 	      && xmode_width > inner_width)
4757 	    nonzero
4758 	      |= (GET_MODE_MASK (GET_MODE (x)) & ~GET_MODE_MASK (inner_mode));
4759 	}
4760       break;
4761 
4762     case ASHIFT:
4763     case ASHIFTRT:
4764     case LSHIFTRT:
4765     case ROTATE:
4766     case ROTATERT:
4767       /* The nonzero bits are in two classes: any bits within MODE
4768 	 that aren't in xmode are always significant.  The rest of the
4769 	 nonzero bits are those that are significant in the operand of
4770 	 the shift when shifted the appropriate number of bits.  This
4771 	 shows that high-order bits are cleared by the right shift and
4772 	 low-order bits by left shifts.  */
4773       if (CONST_INT_P (XEXP (x, 1))
4774 	  && INTVAL (XEXP (x, 1)) >= 0
4775 	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
4776 	  && INTVAL (XEXP (x, 1)) < xmode_width)
4777 	{
4778 	  int count = INTVAL (XEXP (x, 1));
4779 	  unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (xmode);
4780 	  unsigned HOST_WIDE_INT op_nonzero
4781 	    = cached_nonzero_bits (XEXP (x, 0), mode,
4782 				   known_x, known_mode, known_ret);
4783 	  unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
4784 	  unsigned HOST_WIDE_INT outer = 0;
4785 
4786 	  if (mode_width > xmode_width)
4787 	    outer = (op_nonzero & nonzero & ~mode_mask);
4788 
4789 	  switch (code)
4790 	    {
4791 	    case ASHIFT:
4792 	      inner <<= count;
4793 	      break;
4794 
4795 	    case LSHIFTRT:
4796 	      inner >>= count;
4797 	      break;
4798 
4799 	    case ASHIFTRT:
4800 	      inner >>= count;
4801 
4802 	      /* If the sign bit may have been nonzero before the shift, we
4803 		 need to mark all the places it could have been copied to
4804 		 by the shift as possibly nonzero.  */
4805 	      if (inner & (HOST_WIDE_INT_1U << (xmode_width - 1 - count)))
4806 		inner |= (((HOST_WIDE_INT_1U << count) - 1)
4807 			  << (xmode_width - count));
4808 	      break;
4809 
4810 	    case ROTATE:
4811 	      inner = (inner << (count % xmode_width)
4812 		       | (inner >> (xmode_width - (count % xmode_width))))
4813 		      & mode_mask;
4814 	      break;
4815 
4816 	    case ROTATERT:
4817 	      inner = (inner >> (count % xmode_width)
4818 		       | (inner << (xmode_width - (count % xmode_width))))
4819 		      & mode_mask;
4820 	      break;
4821 
4822 	    default:
4823 	      gcc_unreachable ();
4824 	    }
4825 
4826 	  nonzero &= (outer | inner);
4827 	}
4828       break;
4829 
4830     case FFS:
4831     case POPCOUNT:
4832       /* This is at most the number of bits in the mode.  */
4833       nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
4834       break;
4835 
4836     case CLZ:
4837       /* If CLZ has a known value at zero, then the nonzero bits are
4838 	 that value, plus the number of bits in the mode minus one.  */
4839       if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4840 	nonzero
4841 	  |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
4842       else
4843 	nonzero = -1;
4844       break;
4845 
4846     case CTZ:
4847       /* If CTZ has a known value at zero, then the nonzero bits are
4848 	 that value, plus the number of bits in the mode minus one.  */
4849       if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4850 	nonzero
4851 	  |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
4852       else
4853 	nonzero = -1;
4854       break;
4855 
4856     case CLRSB:
4857       /* This is at most the number of bits in the mode minus 1.  */
4858       nonzero = (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
4859       break;
4860 
4861     case PARITY:
4862       nonzero = 1;
4863       break;
4864 
4865     case IF_THEN_ELSE:
4866       {
4867 	unsigned HOST_WIDE_INT nonzero_true
4868 	  = cached_nonzero_bits (XEXP (x, 1), mode,
4869 				 known_x, known_mode, known_ret);
4870 
4871 	/* Don't call nonzero_bits for the second time if it cannot change
4872 	   anything.  */
4873 	if ((nonzero & nonzero_true) != nonzero)
4874 	  nonzero &= nonzero_true
4875       		     | cached_nonzero_bits (XEXP (x, 2), mode,
4876 					    known_x, known_mode, known_ret);
4877       }
4878       break;
4879 
4880     default:
4881       break;
4882     }
4883 
4884   return nonzero;
4885 }
4886 
4887 /* See the macro definition above.  */
4888 #undef cached_num_sign_bit_copies
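
/* Worked example for the PLUS/MINUS/MULT handling in nonzero_bits1 above
   (illustrative masks): if NZ0 == 0xff (WIDTH0 8, LOW0 0) and NZ1 == 0xf0
   (WIDTH1 8, LOW1 4), then PLUS yields RESULT_WIDTH 9 and RESULT_LOW 0,
   clipping NONZERO to 0x1ff, while MULT yields RESULT_WIDTH 16 and
   RESULT_LOW 4, additionally clearing the low four bits.  */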
4889 
4890 
4891 /* Return true if num_sign_bit_copies1 might recurse into both operands
4892    of X.  */
4893 
4894 static inline bool
4895 num_sign_bit_copies_binary_arith_p (const_rtx x)
4896 {
4897   if (!ARITHMETIC_P (x))
4898     return false;
4899   switch (GET_CODE (x))
4900     {
4901     case IOR:
4902     case AND:
4903     case XOR:
4904     case SMIN:
4905     case SMAX:
4906     case UMIN:
4907     case UMAX:
4908     case PLUS:
4909     case MINUS:
4910     case MULT:
4911       return true;
4912     default:
4913       return false;
4914     }
4915 }
4916 
4917 /* The function cached_num_sign_bit_copies is a wrapper around
4918    num_sign_bit_copies1.  It avoids exponential behavior in
4919    num_sign_bit_copies1 when X has identical subexpressions on the
4920    first or the second level.  */
4921 
4922 static unsigned int
4923 cached_num_sign_bit_copies (const_rtx x, scalar_int_mode mode,
4924 			    const_rtx known_x, machine_mode known_mode,
4925 			    unsigned int known_ret)
4926 {
4927   if (x == known_x && mode == known_mode)
4928     return known_ret;
4929 
4930   /* Try to find identical subexpressions.  If found call
4931      num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
4932      the precomputed value for the subexpression as KNOWN_RET.  */
4933 
4934   if (num_sign_bit_copies_binary_arith_p (x))
4935     {
4936       rtx x0 = XEXP (x, 0);
4937       rtx x1 = XEXP (x, 1);
4938 
4939       /* Check the first level.  */
4940       if (x0 == x1)
4941 	return
4942 	  num_sign_bit_copies1 (x, mode, x0, mode,
4943 				cached_num_sign_bit_copies (x0, mode, known_x,
4944 							    known_mode,
4945 							    known_ret));
4946 
4947       /* Check the second level.  */
4948       if (num_sign_bit_copies_binary_arith_p (x0)
4949 	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4950 	return
4951 	  num_sign_bit_copies1 (x, mode, x1, mode,
4952 				cached_num_sign_bit_copies (x1, mode, known_x,
4953 							    known_mode,
4954 							    known_ret));
4955 
4956       if (num_sign_bit_copies_binary_arith_p (x1)
4957 	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4958 	return
4959 	  num_sign_bit_copies1 (x, mode, x0, mode,
4960 				cached_num_sign_bit_copies (x0, mode, known_x,
4961 							    known_mode,
4962 							    known_ret));
4963     }
4964 
4965   return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
4966 }
4967 
4968 /* Return the number of bits at the high-order end of X that are known to
4969    be equal to the sign bit.  X will be used in mode MODE.  The returned
4970    value will always be between 1 and the number of bits in MODE.  */
4971 
4972 static unsigned int
4973 num_sign_bit_copies1 (const_rtx x, scalar_int_mode mode, const_rtx known_x,
4974 		      machine_mode known_mode,
4975 		      unsigned int known_ret)
4976 {
4977   enum rtx_code code = GET_CODE (x);
4978   unsigned int bitwidth = GET_MODE_PRECISION (mode);
4979   int num0, num1, result;
4980   unsigned HOST_WIDE_INT nonzero;
4981 
4982   if (CONST_INT_P (x))
4983     {
4984       /* If the constant is negative, take its 1's complement and remask.
4985 	 Then see how many zero bits we have.  */
4986       nonzero = UINTVAL (x) & GET_MODE_MASK (mode);
4987       if (bitwidth <= HOST_BITS_PER_WIDE_INT
4988 	  && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
4989 	nonzero = (~nonzero) & GET_MODE_MASK (mode);
4990 
4991       return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
4992     }
4993 
4994   scalar_int_mode xmode, inner_mode;
4995   if (!is_a <scalar_int_mode> (GET_MODE (x), &xmode))
4996     return 1;
4997 
4998   unsigned int xmode_width = GET_MODE_PRECISION (xmode);
4999 
5000   /* For a smaller mode, just ignore the high bits.  */
5001   if (bitwidth < xmode_width)
5002     {
5003       num0 = cached_num_sign_bit_copies (x, xmode,
5004 					 known_x, known_mode, known_ret);
5005       return MAX (1, num0 - (int) (xmode_width - bitwidth));
5006     }
5007 
5008   if (bitwidth > xmode_width)
5009     {
5010       /* If this machine does not do all register operations on the entire
5011 	 register and MODE is wider than the mode of X, we can say nothing
5012 	 at all about the high-order bits.  We extend this reasoning to every
5013 	 machine for rotate operations since the semantics of the operations
5014 	 in the larger mode are not well defined.  */
5015       if (!WORD_REGISTER_OPERATIONS || code == ROTATE || code == ROTATERT)
5016 	return 1;
5017 
5018       /* Likewise on machines that do, if the mode of the object is smaller
5019 	 than a word and loads of that size don't sign extend, we can say
5020 	 nothing about the high order bits.  */
5021       if (xmode_width < BITS_PER_WORD
5022 	  && load_extend_op (xmode) != SIGN_EXTEND)
5023 	return 1;
5024     }
5025 
5026   /* Please keep num_sign_bit_copies_binary_arith_p above in sync with
5027      the code in the switch below.  */
5028   switch (code)
5029     {
5030     case REG:
5031 
5032 #if defined(POINTERS_EXTEND_UNSIGNED)
5033       /* If pointers extend signed and this is a pointer in Pmode, say that
5034 	 all the bits above ptr_mode are known to be sign bit copies.  */
5035       /* As we do not know which address space the pointer is referring to,
5036 	 we can do this only if the target does not support different pointer
5037 	 or address modes depending on the address space.  */
5038       if (target_default_pointer_address_modes_p ()
5039 	  && ! POINTERS_EXTEND_UNSIGNED && xmode == Pmode
5040 	  && mode == Pmode && REG_POINTER (x)
5041 	  && !targetm.have_ptr_extend ())
5042 	return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1;
5043 #endif
5044 
5045       {
5046 	unsigned int copies_for_hook = 1, copies = 1;
5047 	rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, xmode, mode,
5048 							 &copies_for_hook);
5049 
5050 	if (new_rtx)
5051 	  copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
5052 					       known_mode, known_ret);
5053 
5054 	if (copies > 1 || copies_for_hook > 1)
5055 	  return MAX (copies, copies_for_hook);
5056 
5057 	/* Else, use nonzero_bits to guess num_sign_bit_copies (see below).  */
5058       }
5059       break;
5060 
5061     case MEM:
5062       /* Some RISC machines sign-extend all loads smaller than a word.  */
5063       if (load_extend_op (xmode) == SIGN_EXTEND)
5064 	return MAX (1, ((int) bitwidth - (int) xmode_width + 1));
5065       break;
5066 
5067     case SUBREG:
5068       /* If this is a SUBREG for a promoted object that is sign-extended
5069 	 and we are looking at it in a wider mode, we know that at least the
5070 	 high-order bits are known to be sign bit copies.  */
5071 
5072       if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_SIGNED_P (x))
5073 	{
5074 	  num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
5075 					     known_x, known_mode, known_ret);
5076 	  return MAX ((int) bitwidth - (int) xmode_width + 1, num0);
5077 	}
5078 
5079       if (is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (x)), &inner_mode))
5080 	{
5081 	  /* For a smaller object, just ignore the high bits.  */
5082 	  if (bitwidth <= GET_MODE_PRECISION (inner_mode))
5083 	    {
5084 	      num0 = cached_num_sign_bit_copies (SUBREG_REG (x), inner_mode,
5085 						 known_x, known_mode,
5086 						 known_ret);
5087 	      return MAX (1, num0 - (int) (GET_MODE_PRECISION (inner_mode)
5088 					   - bitwidth));
5089 	    }
5090 
5091 	  /* For paradoxical SUBREGs on machines where all register operations
5092 	     affect the entire register, just look inside.  Note that we are
5093 	     passing MODE to the recursive call, so the number of sign bit
5094 	     copies will remain relative to that mode, not the inner mode.  */
5095 
5096 	  /* This works only if loads sign extend.  Otherwise, if we get a
5097 	     reload for the inner part, it may be loaded from the stack, and
5098 	     then we lose all sign bit copies that existed before the store
5099 	     to the stack.  */
5100 
5101 	  if (WORD_REGISTER_OPERATIONS
5102 	      && load_extend_op (inner_mode) == SIGN_EXTEND
5103 	      && paradoxical_subreg_p (x)
5104 	      && MEM_P (SUBREG_REG (x)))
5105 	    return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
5106 					       known_x, known_mode, known_ret);
5107 	}
5108       break;
5109 
5110     case SIGN_EXTRACT:
5111       if (CONST_INT_P (XEXP (x, 1)))
5112 	return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
5113       break;
5114 
5115     case SIGN_EXTEND:
5116       if (is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
5117 	return (bitwidth - GET_MODE_PRECISION (inner_mode)
5118 		+ cached_num_sign_bit_copies (XEXP (x, 0), inner_mode,
5119 					      known_x, known_mode, known_ret));
5120       break;
5121 
5122     case TRUNCATE:
5123       /* For a smaller object, just ignore the high bits.  */
5124       inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
5125       num0 = cached_num_sign_bit_copies (XEXP (x, 0), inner_mode,
5126 					 known_x, known_mode, known_ret);
5127       return MAX (1, (num0 - (int) (GET_MODE_PRECISION (inner_mode)
5128 				    - bitwidth)));
5129 
5130     case NOT:
5131       return cached_num_sign_bit_copies (XEXP (x, 0), mode,
5132 					 known_x, known_mode, known_ret);
5133 
5134     case ROTATE:       case ROTATERT:
5135       /* If we are rotating left by a number of bits less than the number
5136 	 of sign bit copies, we can just subtract that amount from the
5137 	 number.  */
5138       if (CONST_INT_P (XEXP (x, 1))
5139 	  && INTVAL (XEXP (x, 1)) >= 0
5140 	  && INTVAL (XEXP (x, 1)) < (int) bitwidth)
5141 	{
5142 	  num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5143 					     known_x, known_mode, known_ret);
5144 	  return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
5145 				 : (int) bitwidth - INTVAL (XEXP (x, 1))));
5146 	}
5147       break;
5148 
5149     case NEG:
5150       /* In general, this subtracts one sign bit copy.  But if the value
5151 	 is known to be positive, the number of sign bit copies is the
5152 	 same as that of the input.  Finally, if the input has just one bit
5153 	 that might be nonzero, all the bits are copies of the sign bit.  */
5154       num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5155 					 known_x, known_mode, known_ret);
5156       if (bitwidth > HOST_BITS_PER_WIDE_INT)
5157 	return num0 > 1 ? num0 - 1 : 1;
5158 
5159       nonzero = nonzero_bits (XEXP (x, 0), mode);
5160       if (nonzero == 1)
5161 	return bitwidth;
5162 
5163       if (num0 > 1
5164 	  && ((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero))
5165 	num0--;
5166 
5167       return num0;
5168 
5169     case IOR:   case AND:   case XOR:
5170     case SMIN:  case SMAX:  case UMIN:  case UMAX:
5171       /* Logical operations will preserve the number of sign-bit copies.
5172 	 MIN and MAX operations always return one of the operands.  */
5173       num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5174 					 known_x, known_mode, known_ret);
5175       num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5176 					 known_x, known_mode, known_ret);
5177 
5178       /* If num1 is clearing some of the top bits then regardless of
5179 	 the other term, we are guaranteed to have at least that many
5180 	 high-order zero bits.  */
5181       if (code == AND
5182 	  && num1 > 1
5183 	  && bitwidth <= HOST_BITS_PER_WIDE_INT
5184 	  && CONST_INT_P (XEXP (x, 1))
5185 	  && (UINTVAL (XEXP (x, 1))
5186 	      & (HOST_WIDE_INT_1U << (bitwidth - 1))) == 0)
5187 	return num1;
5188 
5189       /* Similarly for IOR when setting high-order bits.  */
5190       if (code == IOR
5191 	  && num1 > 1
5192 	  && bitwidth <= HOST_BITS_PER_WIDE_INT
5193 	  && CONST_INT_P (XEXP (x, 1))
5194 	  && (UINTVAL (XEXP (x, 1))
5195 	      & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5196 	return num1;
5197 
5198       return MIN (num0, num1);
5199 
5200     case PLUS:  case MINUS:
5201       /* For addition and subtraction, we can have a 1-bit carry.  However,
5202 	 if we are subtracting 1 from a positive number, there will not
5203 	 be such a carry.  Furthermore, if the positive number is known to
5204 	 be 0 or 1, we know the result is either -1 or 0.  */
5205 
5206       if (code == PLUS && XEXP (x, 1) == constm1_rtx
5207 	  && bitwidth <= HOST_BITS_PER_WIDE_INT)
5208 	{
5209 	  nonzero = nonzero_bits (XEXP (x, 0), mode);
5210 	  if (((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero) == 0)
5211 	    return (nonzero == 1 || nonzero == 0 ? bitwidth
5212 		    : bitwidth - floor_log2 (nonzero) - 1);
5213 	}
5214 
5215       num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5216 					 known_x, known_mode, known_ret);
5217       num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5218 					 known_x, known_mode, known_ret);
5219       result = MAX (1, MIN (num0, num1) - 1);
5220 
5221       return result;
5222 
5223     case MULT:
5224       /* The number of bits of the product is the sum of the number of
5225 	 bits of both terms.  However, unless one of the terms if known
5226 	 bits of both terms.  However, unless one of the terms is known
5227 	 a negative number can remove one sign bit copy.  */
5228 
5229       num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5230 					 known_x, known_mode, known_ret);
5231       num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5232 					 known_x, known_mode, known_ret);
5233 
5234       result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
5235       if (result > 0
5236 	  && (bitwidth > HOST_BITS_PER_WIDE_INT
5237 	      || (((nonzero_bits (XEXP (x, 0), mode)
5238 		    & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5239 		  && ((nonzero_bits (XEXP (x, 1), mode)
5240 		       & (HOST_WIDE_INT_1U << (bitwidth - 1)))
5241 		      != 0))))
5242 	result--;
5243 
5244       return MAX (1, result);
5245 
5246     case UDIV:
5247       /* The result must be <= the first operand.  If the first operand
5248 	 has the high bit set, we know nothing about the number of sign
5249 	 bit copies.  */
5250       if (bitwidth > HOST_BITS_PER_WIDE_INT)
5251 	return 1;
5252       else if ((nonzero_bits (XEXP (x, 0), mode)
5253 		& (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5254 	return 1;
5255       else
5256 	return cached_num_sign_bit_copies (XEXP (x, 0), mode,
5257 					   known_x, known_mode, known_ret);
5258 
5259     case UMOD:
5260       /* The result must be <= the second operand.  If the second operand
5261 	 has (or just might have) the high bit set, we know nothing about
5262 	 the number of sign bit copies.  */
5263       if (bitwidth > HOST_BITS_PER_WIDE_INT)
5264 	return 1;
5265       else if ((nonzero_bits (XEXP (x, 1), mode)
5266 		& (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5267 	return 1;
5268       else
5269 	return cached_num_sign_bit_copies (XEXP (x, 1), mode,
5270 					   known_x, known_mode, known_ret);
5271 
5272     case DIV:
5273       /* Similar to unsigned division, except that we have to worry about
5274 	 the case where the divisor is negative, in which case we have
5275 	 to add 1.  */
5276       result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5277 					   known_x, known_mode, known_ret);
5278       if (result > 1
5279 	  && (bitwidth > HOST_BITS_PER_WIDE_INT
5280 	      || (nonzero_bits (XEXP (x, 1), mode)
5281 		  & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
5282 	result--;
5283 
5284       return result;
5285 
5286     case MOD:
5287       result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5288 					   known_x, known_mode, known_ret);
5289       if (result > 1
5290 	  && (bitwidth > HOST_BITS_PER_WIDE_INT
5291 	      || (nonzero_bits (XEXP (x, 1), mode)
5292 		  & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
5293 	result--;
5294 
5295       return result;
5296 
5297     case ASHIFTRT:
5298       /* An arithmetic right shift by a constant adds that many copies
5299 	 of the sign bit (never exceeding the width of MODE).  */
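      /* For example, (ashiftrt:SI (reg:SI X) (const_int 24)) has at
	 least 25 sign-bit copies: at least one from the operand plus
	 the 24 added by the shift.  */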
5300       num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5301 					 known_x, known_mode, known_ret);
5302       if (CONST_INT_P (XEXP (x, 1))
5303 	  && INTVAL (XEXP (x, 1)) > 0
5304 	  && INTVAL (XEXP (x, 1)) < xmode_width)
5305 	num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));
5306 
5307       return num0;
5308 
5309     case ASHIFT:
5310       /* Left shifts destroy copies.  */
5311       if (!CONST_INT_P (XEXP (x, 1))
5312 	  || INTVAL (XEXP (x, 1)) < 0
5313 	  || INTVAL (XEXP (x, 1)) >= (int) bitwidth
5314 	  || INTVAL (XEXP (x, 1)) >= xmode_width)
5315 	return 1;
5316 
5317       num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5318 					 known_x, known_mode, known_ret);
5319       return MAX (1, num0 - INTVAL (XEXP (x, 1)));
5320 
5321     case IF_THEN_ELSE:
5322       num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5323 					 known_x, known_mode, known_ret);
5324       num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
5325 					 known_x, known_mode, known_ret);
5326       return MIN (num0, num1);
5327 
5328     case EQ:  case NE:  case GE:  case GT:  case LE:  case LT:
5329     case UNEQ:  case LTGT:  case UNGE:  case UNGT:  case UNLE:  case UNLT:
5330     case GEU: case GTU: case LEU: case LTU:
5331     case UNORDERED: case ORDERED:
5332       /* If the constant is negative, take its 1's complement and remask.
5333 	 Then see how many zero bits we have.  */
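      /* For example, with the common STORE_FLAG_VALUE of 1 the result
	 is either 0 or 1, so an SImode comparison yields 31 sign-bit
	 copies.  */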
5334       nonzero = STORE_FLAG_VALUE;
5335       if (bitwidth <= HOST_BITS_PER_WIDE_INT
5336 	  && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5337 	nonzero = (~nonzero) & GET_MODE_MASK (mode);
5338 
5339       return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
5340 
5341     default:
5342       break;
5343     }
5344 
5345   /* If we haven't been able to figure it out by one of the above rules,
5346      see if some of the high-order bits are known to be zero.  If so,
5347      count those bits and return one less than that amount.  If we can't
5348      safely compute the mask for this mode, always return BITWIDTH.  */
5349 
5350   bitwidth = GET_MODE_PRECISION (mode);
5351   if (bitwidth > HOST_BITS_PER_WIDE_INT)
5352     return 1;
5353 
5354   nonzero = nonzero_bits (x, mode);
5355   return nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))
5356 	 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
5357 }
5358 
5359 /* Calculate the rtx_cost of a single instruction pattern.  A return value of
5360    zero indicates an instruction pattern without a known cost.  */
5361 
5362 int
5363 pattern_cost (rtx pat, bool speed)
5364 {
5365   int i, cost;
5366   rtx set;
5367 
5368   /* Extract the single set rtx from the instruction pattern.  We
5369      can't use single_set since we only have the pattern.  We also
5370      consider PARALLELs of a normal set and a single comparison.  In
5371      that case we use the cost of the non-comparison SET operation,
5372      which is most likely to be the real cost of this operation.  */
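  /* For instance, for a pattern such as

       (parallel [(set (reg:SI 0) (plus:SI (reg:SI 1) (reg:SI 2)))
		  (set (reg:CC 17) (compare:CC ...))])

     the cost is taken from the PLUS set rather than from the COMPARE
     set.  (The register numbers are only illustrative.)  */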
5373   if (GET_CODE (pat) == SET)
5374     set = pat;
5375   else if (GET_CODE (pat) == PARALLEL)
5376     {
5377       set = NULL_RTX;
5378       rtx comparison = NULL_RTX;
5379 
5380       for (i = 0; i < XVECLEN (pat, 0); i++)
5381 	{
5382 	  rtx x = XVECEXP (pat, 0, i);
5383 	  if (GET_CODE (x) == SET)
5384 	    {
5385 	      if (GET_CODE (SET_SRC (x)) == COMPARE)
5386 		{
5387 		  if (comparison)
5388 		    return 0;
5389 		  comparison = x;
5390 		}
5391 	      else
5392 		{
5393 		  if (set)
5394 		    return 0;
5395 		  set = x;
5396 		}
5397 	    }
5398 	}
5399 
5400       if (!set && comparison)
5401 	set = comparison;
5402 
5403       if (!set)
5404 	return 0;
5405     }
5406   else
5407     return 0;
5408 
5409   cost = set_src_cost (SET_SRC (set), GET_MODE (SET_DEST (set)), speed);
5410   return cost > 0 ? cost : COSTS_N_INSNS (1);
5411 }
5412 
5413 /* Calculate the cost of a single instruction.  A return value of zero
5414    indicates an instruction pattern without a known cost.  */
5415 
5416 int
5417 insn_cost (rtx_insn *insn, bool speed)
5418 {
5419   if (targetm.insn_cost)
5420     return targetm.insn_cost (insn, speed);
5421 
5422   return pattern_cost (PATTERN (insn), speed);
5423 }
5424 
5425 /* Returns estimate on cost of computing SEQ.  */
5426 
5427 unsigned
5428 seq_cost (const rtx_insn *seq, bool speed)
5429 {
5430   unsigned cost = 0;
5431   rtx set;
5432 
5433   for (; seq; seq = NEXT_INSN (seq))
5434     {
5435       set = single_set (seq);
5436       if (set)
5437         cost += set_rtx_cost (set, speed);
5438       else if (NONDEBUG_INSN_P (seq))
5439 	{
5440 	  int this_cost = insn_cost (CONST_CAST_RTX_INSN (seq), speed);
5441 	  if (this_cost > 0)
5442 	    cost += this_cost;
5443 	  else
5444 	    cost++;
5445 	}
5446     }
5447 
5448   return cost;
5449 }
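
/* A typical use of seq_cost is to price a candidate sequence built
   between start_sequence and end_sequence, roughly:

     start_sequence ();
     ... emit the candidate insns ...
     rtx_insn *seq = get_insns ();
     end_sequence ();
     unsigned cost = seq_cost (seq, optimize_insn_for_speed_p ());

   Nondebug insns whose cost cannot be determined contribute 1 each.  */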
5450 
5451 /* Given an insn INSN and condition COND, return the condition in a
5452    canonical form to simplify testing by callers.  Specifically:
5453 
5454    (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
5455    (2) Both operands will be machine operands; (cc0) will have been replaced.
5456    (3) If an operand is a constant, it will be the second operand.
5457    (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
5458        for GE, GEU, and LEU.
5459 
5460    If the condition cannot be understood, or is an inequality floating-point
5461    comparison which needs to be reversed, 0 will be returned.
5462 
5463    If REVERSE is nonzero, then reverse the condition before canonicalizing it.
5464 
5465    If EARLIEST is nonzero, it is a pointer to a place where the earliest
5466    insn used in locating the condition was found.  If a replacement test
5467    of the condition is desired, it should be placed in front of that
5468    insn and we will be sure that the inputs are still valid.
5469 
5470    If WANT_REG is nonzero, we wish the condition to be relative to that
5471    register, if possible.  Therefore, do not canonicalize the condition
5472    further.  If ALLOW_CC_MODE is nonzero, allow the condition returned
5473    to be a compare to a CC mode register.
5474 
5475    If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
5476    and at INSN.  */
5477 
5478 rtx
5479 canonicalize_condition (rtx_insn *insn, rtx cond, int reverse,
5480 			rtx_insn **earliest,
5481 			rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
5482 {
5483   enum rtx_code code;
5484   rtx_insn *prev = insn;
5485   const_rtx set;
5486   rtx tem;
5487   rtx op0, op1;
5488   int reverse_code = 0;
5489   machine_mode mode;
5490   basic_block bb = BLOCK_FOR_INSN (insn);
5491 
5492   code = GET_CODE (cond);
5493   mode = GET_MODE (cond);
5494   op0 = XEXP (cond, 0);
5495   op1 = XEXP (cond, 1);
5496 
5497   if (reverse)
5498     code = reversed_comparison_code (cond, insn);
5499   if (code == UNKNOWN)
5500     return 0;
5501 
5502   if (earliest)
5503     *earliest = insn;
5504 
5505   /* If we are comparing a register with zero, see if the register is set
5506      in the previous insn to a COMPARE or a comparison operation.  Perform
5507      the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
5508      in cse.c  */
5509 
5510   while ((GET_RTX_CLASS (code) == RTX_COMPARE
5511 	  || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
5512 	 && op1 == CONST0_RTX (GET_MODE (op0))
5513 	 && op0 != want_reg)
5514     {
5515       /* Set nonzero when we find something of interest.  */
5516       rtx x = 0;
5517 
5518       /* If comparison with cc0, import actual comparison from compare
5519 	 insn.  */
5520       if (op0 == cc0_rtx)
5521 	{
5522 	  if ((prev = prev_nonnote_insn (prev)) == 0
5523 	      || !NONJUMP_INSN_P (prev)
5524 	      || (set = single_set (prev)) == 0
5525 	      || SET_DEST (set) != cc0_rtx)
5526 	    return 0;
5527 
5528 	  op0 = SET_SRC (set);
5529 	  op1 = CONST0_RTX (GET_MODE (op0));
5530 	  if (earliest)
5531 	    *earliest = prev;
5532 	}
5533 
5534       /* If this is a COMPARE, pick up the two things being compared.  */
5535       if (GET_CODE (op0) == COMPARE)
5536 	{
5537 	  op1 = XEXP (op0, 1);
5538 	  op0 = XEXP (op0, 0);
5539 	  continue;
5540 	}
5541       else if (!REG_P (op0))
5542 	break;
5543 
5544       /* Go back to the previous insn.  Stop if it is not an INSN.  We also
5545 	 stop if it isn't a single set or if it has a REG_INC note because
5546 	 we don't want to bother dealing with it.  */
5547 
5548       prev = prev_nonnote_nondebug_insn (prev);
5549 
5550       if (prev == 0
5551 	  || !NONJUMP_INSN_P (prev)
5552 	  || FIND_REG_INC_NOTE (prev, NULL_RTX)
5553 	  /* In cfglayout mode, there do not have to be labels at the
5554 	     beginning of a block, or jumps at the end, so the previous
5555 	     conditions would not stop us when we reach the bb boundary.  */
5556 	  || BLOCK_FOR_INSN (prev) != bb)
5557 	break;
5558 
5559       set = set_of (op0, prev);
5560 
5561       if (set
5562 	  && (GET_CODE (set) != SET
5563 	      || !rtx_equal_p (SET_DEST (set), op0)))
5564 	break;
5565 
5566       /* If this is setting OP0, get what it sets it to if it looks
5567 	 relevant.  */
5568       if (set)
5569 	{
5570 	  machine_mode inner_mode = GET_MODE (SET_DEST (set));
5571 #ifdef FLOAT_STORE_FLAG_VALUE
5572 	  REAL_VALUE_TYPE fsfv;
5573 #endif
5574 
5575 	  /* ??? We may not combine comparisons done in a CCmode with
5576 	     comparisons not done in a CCmode.  This is to aid targets
5577 	     like Alpha that have an IEEE compliant EQ instruction, and
5578 	     a non-IEEE compliant BEQ instruction.  The use of CCmode is
5579 	     actually artificial, simply to prevent the combination, but
5580 	     should not affect other platforms.
5581 
5582 	     However, we must allow VOIDmode comparisons to match either
5583 	     CCmode or non-CCmode comparison, because some ports have
5584 	     modeless comparisons inside branch patterns.
5585 
5586 	     ??? This mode check should perhaps look more like the mode check
5587 	     in simplify_comparison in combine.  */
5588 	  if (((GET_MODE_CLASS (mode) == MODE_CC)
5589 	       != (GET_MODE_CLASS (inner_mode) == MODE_CC))
5590 	      && mode != VOIDmode
5591 	      && inner_mode != VOIDmode)
5592 	    break;
5593 	  if (GET_CODE (SET_SRC (set)) == COMPARE
5594 	      || (((code == NE
5595 		    || (code == LT
5596 			&& val_signbit_known_set_p (inner_mode,
5597 						    STORE_FLAG_VALUE))
5598 #ifdef FLOAT_STORE_FLAG_VALUE
5599 		    || (code == LT
5600 			&& SCALAR_FLOAT_MODE_P (inner_mode)
5601 			&& (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5602 			    REAL_VALUE_NEGATIVE (fsfv)))
5603 #endif
5604 		    ))
5605 		  && COMPARISON_P (SET_SRC (set))))
5606 	    x = SET_SRC (set);
5607 	  else if (((code == EQ
5608 		     || (code == GE
5609 			 && val_signbit_known_set_p (inner_mode,
5610 						     STORE_FLAG_VALUE))
5611 #ifdef FLOAT_STORE_FLAG_VALUE
5612 		     || (code == GE
5613 			 && SCALAR_FLOAT_MODE_P (inner_mode)
5614 			 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5615 			     REAL_VALUE_NEGATIVE (fsfv)))
5616 #endif
5617 		     ))
5618 		   && COMPARISON_P (SET_SRC (set)))
5619 	    {
5620 	      reverse_code = 1;
5621 	      x = SET_SRC (set);
5622 	    }
5623 	  else if ((code == EQ || code == NE)
5624 		   && GET_CODE (SET_SRC (set)) == XOR)
5625 	    /* Handle sequences like:
5626 
5627 	       (set op0 (xor X Y))
5628 	       ...(eq|ne op0 (const_int 0))...
5629 
5630 	       in which case:
5631 
5632 	       (eq op0 (const_int 0)) reduces to (eq X Y)
5633 	       (ne op0 (const_int 0)) reduces to (ne X Y)
5634 
5635 	       This is the form used by MIPS16, for example.  */
5636 	    x = SET_SRC (set);
5637 	  else
5638 	    break;
5639 	}
5640 
5641       else if (reg_set_p (op0, prev))
5642 	/* If this sets OP0, but not directly, we have to give up.  */
5643 	break;
5644 
5645       if (x)
5646 	{
5647 	  /* If the caller is expecting the condition to be valid at INSN,
5648 	     make sure X doesn't change before INSN.  */
5649 	  if (valid_at_insn_p)
5650 	    if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
5651 	      break;
5652 	  if (COMPARISON_P (x))
5653 	    code = GET_CODE (x);
5654 	  if (reverse_code)
5655 	    {
5656 	      code = reversed_comparison_code (x, prev);
5657 	      if (code == UNKNOWN)
5658 		return 0;
5659 	      reverse_code = 0;
5660 	    }
5661 
5662 	  op0 = XEXP (x, 0), op1 = XEXP (x, 1);
5663 	  if (earliest)
5664 	    *earliest = prev;
5665 	}
5666     }
5667 
5668   /* If constant is first, put it last.  */
5669   if (CONSTANT_P (op0))
5670     code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
5671 
5672   /* If OP0 is the result of a comparison, we weren't able to find what
5673      was really being compared, so fail.  */
5674   if (!allow_cc_mode
5675       && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
5676     return 0;
5677 
5678   /* Canonicalize any ordered comparison with integers involving equality
5679      if we can do computations in the relevant mode and we do not
5680      overflow.  */
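  /* For example, (le X (const_int 4)) becomes (lt X (const_int 5)) and
     (geu X (const_int 1)) becomes (gtu X (const_int 0)), provided the
     adjusted constant does not overflow OP0's mode.  */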
5681 
5682   scalar_int_mode op0_mode;
5683   if (CONST_INT_P (op1)
5684       && is_a <scalar_int_mode> (GET_MODE (op0), &op0_mode)
5685       && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT)
5686     {
5687       HOST_WIDE_INT const_val = INTVAL (op1);
5688       unsigned HOST_WIDE_INT uconst_val = const_val;
5689       unsigned HOST_WIDE_INT max_val
5690 	= (unsigned HOST_WIDE_INT) GET_MODE_MASK (op0_mode);
5691 
5692       switch (code)
5693 	{
5694 	case LE:
5695 	  if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
5696 	    code = LT, op1 = gen_int_mode (const_val + 1, op0_mode);
5697 	  break;
5698 
5699 	/* When cross-compiling, const_val might be sign-extended from
5700 	   BITS_PER_WORD to HOST_BITS_PER_WIDE_INT.  */
5701 	case GE:
5702 	  if ((const_val & max_val)
5703 	      != (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (op0_mode) - 1)))
5704 	    code = GT, op1 = gen_int_mode (const_val - 1, op0_mode);
5705 	  break;
5706 
5707 	case LEU:
5708 	  if (uconst_val < max_val)
5709 	    code = LTU, op1 = gen_int_mode (uconst_val + 1, op0_mode);
5710 	  break;
5711 
5712 	case GEU:
5713 	  if (uconst_val != 0)
5714 	    code = GTU, op1 = gen_int_mode (uconst_val - 1, op0_mode);
5715 	  break;
5716 
5717 	default:
5718 	  break;
5719 	}
5720     }
5721 
5722   /* Never return CC0; return zero instead.  */
5723   if (CC0_P (op0))
5724     return 0;
5725 
5726   /* We promised to return a comparison.  */
5727   rtx ret = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
5728   if (COMPARISON_P (ret))
5729     return ret;
5730   return 0;
5731 }
5732 
5733 /* Given a jump insn JUMP, return the condition that will cause it to branch
5734    to its JUMP_LABEL.  If the condition cannot be understood, or is an
5735    inequality floating-point comparison which needs to be reversed, 0 will
5736    be returned.
5737 
5738    If EARLIEST is nonzero, it is a pointer to a place where the earliest
5739    insn used in locating the condition was found.  If a replacement test
5740    of the condition is desired, it should be placed in front of that
5741    insn and we will be sure that the inputs are still valid.  If EARLIEST
5742    is null, the returned condition will be valid at INSN.
5743 
5744    If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
5745    compare CC mode register.
5746 
5747    VALID_AT_INSN_P is the same as for canonicalize_condition.  */
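
/* For example, for a conditional jump whose pattern is

     (set (pc) (if_then_else (ne (reg:CC 17) (const_int 0))
			     (label_ref 23) (pc)))

   the condition is built from the NE comparison; if the (label_ref) and
   (pc) arms are swapped, the comparison is reversed before being
   canonicalized.  (The register and label numbers are only
   illustrative.)  */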
5748 
5749 rtx
5750 get_condition (rtx_insn *jump, rtx_insn **earliest, int allow_cc_mode,
5751 	       int valid_at_insn_p)
5752 {
5753   rtx cond;
5754   int reverse;
5755   rtx set;
5756 
5757   /* If this is not a standard conditional jump, we can't parse it.  */
5758   if (!JUMP_P (jump)
5759       || ! any_condjump_p (jump))
5760     return 0;
5761   set = pc_set (jump);
5762 
5763   cond = XEXP (SET_SRC (set), 0);
5764 
5765   /* If this branches to JUMP_LABEL when the condition is false, reverse
5766      the condition.  */
5767   reverse
5768     = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
5769       && label_ref_label (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (jump);
5770 
5771   return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
5772 				 allow_cc_mode, valid_at_insn_p);
5773 }
5774 
5775 /* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
5776    TARGET_MODE_REP_EXTENDED.
5777 
5778    Note that we assume that the property of
5779    TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
5780    narrower than mode B.  I.e., if A is a mode narrower than B then in
5781    order to be able to operate on it in mode B, mode A needs to
5782    satisfy the requirements set by the representation of mode B.  */
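
/* For instance, on a target where TARGET_MODE_REP_EXTENDED (SImode, DImode)
   is SIGN_EXTEND, num_sign_bit_copies_in_rep[DImode][SImode] becomes 32,
   and truncated_to_mode below will treat a DImode value with at least 33
   sign-bit copies as already truncated to SImode.  */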
5783 
5784 static void
5785 init_num_sign_bit_copies_in_rep (void)
5786 {
5787   opt_scalar_int_mode in_mode_iter;
5788   scalar_int_mode mode;
5789 
5790   FOR_EACH_MODE_IN_CLASS (in_mode_iter, MODE_INT)
5791     FOR_EACH_MODE_UNTIL (mode, in_mode_iter.require ())
5792       {
5793 	scalar_int_mode in_mode = in_mode_iter.require ();
5794 	scalar_int_mode i;
5795 
5796 	/* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
5797 	   extends to the next widest mode.  */
5798 	gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
5799 		    || GET_MODE_WIDER_MODE (mode).require () == in_mode);
5800 
5801 	/* We are in in_mode.  Count how many bits outside of mode
5802 	   have to be copies of the sign-bit.  */
5803 	FOR_EACH_MODE (i, mode, in_mode)
5804 	  {
5805 	    /* This must always exist (for the last iteration it will be
5806 	       IN_MODE).  */
5807 	    scalar_int_mode wider = GET_MODE_WIDER_MODE (i).require ();
5808 
5809 	    if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
5810 		/* We can only check sign-bit copies starting from the
5811 		   top-bit.  In order to be able to check the bits we
5812 		   have already seen we pretend that subsequent bits
5813 		   have to be sign-bit copies too.  */
5814 		|| num_sign_bit_copies_in_rep [in_mode][mode])
5815 	      num_sign_bit_copies_in_rep [in_mode][mode]
5816 		+= GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
5817 	  }
5818       }
5819 }
5820 
5821 /* Suppose that truncation from the machine mode of X to MODE is not a
5822    no-op.  See if there is anything special about X so that we can
5823    assume it already contains a truncated value of MODE.  */
5824 
5825 bool
5826 truncated_to_mode (machine_mode mode, const_rtx x)
5827 {
5828   /* This register has already been used in MODE without explicit
5829      truncation.  */
5830   if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
5831     return true;
5832 
5833   /* See if we already satisfy the requirements of MODE.  If yes we
5834      can just switch to MODE.  */
5835   if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
5836       && (num_sign_bit_copies (x, GET_MODE (x))
5837 	  >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
5838     return true;
5839 
5840   return false;
5841 }
5842 
5843 /* Return true if RTX code CODE has a single sequence of zero or more
5844    "e" operands and no rtvec operands.  Initialize its rtx_all_subrtx_bounds
5845    entry in that case.  */
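
/* For instance, PLUS has format "ee", giving a start of 0 and a count of 2,
   while PARALLEL has format "E", so the function returns false and
   init_rtlanal records UCHAR_MAX for it instead.  */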
5846 
5847 static bool
5848 setup_reg_subrtx_bounds (unsigned int code)
5849 {
5850   const char *format = GET_RTX_FORMAT ((enum rtx_code) code);
5851   unsigned int i = 0;
5852   for (; format[i] != 'e'; ++i)
5853     {
5854       if (!format[i])
5855 	/* No subrtxes.  Leave start and count as 0.  */
5856 	return true;
5857       if (format[i] == 'E' || format[i] == 'V')
5858 	return false;
5859     }
5860 
5861   /* Record the sequence of 'e's.  */
5862   rtx_all_subrtx_bounds[code].start = i;
5863   do
5864     ++i;
5865   while (format[i] == 'e');
5866   rtx_all_subrtx_bounds[code].count = i - rtx_all_subrtx_bounds[code].start;
5867   /* rtl-iter.h relies on this.  */
5868   gcc_checking_assert (rtx_all_subrtx_bounds[code].count <= 3);
5869 
5870   for (; format[i]; ++i)
5871     if (format[i] == 'E' || format[i] == 'V' || format[i] == 'e')
5872       return false;
5873 
5874   return true;
5875 }
5876 
5877 /* Initialize rtx_all_subrtx_bounds.  */
5878 void
5879 init_rtlanal (void)
5880 {
5881   int i;
5882   for (i = 0; i < NUM_RTX_CODE; i++)
5883     {
5884       if (!setup_reg_subrtx_bounds (i))
5885 	rtx_all_subrtx_bounds[i].count = UCHAR_MAX;
5886       if (GET_RTX_CLASS (i) != RTX_CONST_OBJ)
5887 	rtx_nonconst_subrtx_bounds[i] = rtx_all_subrtx_bounds[i];
5888     }
5889 
5890   init_num_sign_bit_copies_in_rep ();
5891 }
5892 
5893 /* Check whether this is a constant pool constant.  */
5894 bool
5895 constant_pool_constant_p (rtx x)
5896 {
5897   x = avoid_constant_pool_reference (x);
5898   return CONST_DOUBLE_P (x);
5899 }
5900 
5901 /* If M is a bitmask that selects a field of low-order bits within an item but
5902    not the entire word, return the length of the field.  Return -1 otherwise.
5903    M is used in machine mode MODE.  */
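
/* For example, low_bitmask_len (SImode, 0x3f) is 6, while
   low_bitmask_len (SImode, 0x70) is -1 because the field does not start
   at bit 0.  */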
5904 
5905 int
5906 low_bitmask_len (machine_mode mode, unsigned HOST_WIDE_INT m)
5907 {
5908   if (mode != VOIDmode)
5909     {
5910       if (!HWI_COMPUTABLE_MODE_P (mode))
5911 	return -1;
5912       m &= GET_MODE_MASK (mode);
5913     }
5914 
5915   return exact_log2 (m + 1);
5916 }
5917 
5918 /* Return the mode of MEM's address.  */
5919 
5920 scalar_int_mode
5921 get_address_mode (rtx mem)
5922 {
5923   machine_mode mode;
5924 
5925   gcc_assert (MEM_P (mem));
5926   mode = GET_MODE (XEXP (mem, 0));
5927   if (mode != VOIDmode)
5928     return as_a <scalar_int_mode> (mode);
5929   return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
5930 }
5931 
5932 /* Split up a CONST_DOUBLE or integer constant rtx
5933    into two rtx's for single words,
5934    storing in *FIRST the word that comes first in memory in the target
5935    and in *SECOND the other.
5936 
5937    TODO: This function needs to be rewritten to work on any size
5938    integer.  */
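
/* For example, with a 64-bit HOST_WIDE_INT and 32-bit words, splitting
   (const_int 0x100000002) produces the words 2 and 1, with the low-order
   word in *FIRST unless WORDS_BIG_ENDIAN.  Each word is sign-extended,
   so a low word of 0x80000000 comes back as (const_int -2147483648).  */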
5939 
5940 void
5941 split_double (rtx value, rtx *first, rtx *second)
5942 {
5943   if (CONST_INT_P (value))
5944     {
5945       if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
5946 	{
5947 	  /* In this case the CONST_INT holds both target words.
5948 	     Extract the bits from it into two word-sized pieces.
5949 	     Sign extend each half to HOST_WIDE_INT.  */
5950 	  unsigned HOST_WIDE_INT low, high;
5951 	  unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
5952 	  unsigned bits_per_word = BITS_PER_WORD;
5953 
5954 	  /* Set sign_bit to the most significant bit of a word.  */
5955 	  sign_bit = 1;
5956 	  sign_bit <<= bits_per_word - 1;
5957 
5958 	  /* Set mask so that all bits of the word are set.  We could
5959 	     have used 1 << BITS_PER_WORD instead of basing the
5960 	     calculation on sign_bit.  However, on machines where
5961 	     HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
5962 	     compiler warning, even though the code would never be
5963 	     executed.  */
5964 	  mask = sign_bit << 1;
5965 	  mask--;
5966 
5967 	  /* Set sign_extend as any remaining bits.  */
5968 	  sign_extend = ~mask;
5969 
5970 	  /* Pick the lower word and sign-extend it.  */
5971 	  low = INTVAL (value);
5972 	  low &= mask;
5973 	  if (low & sign_bit)
5974 	    low |= sign_extend;
5975 
5976 	  /* Pick the higher word, shifted to the least significant
5977 	     bits, and sign-extend it.  */
5978 	  high = INTVAL (value);
5979 	  high >>= bits_per_word - 1;
5980 	  high >>= 1;
5981 	  high &= mask;
5982 	  if (high & sign_bit)
5983 	    high |= sign_extend;
5984 
5985 	  /* Store the words in the target machine order.  */
5986 	  if (WORDS_BIG_ENDIAN)
5987 	    {
5988 	      *first = GEN_INT (high);
5989 	      *second = GEN_INT (low);
5990 	    }
5991 	  else
5992 	    {
5993 	      *first = GEN_INT (low);
5994 	      *second = GEN_INT (high);
5995 	    }
5996 	}
5997       else
5998 	{
5999 	  /* The rule for using CONST_INT for a wider mode
6000 	     is that we regard the value as signed.
6001 	     So sign-extend it.  */
6002 	  rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
6003 	  if (WORDS_BIG_ENDIAN)
6004 	    {
6005 	      *first = high;
6006 	      *second = value;
6007 	    }
6008 	  else
6009 	    {
6010 	      *first = value;
6011 	      *second = high;
6012 	    }
6013 	}
6014     }
6015   else if (GET_CODE (value) == CONST_WIDE_INT)
6016     {
6017       /* All of this is scary code and needs to be converted to
6018 	 properly work with any size integer.  */
6019       gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2);
6020       if (WORDS_BIG_ENDIAN)
6021 	{
6022 	  *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
6023 	  *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
6024 	}
6025       else
6026 	{
6027 	  *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
6028 	  *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
6029 	}
6030     }
6031   else if (!CONST_DOUBLE_P (value))
6032     {
6033       if (WORDS_BIG_ENDIAN)
6034 	{
6035 	  *first = const0_rtx;
6036 	  *second = value;
6037 	}
6038       else
6039 	{
6040 	  *first = value;
6041 	  *second = const0_rtx;
6042 	}
6043     }
6044   else if (GET_MODE (value) == VOIDmode
6045 	   /* This is the old way we did CONST_DOUBLE integers.  */
6046 	   || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
6047     {
6048       /* In an integer, the words are defined as most and least significant.
6049 	 So order them by the target's convention.  */
6050       if (WORDS_BIG_ENDIAN)
6051 	{
6052 	  *first = GEN_INT (CONST_DOUBLE_HIGH (value));
6053 	  *second = GEN_INT (CONST_DOUBLE_LOW (value));
6054 	}
6055       else
6056 	{
6057 	  *first = GEN_INT (CONST_DOUBLE_LOW (value));
6058 	  *second = GEN_INT (CONST_DOUBLE_HIGH (value));
6059 	}
6060     }
6061   else
6062     {
6063       long l[2];
6064 
6065       /* Note, this converts the REAL_VALUE_TYPE to the target's
6066 	 format, splits up the floating point double and outputs
6067 	 exactly 32 bits of it into each of l[0] and l[1] --
6068 	 not necessarily BITS_PER_WORD bits.  */
6069       REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (value), l);
6070 
6071       /* If 32 bits is an entire word for the target, but not for the host,
6072 	 then sign-extend on the host so that the number will look the same
6073 	 way on the host that it would on the target.  See for instance
6074 	 simplify_unary_operation.  The #if is needed to avoid compiler
6075 	 warnings.  */
6076 
6077 #if HOST_BITS_PER_LONG > 32
6078       if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
6079 	{
6080 	  if (l[0] & ((long) 1 << 31))
6081 	    l[0] |= ((unsigned long) (-1) << 32);
6082 	  if (l[1] & ((long) 1 << 31))
6083 	    l[1] |= ((unsigned long) (-1) << 32);
6084 	}
6085 #endif
6086 
6087       *first = GEN_INT (l[0]);
6088       *second = GEN_INT (l[1]);
6089     }
6090 }
6091 
6092 /* Return true if X is a sign_extract or zero_extract from the least
6093    significant bit.  */
6094 
6095 static bool
6096 lsb_bitfield_op_p (rtx x)
6097 {
6098   if (GET_RTX_CLASS (GET_CODE (x)) == RTX_BITFIELD_OPS)
6099     {
6100       machine_mode mode = GET_MODE (XEXP (x, 0));
6101       HOST_WIDE_INT len = INTVAL (XEXP (x, 1));
6102       HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));
6103       poly_int64 remaining_bits = GET_MODE_PRECISION (mode) - len;
6104 
6105       return known_eq (pos, BITS_BIG_ENDIAN ? remaining_bits : 0);
6106     }
6107   return false;
6108 }
6109 
6110 /* Strip outer address "mutations" from LOC and return a pointer to the
6111    inner value.  If OUTER_CODE is nonnull, store the code of the innermost
6112    stripped expression there.
6113 
6114    "Mutations" either convert between modes or apply some kind of
6115    extension, truncation or alignment.  */
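
/* For example, given (zero_extend:DI (and:SI (reg:SI R) (const_int -8))),
   both the extension and the alignment AND are stripped, the returned
   pointer refers to (reg:SI R), and *OUTER_CODE is set to AND.  */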
6116 
6117 rtx *
6118 strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
6119 {
6120   for (;;)
6121     {
6122       enum rtx_code code = GET_CODE (*loc);
6123       if (GET_RTX_CLASS (code) == RTX_UNARY)
6124 	/* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
6125 	   used to convert between pointer sizes.  */
6126 	loc = &XEXP (*loc, 0);
6127       else if (lsb_bitfield_op_p (*loc))
6128 	/* A [SIGN|ZERO]_EXTRACT from the least significant bit effectively
6129 	   acts as a combined truncation and extension.  */
6130 	loc = &XEXP (*loc, 0);
6131       else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
6132 	/* (and ... (const_int -X)) is used to align to X bytes.  */
6133 	loc = &XEXP (*loc, 0);
6134       else if (code == SUBREG
6135                && !OBJECT_P (SUBREG_REG (*loc))
6136                && subreg_lowpart_p (*loc))
6137 	/* A lowpart (subreg (operator ...) ...) is used for mode
6138 	   conversion too.  */
6139 	loc = &SUBREG_REG (*loc);
6140       else
6141 	return loc;
6142       if (outer_code)
6143 	*outer_code = code;
6144     }
6145 }
6146 
6147 /* Return true if CODE applies some kind of scale.  The scaled value
6148    is the first operand and the scale is the second.  */
6149 
6150 static bool
6151 binary_scale_code_p (enum rtx_code code)
6152 {
6153   return (code == MULT
6154           || code == ASHIFT
6155           /* Needed by ARM targets.  */
6156           || code == ASHIFTRT
6157           || code == LSHIFTRT
6158           || code == ROTATE
6159           || code == ROTATERT);
6160 }
6161 
6162 /* If *INNER can be interpreted as a base, return a pointer to the inner term
6163    (see address_info).  Return null otherwise.  */
6164 
6165 static rtx *
6166 get_base_term (rtx *inner)
6167 {
6168   if (GET_CODE (*inner) == LO_SUM)
6169     inner = strip_address_mutations (&XEXP (*inner, 0));
6170   if (REG_P (*inner)
6171       || MEM_P (*inner)
6172       || GET_CODE (*inner) == SUBREG
6173       || GET_CODE (*inner) == SCRATCH)
6174     return inner;
6175   return 0;
6176 }
6177 
6178 /* If *INNER can be interpreted as an index, return a pointer to the inner term
6179    (see address_info).  Return null otherwise.  */
6180 
6181 static rtx *
6182 get_index_term (rtx *inner)
6183 {
6184   /* At present, only constant scales are allowed.  */
6185   if (binary_scale_code_p (GET_CODE (*inner)) && CONSTANT_P (XEXP (*inner, 1)))
6186     inner = strip_address_mutations (&XEXP (*inner, 0));
6187   if (REG_P (*inner)
6188       || MEM_P (*inner)
6189       || GET_CODE (*inner) == SUBREG
6190       || GET_CODE (*inner) == SCRATCH)
6191     return inner;
6192   return 0;
6193 }
6194 
6195 /* Set the segment part of address INFO to LOC, given that INNER is the
6196    unmutated value.  */
6197 
6198 static void
6199 set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
6200 {
6201   gcc_assert (!info->segment);
6202   info->segment = loc;
6203   info->segment_term = inner;
6204 }
6205 
6206 /* Set the base part of address INFO to LOC, given that INNER is the
6207    unmutated value.  */
6208 
6209 static void
6210 set_address_base (struct address_info *info, rtx *loc, rtx *inner)
6211 {
6212   gcc_assert (!info->base);
6213   info->base = loc;
6214   info->base_term = inner;
6215 }
6216 
6217 /* Set the index part of address INFO to LOC, given that INNER is the
6218    unmutated value.  */
6219 
6220 static void
6221 set_address_index (struct address_info *info, rtx *loc, rtx *inner)
6222 {
6223   gcc_assert (!info->index);
6224   info->index = loc;
6225   info->index_term = inner;
6226 }
6227 
6228 /* Set the displacement part of address INFO to LOC, given that INNER
6229    is the constant term.  */
6230 
6231 static void
6232 set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
6233 {
6234   gcc_assert (!info->disp);
6235   info->disp = loc;
6236   info->disp_term = inner;
6237 }
6238 
6239 /* INFO->INNER describes a {PRE,POST}_{INC,DEC} address.  Set up the
6240    rest of INFO accordingly.  */
6241 
6242 static void
6243 decompose_incdec_address (struct address_info *info)
6244 {
6245   info->autoinc_p = true;
6246 
6247   rtx *base = &XEXP (*info->inner, 0);
6248   set_address_base (info, base, base);
6249   gcc_checking_assert (info->base == info->base_term);
6250 
6251   /* These addresses are only valid when the size of the addressed
6252      value is known.  */
6253   gcc_checking_assert (info->mode != VOIDmode);
6254 }
6255 
6256 /* INFO->INNER describes a {PRE,POST}_MODIFY address.  Set up the rest
6257    of INFO accordingly.  */
6258 
6259 static void
6260 decompose_automod_address (struct address_info *info)
6261 {
6262   info->autoinc_p = true;
6263 
6264   rtx *base = &XEXP (*info->inner, 0);
6265   set_address_base (info, base, base);
6266   gcc_checking_assert (info->base == info->base_term);
6267 
6268   rtx plus = XEXP (*info->inner, 1);
6269   gcc_assert (GET_CODE (plus) == PLUS);
6270 
6271   info->base_term2 = &XEXP (plus, 0);
6272   gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));
6273 
6274   rtx *step = &XEXP (plus, 1);
6275   rtx *inner_step = strip_address_mutations (step);
6276   if (CONSTANT_P (*inner_step))
6277     set_address_disp (info, step, inner_step);
6278   else
6279     set_address_index (info, step, inner_step);
6280 }
6281 
6282 /* Treat *LOC as a tree of PLUS operands and store pointers to the summed
6283    values in [PTR, END).  Return a pointer to the end of the used array.  */
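
/* For example, (plus (plus (reg A) (reg B)) (const_int 4)) yields
   pointers to (reg A), (reg B) and (const_int 4), in that order.  */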
6284 
6285 static rtx **
6286 extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
6287 {
6288   rtx x = *loc;
6289   if (GET_CODE (x) == PLUS)
6290     {
6291       ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
6292       ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
6293     }
6294   else
6295     {
6296       gcc_assert (ptr != end);
6297       *ptr++ = loc;
6298     }
6299   return ptr;
6300 }
6301 
6302 /* Evaluate the likelihood of X being a base or index value, returning
6303    positive if it is likely to be a base, negative if it is likely to be
6304    an index, and 0 if we can't tell.  Make the magnitude of the return
6305    value reflect the amount of confidence we have in the answer.
6306 
6307    MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1.  */
6308 
6309 static int
6310 baseness (rtx x, machine_mode mode, addr_space_t as,
6311 	  enum rtx_code outer_code, enum rtx_code index_code)
6312 {
6313   /* Believe *_POINTER unless the address shape requires otherwise.  */
6314   if (REG_P (x) && REG_POINTER (x))
6315     return 2;
6316   if (MEM_P (x) && MEM_POINTER (x))
6317     return 2;
6318 
6319   if (REG_P (x) && HARD_REGISTER_P (x))
6320     {
6321       /* X is a hard register.  If it only fits one of the base
6322 	 or index classes, choose that interpretation.  */
6323       int regno = REGNO (x);
6324       bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
6325       bool index_p = REGNO_OK_FOR_INDEX_P (regno);
6326       if (base_p != index_p)
6327 	return base_p ? 1 : -1;
6328     }
6329   return 0;
6330 }
6331 
6332 /* INFO->INNER describes a normal, non-automodified address.
6333    Fill in the rest of INFO accordingly.  */
6334 
6335 static void
6336 decompose_normal_address (struct address_info *info)
6337 {
6338   /* Treat the address as the sum of up to four values.  */
6339   rtx *ops[4];
6340   size_t n_ops = extract_plus_operands (info->inner, ops,
6341 					ops + ARRAY_SIZE (ops)) - ops;
6342 
6343   /* If there is more than one component, any base component is in a PLUS.  */
6344   if (n_ops > 1)
6345     info->base_outer_code = PLUS;
6346 
6347   /* Try to classify each sum operand now.  Leave those that could be
6348      either a base or an index in OPS.  */
6349   rtx *inner_ops[4];
6350   size_t out = 0;
6351   for (size_t in = 0; in < n_ops; ++in)
6352     {
6353       rtx *loc = ops[in];
6354       rtx *inner = strip_address_mutations (loc);
6355       if (CONSTANT_P (*inner))
6356 	set_address_disp (info, loc, inner);
6357       else if (GET_CODE (*inner) == UNSPEC)
6358 	set_address_segment (info, loc, inner);
6359       else
6360 	{
6361 	  /* The only other possibilities are a base or an index.  */
6362 	  rtx *base_term = get_base_term (inner);
6363 	  rtx *index_term = get_index_term (inner);
6364 	  gcc_assert (base_term || index_term);
6365 	  if (!base_term)
6366 	    set_address_index (info, loc, index_term);
6367 	  else if (!index_term)
6368 	    set_address_base (info, loc, base_term);
6369 	  else
6370 	    {
6371 	      gcc_assert (base_term == index_term);
6372 	      ops[out] = loc;
6373 	      inner_ops[out] = base_term;
6374 	      ++out;
6375 	    }
6376 	}
6377     }
6378 
6379   /* Classify the remaining OPS members as bases and indexes.  */
6380   if (out == 1)
6381     {
6382       /* If we haven't seen a base or an index yet, assume that this is
6383 	 the base.  If we were confident that another term was the base
6384 	 or index, treat the remaining operand as the other kind.  */
6385       if (!info->base)
6386 	set_address_base (info, ops[0], inner_ops[0]);
6387       else
6388 	set_address_index (info, ops[0], inner_ops[0]);
6389     }
6390   else if (out == 2)
6391     {
6392       /* In the event of a tie, assume the base comes first.  */
6393       if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
6394 		    GET_CODE (*ops[1]))
6395 	  >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
6396 		       GET_CODE (*ops[0])))
6397 	{
6398 	  set_address_base (info, ops[0], inner_ops[0]);
6399 	  set_address_index (info, ops[1], inner_ops[1]);
6400 	}
6401       else
6402 	{
6403 	  set_address_base (info, ops[1], inner_ops[1]);
6404 	  set_address_index (info, ops[0], inner_ops[0]);
6405 	}
6406     }
6407   else
6408     gcc_assert (out == 0);
6409 }
6410 
6411 /* Describe address *LOC in *INFO.  MODE is the mode of the addressed value,
6412    or VOIDmode if not known.  AS is the address space associated with LOC.
6413    OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise.  */
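
/* For example, for a MEM whose address is
   (plus (reg B) (mult (reg I) (const_int 4))), the MULT term can only be
   an index, so it becomes INFO->INDEX and the bare register becomes
   INFO->BASE; a (const_int ...) term would instead be recorded as
   INFO->DISP.  */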
6414 
6415 void
6416 decompose_address (struct address_info *info, rtx *loc, machine_mode mode,
6417 		   addr_space_t as, enum rtx_code outer_code)
6418 {
6419   memset (info, 0, sizeof (*info));
6420   info->mode = mode;
6421   info->as = as;
6422   info->addr_outer_code = outer_code;
6423   info->outer = loc;
6424   info->inner = strip_address_mutations (loc, &outer_code);
6425   info->base_outer_code = outer_code;
6426   switch (GET_CODE (*info->inner))
6427     {
6428     case PRE_DEC:
6429     case PRE_INC:
6430     case POST_DEC:
6431     case POST_INC:
6432       decompose_incdec_address (info);
6433       break;
6434 
6435     case PRE_MODIFY:
6436     case POST_MODIFY:
6437       decompose_automod_address (info);
6438       break;
6439 
6440     default:
6441       decompose_normal_address (info);
6442       break;
6443     }
6444 }
6445 
6446 /* Describe address operand LOC in INFO.  */
6447 
6448 void
6449 decompose_lea_address (struct address_info *info, rtx *loc)
6450 {
6451   decompose_address (info, loc, VOIDmode, ADDR_SPACE_GENERIC, ADDRESS);
6452 }
6453 
6454 /* Describe the address of MEM X in INFO.  */
6455 
6456 void
6457 decompose_mem_address (struct address_info *info, rtx x)
6458 {
6459   gcc_assert (MEM_P (x));
6460   decompose_address (info, &XEXP (x, 0), GET_MODE (x),
6461 		     MEM_ADDR_SPACE (x), MEM);
6462 }
6463 
6464 /* Update INFO after a change to the address it describes.  */
6465 
6466 void
6467 update_address (struct address_info *info)
6468 {
6469   decompose_address (info, info->outer, info->mode, info->as,
6470 		     info->addr_outer_code);
6471 }
6472 
6473 /* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
6474    more complicated than that.  */
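
/* For example, the scale is 4 for both (mult R (const_int 4)) and
   (ashift R (const_int 2)), and 1 when the index is a bare register.  */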
6475 
6476 HOST_WIDE_INT
6477 get_index_scale (const struct address_info *info)
6478 {
6479   rtx index = *info->index;
6480   if (GET_CODE (index) == MULT
6481       && CONST_INT_P (XEXP (index, 1))
6482       && info->index_term == &XEXP (index, 0))
6483     return INTVAL (XEXP (index, 1));
6484 
6485   if (GET_CODE (index) == ASHIFT
6486       && CONST_INT_P (XEXP (index, 1))
6487       && info->index_term == &XEXP (index, 0))
6488     return HOST_WIDE_INT_1 << INTVAL (XEXP (index, 1));
6489 
6490   if (info->index == info->index_term)
6491     return 1;
6492 
6493   return 0;
6494 }
6495 
6496 /* Return the "index code" of INFO, in the form required by
6497    ok_for_base_p_1.  */
6498 
6499 enum rtx_code
6500 get_index_code (const struct address_info *info)
6501 {
6502   if (info->index)
6503     return GET_CODE (*info->index);
6504 
6505   if (info->disp)
6506     return GET_CODE (*info->disp);
6507 
6508   return SCRATCH;
6509 }
6510 
6511 /* Return true if RTL X contains a SYMBOL_REF.  */
6512 
6513 bool
6514 contains_symbol_ref_p (const_rtx x)
6515 {
6516   subrtx_iterator::array_type array;
6517   FOR_EACH_SUBRTX (iter, array, x, ALL)
6518     if (SYMBOL_REF_P (*iter))
6519       return true;
6520 
6521   return false;
6522 }
6523 
6524 /* Return true if RTL X contains a SYMBOL_REF or LABEL_REF.  */
6525 
6526 bool
6527 contains_symbolic_reference_p (const_rtx x)
6528 {
6529   subrtx_iterator::array_type array;
6530   FOR_EACH_SUBRTX (iter, array, x, ALL)
6531     if (SYMBOL_REF_P (*iter) || GET_CODE (*iter) == LABEL_REF)
6532       return true;
6533 
6534   return false;
6535 }
6536 
6537 /* Return true if X contains a thread-local symbol.  */
6538 
6539 bool
6540 tls_referenced_p (const_rtx x)
6541 {
6542   if (!targetm.have_tls)
6543     return false;
6544 
6545   subrtx_iterator::array_type array;
6546   FOR_EACH_SUBRTX (iter, array, x, ALL)
6547     if (GET_CODE (*iter) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (*iter) != 0)
6548       return true;
6549   return false;
6550 }
6551