/* Analyze RTL for GNU compiler.
   Copyright (C) 1987-2019 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "predict.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "insn-config.h"
#include "regs.h"
#include "emit-rtl.h"  /* FIXME: Can go away once crtl is moved to rtl.h.  */
#include "recog.h"
#include "addresses.h"
#include "rtl-iter.h"
#include "hard-reg-set.h"

/* Forward declarations */
static void set_of_1 (rtx, const_rtx, void *);
static bool covers_regno_p (const_rtx, unsigned int);
static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
static int computed_jump_p_1 (const_rtx);
static void parms_set (rtx, const_rtx, void *);

static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, scalar_int_mode,
						   const_rtx, machine_mode,
						   unsigned HOST_WIDE_INT);
static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, scalar_int_mode,
					     const_rtx, machine_mode,
					     unsigned HOST_WIDE_INT);
static unsigned int cached_num_sign_bit_copies (const_rtx, scalar_int_mode,
						const_rtx, machine_mode,
						unsigned int);
static unsigned int num_sign_bit_copies1 (const_rtx, scalar_int_mode,
					  const_rtx, machine_mode,
					  unsigned int);

rtx_subrtx_bound_info rtx_all_subrtx_bounds[NUM_RTX_CODE];
rtx_subrtx_bound_info rtx_nonconst_subrtx_bounds[NUM_RTX_CODE];

/* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
   If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
   SIGN_EXTEND then while narrowing we also have to enforce the
   representation and sign-extend the value to mode DESTINATION_REP.

   If the value is already sign-extended to DESTINATION_REP mode we
   can just switch to DESTINATION mode on it.  For each pair of
   integral modes SOURCE and DESTINATION, when truncating from SOURCE
   to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
   contains the number of high-order bits in SOURCE that have to be
   copies of the sign-bit so that we can do this mode-switch to
   DESTINATION.  */

static unsigned int
num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];

/* Store X into index I of ARRAY.  ARRAY is known to have at least I
   elements.  Return the new base of ARRAY.  */

template <typename T>
typename T::value_type *
generic_subrtx_iterator <T>::add_single_to_queue (array_type &array,
						  value_type *base,
						  size_t i, value_type x)
{
  if (base == array.stack)
    {
      if (i < LOCAL_ELEMS)
	{
	  base[i] = x;
	  return base;
	}
      gcc_checking_assert (i == LOCAL_ELEMS);
      /* A previous iteration might also have moved from the stack to the
	 heap, in which case the heap array will already be big enough.  */
      if (vec_safe_length (array.heap) <= i)
	vec_safe_grow (array.heap, i + 1);
      base = array.heap->address ();
      memcpy (base, array.stack, sizeof (array.stack));
      base[LOCAL_ELEMS] = x;
      return base;
    }
  unsigned int length = array.heap->length ();
  if (length > i)
    {
      gcc_checking_assert (base == array.heap->address ());
      base[i] = x;
      return base;
    }
  else
    {
      gcc_checking_assert (i == length);
      vec_safe_push (array.heap, x);
      return array.heap->address ();
    }
}

/* Add the subrtxes of X to worklist ARRAY, starting at END.  Return the
   number of elements added to the worklist.  */

template <typename T>
size_t
generic_subrtx_iterator <T>::add_subrtxes_to_queue (array_type &array,
						    value_type *base,
						    size_t end, rtx_type x)
{
  enum rtx_code code = GET_CODE (x);
  const char *format = GET_RTX_FORMAT (code);
  size_t orig_end = end;
  if (__builtin_expect (INSN_P (x), false))
    {
      /* Put the pattern at the top of the queue, since that's what
	 we're likely to want most.  It also allows for the SEQUENCE
	 code below.  */
      for (int i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; --i)
	if (format[i] == 'e')
	  {
	    value_type subx = T::get_value (x->u.fld[i].rt_rtx);
	    if (__builtin_expect (end < LOCAL_ELEMS, true))
	      base[end++] = subx;
	    else
	      base = add_single_to_queue (array, base, end++, subx);
	  }
    }
  else
    for (int i = 0; format[i]; ++i)
      if (format[i] == 'e')
	{
	  value_type subx = T::get_value (x->u.fld[i].rt_rtx);
	  if (__builtin_expect (end < LOCAL_ELEMS, true))
	    base[end++] = subx;
	  else
	    base = add_single_to_queue (array, base, end++, subx);
	}
      else if (format[i] == 'E')
	{
	  unsigned int length = GET_NUM_ELEM (x->u.fld[i].rt_rtvec);
	  rtx *vec = x->u.fld[i].rt_rtvec->elem;
	  if (__builtin_expect (end + length <= LOCAL_ELEMS, true))
	    for (unsigned int j = 0; j < length; j++)
	      base[end++] = T::get_value (vec[j]);
	  else
	    for (unsigned int j = 0; j < length; j++)
	      base = add_single_to_queue (array, base, end++,
					  T::get_value (vec[j]));
	  if (code == SEQUENCE && end == length)
	    /* If the subrtxes of the sequence fill the entire array then
	       we know that no other parts of a containing insn are queued.
	       The caller is therefore iterating over the sequence as a
	       PATTERN (...), so we also want the patterns of the
	       subinstructions.  */
	    for (unsigned int j = 0; j < length; j++)
	      {
		typename T::rtx_type x = T::get_rtx (base[j]);
		if (INSN_P (x))
		  base[j] = T::get_value (PATTERN (x));
	      }
	}
  return end - orig_end;
}

template <typename T>
void
generic_subrtx_iterator <T>::free_array (array_type &array)
{
  vec_free (array.heap);
}

template <typename T>
const size_t generic_subrtx_iterator <T>::LOCAL_ELEMS;

template class generic_subrtx_iterator <const_rtx_accessor>;
template class generic_subrtx_iterator <rtx_var_accessor>;
template class generic_subrtx_iterator <rtx_ptr_accessor>;

/* Return 1 if the value of X is unstable
   (would be different at a different point in the program).
   The frame pointer, arg pointer, etc. are considered stable
   (within one function) and so is anything marked `unchanging'.  */

int
rtx_unstable_p (const_rtx x)
{
  const RTX_CODE code = GET_CODE (x);
  int i;
  const char *fmt;

  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
	  /* The arg pointer varies if it is not a fixed register.  */
	  || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
	return 0;
      /* ??? When call-clobbered, the value is stable modulo the restore
	 that must happen after a call.  This currently screws up local-alloc
	 into believing that the restore is not needed.  */
      if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
	return 0;
      return 1;

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
	return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
	if (rtx_unstable_p (XEXP (x, i)))
	  return 1;
      }
    else if (fmt[i] == 'E')
      {
	int j;
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (rtx_unstable_p (XVECEXP (x, i, j)))
	    return 1;
      }

  return 0;
}
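
/* Illustrative calls (derived from the cases above; regno 100 is an
   arbitrary pseudo):

     rtx_unstable_p (gen_rtx_REG (SImode, 100))  returns 1, since a
       pseudo may hold different values at different points;
     rtx_unstable_p (frame_pointer_rtx)          returns 0, stable
       within one function;
     rtx_unstable_p (GEN_INT (42))               returns 0, constants
       never vary.  */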

/* Return 1 if X has a value that can vary even between two
   executions of the program.  0 means X can be compared reliably
   against certain constants or near-constants.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.
   The frame pointer and the arg pointer are considered constant.  */

bool
rtx_varies_p (const_rtx x, bool for_alias)
{
  RTX_CODE code;
  int i;
  const char *fmt;

  if (!x)
    return 0;

  code = GET_CODE (x);
  switch (code)
    {
    case MEM:
      return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);

    case CONST:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case REG:
      /* Note that we have to test for the actual rtx used for the frame
	 and arg pointers and not just the register number in case we have
	 eliminated the frame and/or arg pointer and are using it
	 for pseudos.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
	  /* The arg pointer varies if it is not a fixed register.  */
	  || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
	return 0;
      if (x == pic_offset_table_rtx
	  /* ??? When call-clobbered, the value is stable modulo the restore
	     that must happen after a call.  This currently screws up
	     local-alloc into believing that the restore is not needed, so we
	     must return 0 only if we are called from alias analysis.  */
	  && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
	return 0;
      return 1;

    case LO_SUM:
      /* The operand 0 of a LO_SUM is considered constant
	 (in fact it is related specifically to operand 1)
	 during alias analysis.  */
      return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
	     || rtx_varies_p (XEXP (x, 1), for_alias);

    case ASM_OPERANDS:
      if (MEM_VOLATILE_P (x))
	return 1;

      /* Fall through.  */

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
	if (rtx_varies_p (XEXP (x, i), for_alias))
	  return 1;
      }
    else if (fmt[i] == 'E')
      {
	int j;
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
	    return 1;
      }

  return 0;
}

/* Compute an approximation for the offset between the registers
   FROM and TO for the current function, as it was at the start
   of the routine.  */

static poly_int64
get_initial_register_offset (int from, int to)
{
  static const struct elim_table_t
  {
    const int from;
    const int to;
  } table[] = ELIMINABLE_REGS;
  poly_int64 offset1, offset2;
  unsigned int i, j;

  if (to == from)
    return 0;

  /* It is not safe to call INITIAL_ELIMINATION_OFFSET before the epilogue
     is completed, but we need to give at least an estimate for the stack
     pointer based on the frame size.  */
  if (!epilogue_completed)
    {
      offset1 = crtl->outgoing_args_size + get_frame_size ();
#if !STACK_GROWS_DOWNWARD
      offset1 = - offset1;
#endif
      if (to == STACK_POINTER_REGNUM)
	return offset1;
      else if (from == STACK_POINTER_REGNUM)
	return - offset1;
      else
	return 0;
    }

  for (i = 0; i < ARRAY_SIZE (table); i++)
      if (table[i].from == from)
	{
	  if (table[i].to == to)
	    {
	      INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
					  offset1);
	      return offset1;
	    }
	  for (j = 0; j < ARRAY_SIZE (table); j++)
	    {
	      if (table[j].to == to
		  && table[j].from == table[i].to)
		{
		  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
					      offset1);
		  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
					      offset2);
		  return offset1 + offset2;
		}
	      if (table[j].from == to
		  && table[j].to == table[i].to)
		{
		  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
					      offset1);
		  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
					      offset2);
		  return offset1 - offset2;
		}
	    }
	}
      else if (table[i].to == from)
	{
	  if (table[i].from == to)
	    {
	      INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
					  offset1);
	      return - offset1;
	    }
	  for (j = 0; j < ARRAY_SIZE (table); j++)
	    {
	      if (table[j].to == to
		  && table[j].from == table[i].from)
		{
		  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
					      offset1);
		  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
					      offset2);
		  return - offset1 + offset2;
		}
	      if (table[j].from == to
		  && table[j].to == table[i].from)
		{
		  INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
					      offset1);
		  INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
					      offset2);
		  return - offset1 - offset2;
		}
	    }
	}

  /* If the requested register combination was not found,
     try a simpler combination.  */
  if (from == ARG_POINTER_REGNUM)
    return get_initial_register_offset (HARD_FRAME_POINTER_REGNUM, to);
  else if (to == ARG_POINTER_REGNUM)
    return get_initial_register_offset (from, HARD_FRAME_POINTER_REGNUM);
  else if (from == HARD_FRAME_POINTER_REGNUM)
    return get_initial_register_offset (FRAME_POINTER_REGNUM, to);
  else if (to == HARD_FRAME_POINTER_REGNUM)
    return get_initial_register_offset (from, FRAME_POINTER_REGNUM);
  else
    return 0;
}
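
/* The composition logic above, in brief: if ELIMINABLE_REGS records
   FROM -> X with offset O1 and TO -> X with offset O2, then the
   FROM -> TO offset is O1 - O2; symmetrically, X -> FROM with O1 and
   X -> TO with O2 compose to -O1 + O2.  */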

/* Return nonzero if the use of X+OFFSET as an address in a MEM with SIZE
   bytes can cause a trap.  MODE is the mode of the MEM (not that of X) and
   UNALIGNED_MEMS controls whether nonzero is returned for unaligned memory
   references on strict alignment machines.  */

static int
rtx_addr_can_trap_p_1 (const_rtx x, poly_int64 offset, poly_int64 size,
		       machine_mode mode, bool unaligned_mems)
{
  enum rtx_code code = GET_CODE (x);
  gcc_checking_assert (mode == BLKmode || known_size_p (size));
  poly_int64 const_x1;

  /* The offset must be a multiple of the mode size if we are considering
     unaligned memory references on strict alignment machines.  */
  if (STRICT_ALIGNMENT && unaligned_mems && mode != BLKmode)
    {
      poly_int64 actual_offset = offset;

#ifdef SPARC_STACK_BOUNDARY_HACK
      /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
	     the real alignment of %sp.  However, when it does this, the
	     alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY.  */
      if (SPARC_STACK_BOUNDARY_HACK
	  && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
	actual_offset -= STACK_POINTER_OFFSET;
#endif

      if (!multiple_p (actual_offset, GET_MODE_SIZE (mode)))
	return 1;
    }

  switch (code)
    {
    case SYMBOL_REF:
      if (SYMBOL_REF_WEAK (x))
	return 1;
      if (!CONSTANT_POOL_ADDRESS_P (x) && !SYMBOL_REF_FUNCTION_P (x))
	{
	  tree decl;
	  poly_int64 decl_size;

	  if (maybe_lt (offset, 0))
	    return 1;
	  if (!known_size_p (size))
	    return maybe_ne (offset, 0);

	  /* If the size of the access or of the symbol is unknown,
	     assume the worst.  */
	  decl = SYMBOL_REF_DECL (x);

	  /* Else check that the access is in bounds.  TODO: restructure
	     expr_size/tree_expr_size/int_expr_size and just use the latter.  */
	  if (!decl)
	    decl_size = -1;
	  else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
	    {
	      if (!poly_int_tree_p (DECL_SIZE_UNIT (decl), &decl_size))
		decl_size = -1;
	    }
	  else if (TREE_CODE (decl) == STRING_CST)
	    decl_size = TREE_STRING_LENGTH (decl);
	  else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
	    decl_size = int_size_in_bytes (TREE_TYPE (decl));
	  else
	    decl_size = -1;

	  return (!known_size_p (decl_size) || known_eq (decl_size, 0)
		  ? maybe_ne (offset, 0)
		  : !known_subrange_p (offset, size, 0, decl_size));
	}

      return 0;

    case LABEL_REF:
      return 0;

    case REG:
      /* Stack references are assumed not to trap, but we need to deal with
	 nonsensical offsets.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
	 || x == stack_pointer_rtx
	 /* The arg pointer varies if it is not a fixed register.  */
	 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
	{
#ifdef RED_ZONE_SIZE
	  poly_int64 red_zone_size = RED_ZONE_SIZE;
#else
	  poly_int64 red_zone_size = 0;
#endif
	  poly_int64 stack_boundary = PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT;
	  poly_int64 low_bound, high_bound;

	  if (!known_size_p (size))
	    return 1;

	  if (x == frame_pointer_rtx)
	    {
	      if (FRAME_GROWS_DOWNWARD)
		{
		  high_bound = targetm.starting_frame_offset ();
		  low_bound  = high_bound - get_frame_size ();
		}
	      else
		{
		  low_bound  = targetm.starting_frame_offset ();
		  high_bound = low_bound + get_frame_size ();
		}
	    }
	  else if (x == hard_frame_pointer_rtx)
	    {
	      poly_int64 sp_offset
		= get_initial_register_offset (STACK_POINTER_REGNUM,
					       HARD_FRAME_POINTER_REGNUM);
	      poly_int64 ap_offset
		= get_initial_register_offset (ARG_POINTER_REGNUM,
					       HARD_FRAME_POINTER_REGNUM);

#if STACK_GROWS_DOWNWARD
	      low_bound  = sp_offset - red_zone_size - stack_boundary;
	      high_bound = ap_offset
			   + FIRST_PARM_OFFSET (current_function_decl)
#if !ARGS_GROW_DOWNWARD
			   + crtl->args.size
#endif
			   + stack_boundary;
#else
	      high_bound = sp_offset + red_zone_size + stack_boundary;
	      low_bound  = ap_offset
			   + FIRST_PARM_OFFSET (current_function_decl)
#if ARGS_GROW_DOWNWARD
			   - crtl->args.size
#endif
			   - stack_boundary;
#endif
	    }
	  else if (x == stack_pointer_rtx)
	    {
	      poly_int64 ap_offset
		= get_initial_register_offset (ARG_POINTER_REGNUM,
					       STACK_POINTER_REGNUM);

#if STACK_GROWS_DOWNWARD
	      low_bound  = - red_zone_size - stack_boundary;
	      high_bound = ap_offset
			   + FIRST_PARM_OFFSET (current_function_decl)
#if !ARGS_GROW_DOWNWARD
			   + crtl->args.size
#endif
			   + stack_boundary;
#else
	      high_bound = red_zone_size + stack_boundary;
	      low_bound  = ap_offset
			   + FIRST_PARM_OFFSET (current_function_decl)
#if ARGS_GROW_DOWNWARD
			   - crtl->args.size
#endif
			   - stack_boundary;
#endif
	    }
	  else
	    {
	      /* We assume that accesses are safe to at least the
		 next stack boundary.
		 Examples are varargs and __builtin_return_address.  */
#if ARGS_GROW_DOWNWARD
	      high_bound = FIRST_PARM_OFFSET (current_function_decl)
			   + stack_boundary;
	      low_bound  = FIRST_PARM_OFFSET (current_function_decl)
			   - crtl->args.size - stack_boundary;
#else
	      low_bound  = FIRST_PARM_OFFSET (current_function_decl)
			   - stack_boundary;
	      high_bound = FIRST_PARM_OFFSET (current_function_decl)
			   + crtl->args.size + stack_boundary;
#endif
	    }

	  if (known_ge (offset, low_bound)
	      && known_le (offset, high_bound - size))
	    return 0;
	  return 1;
	}
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
	  && REGNO (x) <= LAST_VIRTUAL_REGISTER)
	return 0;
      return 1;

    case CONST:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
				    mode, unaligned_mems);

    case PLUS:
      /* An address is assumed not to trap if:
	 - it is the pic register plus a const unspec without offset.  */
      if (XEXP (x, 0) == pic_offset_table_rtx
	  && GET_CODE (XEXP (x, 1)) == CONST
	  && GET_CODE (XEXP (XEXP (x, 1), 0)) == UNSPEC
	  && known_eq (offset, 0))
	return 0;

      /* - or it is an address that can't trap plus a constant integer.  */
      if (poly_int_rtx_p (XEXP (x, 1), &const_x1)
	  && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + const_x1,
				     size, mode, unaligned_mems))
	return 0;

      return 1;

    case LO_SUM:
    case PRE_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
				    mode, unaligned_mems);

    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
				    mode, unaligned_mems);

    default:
      break;
    }

  /* If it isn't one of the cases above, it can cause a trap.  */
  return 1;
}

/* Return nonzero if the use of X as an address in a MEM can cause a trap.  */

int
rtx_addr_can_trap_p (const_rtx x)
{
  return rtx_addr_can_trap_p_1 (x, 0, -1, BLKmode, false);
}
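
/* Illustrative results: a SYMBOL_REF with SYMBOL_REF_WEAK set can trap
   (the symbol may resolve to address zero), so the result is 1; a
   LABEL_REF gives 0; and stack_pointer_rtx gives 1 here because the
   access size is passed as unknown (-1).  */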

/* Return true if X contains a MEM subrtx.  */

bool
contains_mem_rtx_p (rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (MEM_P (*iter))
      return true;

  return false;
}

/* Return true if X is an address that is known to not be zero.  */

bool
nonzero_address_p (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);

  switch (code)
    {
    case SYMBOL_REF:
      return flag_delete_null_pointer_checks && !SYMBOL_REF_WEAK (x);

    case LABEL_REF:
      return true;

    case REG:
      /* As in rtx_varies_p, we have to use the actual rtx, not reg number.  */
      if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
	  || x == stack_pointer_rtx
	  || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
	return true;
      /* All of the virtual frame registers are stack references.  */
      if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
	  && REGNO (x) <= LAST_VIRTUAL_REGISTER)
	return true;
      return false;

    case CONST:
      return nonzero_address_p (XEXP (x, 0));

    case PLUS:
      /* Handle PIC references.  */
      if (XEXP (x, 0) == pic_offset_table_rtx
	       && CONSTANT_P (XEXP (x, 1)))
	return true;
      return false;

    case PRE_MODIFY:
      /* Similar to the above; allow positive offsets.  Further, since
	 auto-inc is only allowed in memories, the register must be a
	 pointer.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) > 0)
	return true;
      return nonzero_address_p (XEXP (x, 0));

    case PRE_INC:
      /* Similarly.  Further, the offset is always positive.  */
      return true;

    case PRE_DEC:
    case POST_DEC:
    case POST_INC:
    case POST_MODIFY:
      return nonzero_address_p (XEXP (x, 0));

    case LO_SUM:
      return nonzero_address_p (XEXP (x, 1));

    default:
      break;
    }

  /* If it isn't one of the cases above, it might be zero.  */
  return false;
}

/* Return 1 if X refers to a memory location whose address
   cannot be compared reliably with constant addresses,
   or if X refers to a BLKmode memory object.
   FOR_ALIAS is nonzero if we are called from alias analysis; if it is
   zero, we are slightly more conservative.  */

bool
rtx_addr_varies_p (const_rtx x, bool for_alias)
{
  enum rtx_code code;
  int i;
  const char *fmt;

  if (x == 0)
    return 0;

  code = GET_CODE (x);
  if (code == MEM)
    return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
	if (rtx_addr_varies_p (XEXP (x, i), for_alias))
	  return 1;
      }
    else if (fmt[i] == 'E')
      {
	int j;
	for (j = 0; j < XVECLEN (x, i); j++)
	  if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
	    return 1;
      }
  return 0;
}

/* Return the CALL in X if there is one.  */

rtx
get_call_rtx_from (rtx x)
{
  if (INSN_P (x))
    x = PATTERN (x);
  if (GET_CODE (x) == PARALLEL)
    x = XVECEXP (x, 0, 0);
  if (GET_CODE (x) == SET)
    x = SET_SRC (x);
  if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
    return x;
  return NULL_RTX;
}

/* Return the value of the integer term in X, if one is apparent;
   otherwise return 0.
   Only obvious integer terms are detected.
   This is used in cse.c with the `related_value' field.  */

HOST_WIDE_INT
get_integer_term (const_rtx x)
{
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  if (GET_CODE (x) == MINUS
      && CONST_INT_P (XEXP (x, 1)))
    return - INTVAL (XEXP (x, 1));
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return INTVAL (XEXP (x, 1));
  return 0;
}

/* If X is a constant, return the value sans apparent integer term;
   otherwise return 0.
   Only obvious integer terms are detected.  */

rtx
get_related_value (const_rtx x)
{
  if (GET_CODE (x) != CONST)
    return 0;
  x = XEXP (x, 0);
  if (GET_CODE (x) == PLUS
      && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  else if (GET_CODE (x) == MINUS
	   && CONST_INT_P (XEXP (x, 1)))
    return XEXP (x, 0);
  return 0;
}
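
/* For example, given X = (const (plus (symbol_ref "a") (const_int 8)))
   for some arbitrary symbol "a", get_integer_term (X) returns 8 and
   get_related_value (X) returns the SYMBOL_REF; with MINUS instead of
   PLUS the term would be -8 and the related value the same.  */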

/* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
   to somewhere in the same object or object_block as SYMBOL.  */

bool
offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
{
  tree decl;

  if (GET_CODE (symbol) != SYMBOL_REF)
    return false;

  if (offset == 0)
    return true;

  if (offset > 0)
    {
      if (CONSTANT_POOL_ADDRESS_P (symbol)
	  && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
	return true;

      decl = SYMBOL_REF_DECL (symbol);
      if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
	return true;
    }

  if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
      && SYMBOL_REF_BLOCK (symbol)
      && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
      && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
	  < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
    return true;

  return false;
}

/* Split X into a base and a constant offset, storing them in *BASE_OUT
   and *OFFSET_OUT respectively.  */

void
split_const (rtx x, rtx *base_out, rtx *offset_out)
{
  if (GET_CODE (x) == CONST)
    {
      x = XEXP (x, 0);
      if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
	{
	  *base_out = XEXP (x, 0);
	  *offset_out = XEXP (x, 1);
	  return;
	}
    }
  *base_out = x;
  *offset_out = const0_rtx;
}
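
/* For instance (with an arbitrary symbol "a" and pseudo 100):

     split_const on (const (plus (symbol_ref "a") (const_int -4)))
       yields *BASE_OUT = (symbol_ref "a"), *OFFSET_OUT = (const_int -4);
     split_const on (reg:SI 100)
       yields *BASE_OUT = (reg:SI 100), *OFFSET_OUT = const0_rtx.  */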

/* Express integer value X as some value Y plus a polynomial offset,
   where Y is either const0_rtx, X or something within X (as opposed
   to a new rtx).  Return the Y and store the offset in *OFFSET_OUT.  */

rtx
strip_offset (rtx x, poly_int64_pod *offset_out)
{
  rtx base = const0_rtx;
  rtx test = x;
  if (GET_CODE (test) == CONST)
    test = XEXP (test, 0);
  if (GET_CODE (test) == PLUS)
    {
      base = XEXP (test, 0);
      test = XEXP (test, 1);
    }
  if (poly_int_rtx_p (test, offset_out))
    return base;
  *offset_out = 0;
  return x;
}
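
/* For example, stripping (const (plus (symbol_ref "a") (const_int 16)))
   returns the SYMBOL_REF with *OFFSET_OUT == 16, while stripping a bare
   (const_int 12) returns const0_rtx with *OFFSET_OUT == 12.  */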

/* Return the argument size in REG_ARGS_SIZE note X.  */

poly_int64
get_args_size (const_rtx x)
{
  gcc_checking_assert (REG_NOTE_KIND (x) == REG_ARGS_SIZE);
  return rtx_to_poly_int64 (XEXP (x, 0));
}

/* Return the number of places FIND appears within X.  If COUNT_DEST is
   zero, we do not count occurrences inside the destination of a SET.  */

int
count_occurrences (const_rtx x, const_rtx find, int count_dest)
{
  int i, j;
  enum rtx_code code;
  const char *format_ptr;
  int count;

  if (x == find)
    return 1;

  code = GET_CODE (x);

  switch (code)
    {
    case REG:
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case CODE_LABEL:
    case PC:
    case CC0:
      return 0;

    case EXPR_LIST:
      count = count_occurrences (XEXP (x, 0), find, count_dest);
      if (XEXP (x, 1))
	count += count_occurrences (XEXP (x, 1), find, count_dest);
      return count;

    case MEM:
      if (MEM_P (find) && rtx_equal_p (x, find))
	return 1;
      break;

    case SET:
      if (SET_DEST (x) == find && ! count_dest)
	return count_occurrences (SET_SRC (x), find, count_dest);
      break;

    default:
      break;
    }

  format_ptr = GET_RTX_FORMAT (code);
  count = 0;

  for (i = 0; i < GET_RTX_LENGTH (code); i++)
    {
      switch (*format_ptr++)
	{
	case 'e':
	  count += count_occurrences (XEXP (x, i), find, count_dest);
	  break;

	case 'E':
	  for (j = 0; j < XVECLEN (x, i); j++)
	    count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
	  break;
	}
    }
  return count;
}
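
/* Note that non-MEM subexpressions match by pointer equality, which
   works for registers because register rtxes are shared: e.g. counting
   (reg:SI 1) in (set (reg:SI 1) (reg:SI 1)) with COUNT_DEST == 0
   gives 1, since only the source occurrence is counted.  */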


/* Return TRUE if OP is a register or subreg of a register that
   holds an unsigned quantity.  Otherwise, return FALSE.  */

bool
unsigned_reg_p (rtx op)
{
  if (REG_P (op)
      && REG_EXPR (op)
      && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
    return true;

  if (GET_CODE (op) == SUBREG
      && SUBREG_PROMOTED_SIGN (op))
    return true;

  return false;
}


/* Nonzero if register REG appears somewhere within IN.
   Also works if REG is not a register; in this case it checks
   for a subexpression of IN that is Lisp "equal" to REG.  */

int
reg_mentioned_p (const_rtx reg, const_rtx in)
{
  const char *fmt;
  int i;
  enum rtx_code code;

  if (in == 0)
    return 0;

  if (reg == in)
    return 1;

  if (GET_CODE (in) == LABEL_REF)
    return reg == label_ref_label (in);

  code = GET_CODE (in);

  switch (code)
    {
      /* Compare registers by number.  */
    case REG:
      return REG_P (reg) && REGNO (in) == REGNO (reg);

      /* These codes have no constituent expressions
	 and are unique.  */
    case SCRATCH:
    case CC0:
    case PC:
      return 0;

    CASE_CONST_ANY:
      /* These are kept unique for a given value.  */
      return 0;

    default:
      break;
    }

  if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
    return 1;

  fmt = GET_RTX_FORMAT (code);

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
	{
	  int j;
	  for (j = XVECLEN (in, i) - 1; j >= 0; j--)
	    if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
	      return 1;
	}
      else if (fmt[i] == 'e'
	       && reg_mentioned_p (reg, XEXP (in, i)))
	return 1;
    }
  return 0;
}
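
/* Since REGs are compared only by number, reg_mentioned_p returns 1
   for (reg:SI 1) within (plus (reg:DI 1) (const_int 4)) even though
   the modes differ.  */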

/* Return 1 if in between BEG and END, exclusive of BEG and END, there is
   no CODE_LABEL insn.  */

int
no_labels_between_p (const rtx_insn *beg, const rtx_insn *end)
{
  rtx_insn *p;
  if (beg == end)
    return 0;
  for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
    if (LABEL_P (p))
      return 0;
  return 1;
}

/* Nonzero if register REG is used in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_used_between_p (const_rtx reg, const rtx_insn *from_insn,
		    const rtx_insn *to_insn)
{
  rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn)
	&& (reg_overlap_mentioned_p (reg, PATTERN (insn))
	   || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
      return 1;
  return 0;
}

/* Nonzero if the old value of X, a register, is referenced in BODY.  If X
   is entirely replaced by a new value and the only use is as a SET_DEST,
   we do not consider it a reference.  */

int
reg_referenced_p (const_rtx x, const_rtx body)
{
  int i;

  switch (GET_CODE (body))
    {
    case SET:
      if (reg_overlap_mentioned_p (x, SET_SRC (body)))
	return 1;

      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
	 of a REG that occupies all of the REG, the insn references X if
	 it is mentioned in the destination.  */
      if (GET_CODE (SET_DEST (body)) != CC0
	  && GET_CODE (SET_DEST (body)) != PC
	  && !REG_P (SET_DEST (body))
	  && ! (GET_CODE (SET_DEST (body)) == SUBREG
		&& REG_P (SUBREG_REG (SET_DEST (body)))
		&& !read_modify_subreg_p (SET_DEST (body)))
	  && reg_overlap_mentioned_p (x, SET_DEST (body)))
	return 1;
      return 0;

    case ASM_OPERANDS:
      for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
	if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
	  return 1;
      return 0;

    case CALL:
    case USE:
    case IF_THEN_ELSE:
      return reg_overlap_mentioned_p (x, body);

    case TRAP_IF:
      return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));

    case PREFETCH:
      return reg_overlap_mentioned_p (x, XEXP (body, 0));

    case UNSPEC:
    case UNSPEC_VOLATILE:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
	if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
	  return 1;
      return 0;

    case PARALLEL:
      for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
	if (reg_referenced_p (x, XVECEXP (body, 0, i)))
	  return 1;
      return 0;

    case CLOBBER:
      if (MEM_P (XEXP (body, 0)))
	if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
	  return 1;
      return 0;

    case CLOBBER_HIGH:
      gcc_assert (REG_P (XEXP (body, 0)));
      return 0;

    case COND_EXEC:
      if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
	return 1;
      return reg_referenced_p (x, COND_EXEC_CODE (body));

    default:
      return 0;
    }
}

/* Nonzero if register REG is set or clobbered in an insn between
   FROM_INSN and TO_INSN (exclusive of those two).  */

int
reg_set_between_p (const_rtx reg, const rtx_insn *from_insn,
		   const rtx_insn *to_insn)
{
  const rtx_insn *insn;

  if (from_insn == to_insn)
    return 0;

  for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && reg_set_p (reg, insn))
      return 1;
  return 0;
}

/* Return true if REG is set or clobbered inside INSN.  */

int
reg_set_p (const_rtx reg, const_rtx insn)
{
  /* After delay slot handling, call and branch insns might be in a
     sequence.  Check all the elements there.  */
  if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
    {
      for (int i = 0; i < XVECLEN (PATTERN (insn), 0); ++i)
	if (reg_set_p (reg, XVECEXP (PATTERN (insn), 0, i)))
	  return true;

      return false;
    }

  /* We can be passed an insn or part of one.  If we are passed an insn,
     check if a side-effect of the insn clobbers REG.  */
  if (INSN_P (insn)
      && (FIND_REG_INC_NOTE (insn, reg)
	  || (CALL_P (insn)
	      && ((REG_P (reg)
		   && REGNO (reg) < FIRST_PSEUDO_REGISTER
		   && overlaps_hard_reg_set_p (regs_invalidated_by_call,
					       GET_MODE (reg), REGNO (reg)))
		  || MEM_P (reg)
		  || find_reg_fusage (insn, CLOBBER, reg)))))
    return true;

  /* There are no REG_INC notes for SP autoinc.  */
  if (reg == stack_pointer_rtx && INSN_P (insn))
    {
      subrtx_var_iterator::array_type array;
      FOR_EACH_SUBRTX_VAR (iter, array, PATTERN (insn), NONCONST)
	{
	  rtx mem = *iter;
	  if (mem
	      && MEM_P (mem)
	      && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
	    {
	      if (XEXP (XEXP (mem, 0), 0) == stack_pointer_rtx)
		return true;
	      iter.skip_subrtxes ();
	    }
	}
    }

  return set_of (reg, insn) != NULL_RTX;
}

/* Similar to reg_set_between_p, but check all registers in X.  Return 0
   only if none of them are modified between START and END.  Return 1 if
   X contains a MEM; this routine does use memory aliasing.  */

int
modified_between_p (const_rtx x, const rtx_insn *start, const rtx_insn *end)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;
  rtx_insn *insn;

  if (start == end)
    return 0;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_between_p (XEXP (x, 0), start, end))
	return 1;
      if (MEM_READONLY_P (x))
	return 0;
      for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
	if (memory_modified_in_insn_p (x, insn))
	  return 1;
      return 0;

    case REG:
      return reg_set_between_p (x, start, end);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
	return 1;

      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  if (modified_between_p (XVECEXP (x, i, j), start, end))
	    return 1;
    }

  return 0;
}

/* Similar to reg_set_p, but check all registers in X.  Return 0 only if none
   of them are modified in INSN.  Return 1 if X contains a MEM; this routine
   does use memory aliasing.  */

int
modified_in_p (const_rtx x, const_rtx insn)
{
  const enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j;

  switch (code)
    {
    CASE_CONST_ANY:
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 0;

    case PC:
    case CC0:
      return 1;

    case MEM:
      if (modified_in_p (XEXP (x, 0), insn))
	return 1;
      if (MEM_READONLY_P (x))
	return 0;
      if (memory_modified_in_insn_p (x, insn))
	return 1;
      return 0;

    case REG:
      return reg_set_p (x, insn);

    default:
      break;
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
	return 1;

      else if (fmt[i] == 'E')
	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	  if (modified_in_p (XVECEXP (x, i, j), insn))
	    return 1;
    }

  return 0;
}

/* Return true if X is a SUBREG and if storing a value to X would
   preserve some of its SUBREG_REG.  For example, on a normal 32-bit
   target, using a SUBREG to store to one half of a DImode REG would
   preserve the other half.  */

bool
read_modify_subreg_p (const_rtx x)
{
  if (GET_CODE (x) != SUBREG)
    return false;
  poly_uint64 isize = GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)));
  poly_uint64 osize = GET_MODE_SIZE (GET_MODE (x));
  poly_uint64 regsize = REGMODE_NATURAL_SIZE (GET_MODE (SUBREG_REG (x)));
  /* The inner and outer modes of a subreg must be ordered, so that we
     can tell whether they're paradoxical or partial.  */
  gcc_checking_assert (ordered_p (isize, osize));
  return (maybe_gt (isize, osize) && maybe_gt (isize, regsize));
}
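
/* Concretely, on such a 32-bit target:

     (subreg:SI (reg:DI 100) 0)  -> true: writing one word leaves the
	other word of the DImode value intact;
     (subreg:DI (reg:SI 100) 0)  -> false: a paradoxical SUBREG has no
	inner bits left to preserve.  */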

/* Helper function for set_of.  */
struct set_of_data
  {
    const_rtx found;
    const_rtx pat;
  };

static void
set_of_1 (rtx x, const_rtx pat, void *data1)
{
  struct set_of_data *const data = (struct set_of_data *) (data1);
  if (rtx_equal_p (x, data->pat)
      || (GET_CODE (pat) == CLOBBER_HIGH
	  && REGNO (data->pat) == REGNO (XEXP (pat, 0))
	  && reg_is_clobbered_by_clobber_high (data->pat, XEXP (pat, 0)))
      || (GET_CODE (pat) != CLOBBER_HIGH && !MEM_P (x)
	  && reg_overlap_mentioned_p (data->pat, x)))
    data->found = pat;
}

/* Given an INSN, return a SET or CLOBBER expression that does modify PAT
   (either directly or via STRICT_LOW_PART and similar modifiers).  */
const_rtx
set_of (const_rtx pat, const_rtx insn)
{
  struct set_of_data data;
  data.found = NULL_RTX;
  data.pat = pat;
  note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
  return data.found;
}
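
/* For example, if INSN's pattern is
     (parallel [(set (reg:SI 1) (const_int 0))
		(clobber (reg:SI 2))])
   then set_of ((reg:SI 1), insn) returns the SET and
   set_of ((reg:SI 2), insn) returns the CLOBBER.  */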

/* Add all hard registers in X to *PSET.  */
void
find_all_hard_regs (const_rtx x, HARD_REG_SET *pset)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, NONCONST)
    {
      const_rtx x = *iter;
      if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
	add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
    }
}

/* This function, called through note_stores, collects sets and
   clobbers of hard registers in a HARD_REG_SET, which is pointed to
   by DATA.  */
void
record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
{
  HARD_REG_SET *pset = (HARD_REG_SET *)data;
  if (REG_P (x) && HARD_REGISTER_P (x))
    add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
}

/* Examine INSN, and compute the set of hard registers written by it.
   Store it in *PSET.  Should only be called after reload.  */
void
find_all_hard_reg_sets (const rtx_insn *insn, HARD_REG_SET *pset, bool implicit)
{
  rtx link;

  CLEAR_HARD_REG_SET (*pset);
  note_stores (PATTERN (insn), record_hard_reg_sets, pset);
  if (CALL_P (insn))
    {
      if (implicit)
	IOR_HARD_REG_SET (*pset, call_used_reg_set);

      for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
	record_hard_reg_sets (XEXP (link, 0), NULL, pset);
    }
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_INC)
      record_hard_reg_sets (XEXP (link, 0), NULL, pset);
}

/* Like record_hard_reg_sets, but called through note_uses.  */
void
record_hard_reg_uses (rtx *px, void *data)
{
  find_all_hard_regs (*px, (HARD_REG_SET *) data);
}

/* Given an INSN, return a SET expression if this insn has only a single SET.
   It may also have CLOBBERs, USEs, or SETs whose output
   will not be used, which we ignore.  */

rtx
single_set_2 (const rtx_insn *insn, const_rtx pat)
{
  rtx set = NULL;
  int set_verified = 1;
  int i;

  if (GET_CODE (pat) == PARALLEL)
    {
      for (i = 0; i < XVECLEN (pat, 0); i++)
	{
	  rtx sub = XVECEXP (pat, 0, i);
	  switch (GET_CODE (sub))
	    {
	    case USE:
	    case CLOBBER:
	    case CLOBBER_HIGH:
	      break;

	    case SET:
	      /* An insn with multiple sets, all but one of which are
		 dead, can be treated as a single-set insn.  In the
		 common case only one set is present in the pattern,
		 so we want to avoid checking for REG_UNUSED notes
		 unless necessary.

		 When we reach a set the first time, we just expect it
		 to be the single set we are looking for; only when
		 more sets are found in the insn do we check them.  */
	      if (!set_verified)
		{
		  if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
		      && !side_effects_p (set))
		    set = NULL;
		  else
		    set_verified = 1;
		}
	      if (!set)
		set = sub, set_verified = 0;
	      else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
		       || side_effects_p (sub))
		return NULL_RTX;
	      break;

	    default:
	      return NULL_RTX;
	    }
	}
    }
  return set;
}
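
/* Illustrative patterns:

     (parallel [(set (reg 1) (reg 2)) (clobber (reg 3))])
       -> the SET is returned;
     (parallel [(set (reg 1) (reg 2)) (set (reg 3) (reg 4))])
       -> NULL_RTX, unless one SET's destination carries a REG_UNUSED
	  note and that SET has no side effects.  */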

/* Given an INSN, return nonzero if it has more than one SET, else return
   zero.  */

int
multiple_sets (const_rtx insn)
{
  int found;
  int i;

  /* INSN must be an insn.  */
  if (! INSN_P (insn))
    return 0;

  /* Only a PARALLEL can have multiple SETs.  */
  if (GET_CODE (PATTERN (insn)) == PARALLEL)
    {
      for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
	if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
	  {
	    /* If we have already found a SET, then return now.  */
	    if (found)
	      return 1;
	    else
	      found = 1;
	  }
    }

  /* Either zero or one SET.  */
  return 0;
}

/* Return nonzero if the destination of SET equals the source
   and there are no side effects.  */

int
set_noop_p (const_rtx set)
{
  rtx src = SET_SRC (set);
  rtx dst = SET_DEST (set);

  if (dst == pc_rtx && src == pc_rtx)
    return 1;

  if (MEM_P (dst) && MEM_P (src))
    return rtx_equal_p (dst, src) && !side_effects_p (dst);

  if (GET_CODE (dst) == ZERO_EXTRACT)
    return rtx_equal_p (XEXP (dst, 0), src)
	   && !BITS_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
	   && !side_effects_p (src);

  if (GET_CODE (dst) == STRICT_LOW_PART)
    dst = XEXP (dst, 0);

  if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
    {
      if (maybe_ne (SUBREG_BYTE (src), SUBREG_BYTE (dst)))
	return 0;
      src = SUBREG_REG (src);
      dst = SUBREG_REG (dst);
    }

  /* It is a no-op if the destination is a hard register that exactly
     overlaps the selected elements of a hard register source vector.  */
  if (GET_CODE (src) == VEC_SELECT
      && REG_P (XEXP (src, 0)) && REG_P (dst)
      && HARD_REGISTER_P (XEXP (src, 0))
      && HARD_REGISTER_P (dst))
    {
      int i;
      rtx par = XEXP (src, 1);
      rtx src0 = XEXP (src, 0);
      poly_int64 c0 = rtx_to_poly_int64 (XVECEXP (par, 0, 0));
      poly_int64 offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;

      for (i = 1; i < XVECLEN (par, 0); i++)
	if (maybe_ne (rtx_to_poly_int64 (XVECEXP (par, 0, i)), c0 + i))
	  return 0;
      return
	REG_CAN_CHANGE_MODE_P (REGNO (dst), GET_MODE (src0), GET_MODE (dst))
	&& simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
				  offset, GET_MODE (dst)) == (int) REGNO (dst);
    }

  return (REG_P (src) && REG_P (dst)
	  && REGNO (src) == REGNO (dst));
}
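
/* For example, (set (reg:SI 1) (reg:SI 1)) is a no-op, as is
   (set (subreg:SI (reg:DI 1) 0) (subreg:SI (reg:DI 1) 0)), while
   (set (reg:SI 1) (reg:SI 2)) is not.  */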
1642 
1643 /* Return nonzero if an insn consists only of SETs, each of which only sets a
1644    value to itself.  */
1645 
1646 int
noop_move_p(const rtx_insn * insn)1647 noop_move_p (const rtx_insn *insn)
1648 {
1649   rtx pat = PATTERN (insn);
1650 
1651   if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
1652     return 1;
1653 
1654   /* Insns carrying these notes are useful later on.  */
1655   if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
1656     return 0;
1657 
1658   /* Check the code to be executed for COND_EXEC.  */
1659   if (GET_CODE (pat) == COND_EXEC)
1660     pat = COND_EXEC_CODE (pat);
1661 
1662   if (GET_CODE (pat) == SET && set_noop_p (pat))
1663     return 1;
1664 
1665   if (GET_CODE (pat) == PARALLEL)
1666     {
1667       int i;
1668       /* If nothing but SETs of registers to themselves,
1669 	 this insn can also be deleted.  */
1670       for (i = 0; i < XVECLEN (pat, 0); i++)
1671 	{
1672 	  rtx tem = XVECEXP (pat, 0, i);
1673 
1674 	  if (GET_CODE (tem) == USE
1675 	      || GET_CODE (tem) == CLOBBER
1676 	      || GET_CODE (tem) == CLOBBER_HIGH)
1677 	    continue;
1678 
1679 	  if (GET_CODE (tem) != SET || ! set_noop_p (tem))
1680 	    return 0;
1681 	}
1682 
1683       return 1;
1684     }
1685   return 0;
1686 }
1687 
1688 
1689 /* Return nonzero if register in range [REGNO, ENDREGNO)
1690    appears either explicitly or implicitly in X
1691    other than being stored into.
1692 
1693    References contained within the substructure at LOC do not count.
1694    LOC may be zero, meaning don't ignore anything.  */
1695 
1696 bool
refers_to_regno_p(unsigned int regno,unsigned int endregno,const_rtx x,rtx * loc)1697 refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
1698 		   rtx *loc)
1699 {
1700   int i;
1701   unsigned int x_regno;
1702   RTX_CODE code;
1703   const char *fmt;
1704 
1705  repeat:
1706   /* The contents of a REG_NONNEG note is always zero, so we must come here
1707      upon repeat in case the last REG_NOTE is a REG_NONNEG note.  */
1708   if (x == 0)
1709     return false;
1710 
1711   code = GET_CODE (x);
1712 
1713   switch (code)
1714     {
1715     case REG:
1716       x_regno = REGNO (x);
1717 
1718       /* If we modifying the stack, frame, or argument pointer, it will
1719 	 clobber a virtual register.  In fact, we could be more precise,
1720 	 but it isn't worth it.  */
1721       if ((x_regno == STACK_POINTER_REGNUM
1722 	   || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1723 	       && x_regno == ARG_POINTER_REGNUM)
1724 	   || x_regno == FRAME_POINTER_REGNUM)
1725 	  && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
1726 	return true;
1727 
1728       return endregno > x_regno && regno < END_REGNO (x);
1729 
1730     case SUBREG:
1731       /* If this is a SUBREG of a hard reg, we can see exactly which
1732 	 registers are being modified.  Otherwise, handle normally.  */
1733       if (REG_P (SUBREG_REG (x))
1734 	  && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
1735 	{
1736 	  unsigned int inner_regno = subreg_regno (x);
1737 	  unsigned int inner_endregno
1738 	    = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
1739 			     ? subreg_nregs (x) : 1);
1740 
1741 	  return endregno > inner_regno && regno < inner_endregno;
1742 	}
1743       break;
1744 
1745     case CLOBBER:
1746     case SET:
1747       if (&SET_DEST (x) != loc
1748 	  /* Note setting a SUBREG counts as referring to the REG it is in for
1749 	     a pseudo but not for hard registers since we can
1750 	     treat each word individually.  */
1751 	  && ((GET_CODE (SET_DEST (x)) == SUBREG
1752 	       && loc != &SUBREG_REG (SET_DEST (x))
1753 	       && REG_P (SUBREG_REG (SET_DEST (x)))
1754 	       && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
1755 	       && refers_to_regno_p (regno, endregno,
1756 				     SUBREG_REG (SET_DEST (x)), loc))
1757 	      || (!REG_P (SET_DEST (x))
1758 		  && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
1759 	return true;
1760 
1761       if (code == CLOBBER || loc == &SET_SRC (x))
1762 	return false;
1763       x = SET_SRC (x);
1764       goto repeat;
1765 
1766     default:
1767       break;
1768     }
1769 
1770   /* X does not match, so try its subexpressions.  */
1771 
1772   fmt = GET_RTX_FORMAT (code);
1773   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1774     {
1775       if (fmt[i] == 'e' && loc != &XEXP (x, i))
1776 	{
1777 	  if (i == 0)
1778 	    {
1779 	      x = XEXP (x, 0);
1780 	      goto repeat;
1781 	    }
1782 	  else
1783 	    if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
1784 	      return true;
1785 	}
1786       else if (fmt[i] == 'E')
1787 	{
1788 	  int j;
1789 	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1790 	    if (loc != &XVECEXP (x, i, j)
1791 		&& refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
1792 	      return true;
1793 	}
1794     }
1795   return false;
1796 }
1797 
1798 /* Nonzero if modifying X will affect IN.  If X is a register or a SUBREG,
1799    we check if any register number in X conflicts with the relevant register
1800    numbers.  If X is a constant, return 0.  If X is a MEM, return 1 iff IN
1801    contains a MEM (we don't bother checking for memory addresses that can't
1802    conflict because we expect this to be a rare case).  */
1803 
1804 int
1805 reg_overlap_mentioned_p (const_rtx x, const_rtx in)
1806 {
1807   unsigned int regno, endregno;
1808 
1809   /* If either argument is a constant, then modifying X cannot
1810      affect IN.  We check IN here; the CONSTANT_P test of X is
1811      profitably combined with the switch statement below.  */
1812   if (CONSTANT_P (in))
1813     return 0;
1814 
1815  recurse:
1816   switch (GET_CODE (x))
1817     {
1818     case CLOBBER:
1819     case STRICT_LOW_PART:
1820     case ZERO_EXTRACT:
1821     case SIGN_EXTRACT:
1822       /* Overly conservative.  */
1823       x = XEXP (x, 0);
1824       goto recurse;
1825 
1826     case SUBREG:
1827       regno = REGNO (SUBREG_REG (x));
1828       if (regno < FIRST_PSEUDO_REGISTER)
1829 	regno = subreg_regno (x);
1830       endregno = regno + (regno < FIRST_PSEUDO_REGISTER
1831 			  ? subreg_nregs (x) : 1);
1832       goto do_reg;
1833 
1834     case REG:
1835       regno = REGNO (x);
1836       endregno = END_REGNO (x);
1837     do_reg:
1838       return refers_to_regno_p (regno, endregno, in, (rtx*) 0);
1839 
1840     case MEM:
1841       {
1842 	const char *fmt;
1843 	int i;
1844 
1845 	if (MEM_P (in))
1846 	  return 1;
1847 
1848 	fmt = GET_RTX_FORMAT (GET_CODE (in));
1849 	for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
1850 	  if (fmt[i] == 'e')
1851 	    {
1852 	      if (reg_overlap_mentioned_p (x, XEXP (in, i)))
1853 		return 1;
1854 	    }
1855 	  else if (fmt[i] == 'E')
1856 	    {
1857 	      int j;
1858 	      for (j = XVECLEN (in, i) - 1; j >= 0; --j)
1859 		if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
1860 		  return 1;
1861 	    }
1862 
1863 	return 0;
1864       }
1865 
1866     case SCRATCH:
1867     case PC:
1868     case CC0:
1869       return reg_mentioned_p (x, in);
1870 
1871     case PARALLEL:
1872       {
1873 	int i;
1874 
1875 	/* If any register in here refers to it we return true.  */
1876 	for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1877 	  if (XEXP (XVECEXP (x, 0, i), 0) != 0
1878 	      && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
1879 	    return 1;
1880 	return 0;
1881       }
1882 
1883     default:
1884       gcc_assert (CONSTANT_P (x));
1885       return 0;
1886     }
1887 }
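
/* For example, on a target where (reg:DI 0) occupies hard registers 0 and 1,
   reg_overlap_mentioned_p ((reg:SI 1), (reg:DI 0)) returns 1, since modifying
   (reg:SI 1) clobbers part of the DImode value.  (Illustrative sketch; the
   register numbering is hypothetical.)  */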
1888 
1889 /* Call FUN on each register or MEM that is stored into or clobbered by X.
1890    (X would be the pattern of an insn).  DATA is an arbitrary pointer,
1891    ignored by note_stores, but passed to FUN.
1892 
1893    FUN receives three arguments:
1894    1. the REG, MEM, CC0 or PC being stored in or clobbered,
1895    2. the SET or CLOBBER rtx that does the store,
1896    3. the pointer DATA provided to note_stores.
1897 
1898   If the item being stored in or clobbered is a SUBREG of a hard register,
1899   the SUBREG will be passed.  */
1900 
1901 void
1902 note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
1903 {
1904   int i;
1905 
1906   if (GET_CODE (x) == COND_EXEC)
1907     x = COND_EXEC_CODE (x);
1908 
1909   if (GET_CODE (x) == SET
1910       || GET_CODE (x) == CLOBBER
1911       || GET_CODE (x) == CLOBBER_HIGH)
1912     {
1913       rtx dest = SET_DEST (x);
1914 
1915       while ((GET_CODE (dest) == SUBREG
1916 	      && (!REG_P (SUBREG_REG (dest))
1917 		  || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
1918 	     || GET_CODE (dest) == ZERO_EXTRACT
1919 	     || GET_CODE (dest) == STRICT_LOW_PART)
1920 	dest = XEXP (dest, 0);
1921 
1922       /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
1923 	 each of whose first operand is a register.  */
1924       if (GET_CODE (dest) == PARALLEL)
1925 	{
1926 	  for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
1927 	    if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
1928 	      (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
1929 	}
1930       else
1931 	(*fun) (dest, x, data);
1932     }
1933 
1934   else if (GET_CODE (x) == PARALLEL)
1935     for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1936       note_stores (XVECEXP (x, 0, i), fun, data);
1937 }
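
/* A minimal usage sketch, not from this file: count how many registers an
   insn stores into.  `count_reg_stores' is a hypothetical callback.

     static void
     count_reg_stores (rtx dest, const_rtx setter ATTRIBUTE_UNUSED, void *data)
     {
       if (REG_P (dest))
	 ++*(int *) data;
     }

     int count = 0;
     note_stores (PATTERN (insn), count_reg_stores, &count);  */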
1938 
1939 /* Like note_stores, but call FUN for each expression that is being
1940    referenced in PBODY, a pointer to the PATTERN of an insn.  We call FUN
1941    only for each top-level expression, not for any interior subexpressions.
1942    FUN receives a pointer to the expression and the DATA passed to this function.
1943 
1944    Note that this is not quite the same test as that done in reg_referenced_p
1945    since that considers something as being referenced if it is being
1946    partially set, while we do not.  */
1947 
1948 void
1949 note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
1950 {
1951   rtx body = *pbody;
1952   int i;
1953 
1954   switch (GET_CODE (body))
1955     {
1956     case COND_EXEC:
1957       (*fun) (&COND_EXEC_TEST (body), data);
1958       note_uses (&COND_EXEC_CODE (body), fun, data);
1959       return;
1960 
1961     case PARALLEL:
1962       for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1963 	note_uses (&XVECEXP (body, 0, i), fun, data);
1964       return;
1965 
1966     case SEQUENCE:
1967       for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1968 	note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
1969       return;
1970 
1971     case USE:
1972       (*fun) (&XEXP (body, 0), data);
1973       return;
1974 
1975     case ASM_OPERANDS:
1976       for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
1977 	(*fun) (&ASM_OPERANDS_INPUT (body, i), data);
1978       return;
1979 
1980     case TRAP_IF:
1981       (*fun) (&TRAP_CONDITION (body), data);
1982       return;
1983 
1984     case PREFETCH:
1985       (*fun) (&XEXP (body, 0), data);
1986       return;
1987 
1988     case UNSPEC:
1989     case UNSPEC_VOLATILE:
1990       for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1991 	(*fun) (&XVECEXP (body, 0, i), data);
1992       return;
1993 
1994     case CLOBBER:
1995       if (MEM_P (XEXP (body, 0)))
1996 	(*fun) (&XEXP (XEXP (body, 0), 0), data);
1997       return;
1998 
1999     case SET:
2000       {
2001 	rtx dest = SET_DEST (body);
2002 
2003 	/* For a SET we process everything in the source, plus the registers
2004 	   in a memory destination's address and the operands of a ZERO_EXTRACT.  */
2005 	(*fun) (&SET_SRC (body), data);
2006 
2007 	if (GET_CODE (dest) == ZERO_EXTRACT)
2008 	  {
2009 	    (*fun) (&XEXP (dest, 1), data);
2010 	    (*fun) (&XEXP (dest, 2), data);
2011 	  }
2012 
2013 	while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
2014 	  dest = XEXP (dest, 0);
2015 
2016 	if (MEM_P (dest))
2017 	  (*fun) (&XEXP (dest, 0), data);
2018       }
2019       return;
2020 
2021     default:
2022       /* All the other possibilities never store.  */
2023       (*fun) (pbody, data);
2024       return;
2025     }
2026 }
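
/* A minimal usage sketch, not from this file: record every register
   mentioned in an insn's uses.  `mark_mentioned_regs' and the bitmap
   USED_REGS are hypothetical; FOR_EACH_SUBRTX_VAR comes from rtl-iter.h.

     static void
     mark_mentioned_regs (rtx *loc, void *data)
     {
       subrtx_var_iterator::array_type array;
       FOR_EACH_SUBRTX_VAR (iter, array, *loc, NONCONST)
	 if (REG_P (*iter))
	   bitmap_set_bit ((bitmap) data, REGNO (*iter));
     }

     note_uses (&PATTERN (insn), mark_mentioned_regs, used_regs);  */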
2027 
2028 /* Return nonzero if X's old contents don't survive after INSN.
2029    This will be true if X is (cc0), or if X is a register and
2030    either X dies in INSN or INSN entirely sets X.
2031 
2032    "Entirely set" means set directly and not through a SUBREG or
2033    ZERO_EXTRACT, so that no trace of the old contents remains.
2034    Likewise, REG_INC does not count.
2035 
2036    REG may be a hard or pseudo reg.  Renumbering is not taken into account,
2037    but for this use that makes no difference, since regs don't overlap
2038    during their lifetimes.  Therefore, this function may be used
2039    at any time after deaths have been computed.
2040 
2041    If REG is a hard reg that occupies multiple machine registers, this
2042    function will only return 1 if each of those registers will be replaced
2043    by INSN.  */
2044 
2045 int
2046 dead_or_set_p (const rtx_insn *insn, const_rtx x)
2047 {
2048   unsigned int regno, end_regno;
2049   unsigned int i;
2050 
2051   /* Can't use cc0_rtx below since this file is used by genattrtab.c.  */
2052   if (GET_CODE (x) == CC0)
2053     return 1;
2054 
2055   gcc_assert (REG_P (x));
2056 
2057   regno = REGNO (x);
2058   end_regno = END_REGNO (x);
2059   for (i = regno; i < end_regno; i++)
2060     if (! dead_or_set_regno_p (insn, i))
2061       return 0;
2062 
2063   return 1;
2064 }
2065 
2066 /* Return TRUE iff DEST is a register or subreg of a register, is a
2067    complete rather than read-modify-write destination, and contains
2068    register TEST_REGNO.  */
2069 
2070 static bool
2071 covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
2072 {
2073   unsigned int regno, endregno;
2074 
2075   if (GET_CODE (dest) == SUBREG && !read_modify_subreg_p (dest))
2076     dest = SUBREG_REG (dest);
2077 
2078   if (!REG_P (dest))
2079     return false;
2080 
2081   regno = REGNO (dest);
2082   endregno = END_REGNO (dest);
2083   return (test_regno >= regno && test_regno < endregno);
2084 }
2085 
2086 /* Like covers_regno_no_parallel_p, but also handles PARALLELs where
2087    any member matches the covers_regno_no_parallel_p criteria.  */
2088 
2089 static bool
2090 covers_regno_p (const_rtx dest, unsigned int test_regno)
2091 {
2092   if (GET_CODE (dest) == PARALLEL)
2093     {
2094       /* Some targets place small structures in registers for return
2095 	 values of functions, and those registers are wrapped in
2096 	 PARALLELs that we may see as the destination of a SET.  */
2097       int i;
2098 
2099       for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2100 	{
2101 	  rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
2102 	  if (inner != NULL_RTX
2103 	      && covers_regno_no_parallel_p (inner, test_regno))
2104 	    return true;
2105 	}
2106 
2107       return false;
2108     }
2109   else
2110     return covers_regno_no_parallel_p (dest, test_regno);
2111 }
2112 
2113 /* Utility function for dead_or_set_p to check an individual register.  */
2114 
2115 int
2116 dead_or_set_regno_p (const rtx_insn *insn, unsigned int test_regno)
2117 {
2118   const_rtx pattern;
2119 
2120   /* See if there is a death note for something that includes TEST_REGNO.  */
2121   if (find_regno_note (insn, REG_DEAD, test_regno))
2122     return 1;
2123 
2124   if (CALL_P (insn)
2125       && find_regno_fusage (insn, CLOBBER, test_regno))
2126     return 1;
2127 
2128   pattern = PATTERN (insn);
2129 
2130   /* If a COND_EXEC is not executed, the value survives.  */
2131   if (GET_CODE (pattern) == COND_EXEC)
2132     return 0;
2133 
2134   if (GET_CODE (pattern) == SET || GET_CODE (pattern) == CLOBBER)
2135     return covers_regno_p (SET_DEST (pattern), test_regno);
2136   else if (GET_CODE (pattern) == PARALLEL)
2137     {
2138       int i;
2139 
2140       for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
2141 	{
2142 	  rtx body = XVECEXP (pattern, 0, i);
2143 
2144 	  if (GET_CODE (body) == COND_EXEC)
2145 	    body = COND_EXEC_CODE (body);
2146 
2147 	  if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
2148 	      && covers_regno_p (SET_DEST (body), test_regno))
2149 	    return 1;
2150 	}
2151     }
2152 
2153   return 0;
2154 }
2155 
2156 /* Return the reg-note of kind KIND in insn INSN, if there is one.
2157    If DATUM is nonzero, look for one whose datum is DATUM.  */
2158 
2159 rtx
2160 find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
2161 {
2162   rtx link;
2163 
2164   gcc_checking_assert (insn);
2165 
2166   /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
2167   if (! INSN_P (insn))
2168     return 0;
2169   if (datum == 0)
2170     {
2171       for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2172 	if (REG_NOTE_KIND (link) == kind)
2173 	  return link;
2174       return 0;
2175     }
2176 
2177   for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2178     if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
2179       return link;
2180   return 0;
2181 }
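
/* For example, a pass that wants the value an insn's single set is known
   to equal might do the following (sketch; `equal_value' is hypothetical):

     rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
     if (note)
       equal_value = XEXP (note, 0);  */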
2182 
2183 /* Return the reg-note of kind KIND in insn INSN which applies to register
2184    number REGNO, if any.  Return 0 if there is no such reg-note.  Note that
2185    the register of such a note need not be REGNO itself if REGNO is a hard
2186    register; the note may merely overlap REGNO.  */
2187 
2188 rtx
2189 find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
2190 {
2191   rtx link;
2192 
2193   /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
2194   if (! INSN_P (insn))
2195     return 0;
2196 
2197   for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2198     if (REG_NOTE_KIND (link) == kind
2199 	/* Verify that it is a register, so that scratch and MEM won't cause a
2200 	   problem here.  */
2201 	&& REG_P (XEXP (link, 0))
2202 	&& REGNO (XEXP (link, 0)) <= regno
2203 	&& END_REGNO (XEXP (link, 0)) > regno)
2204       return link;
2205   return 0;
2206 }
2207 
2208 /* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
2209    has such a note.  */
2210 
2211 rtx
2212 find_reg_equal_equiv_note (const_rtx insn)
2213 {
2214   rtx link;
2215 
2216   if (!INSN_P (insn))
2217     return 0;
2218 
2219   for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2220     if (REG_NOTE_KIND (link) == REG_EQUAL
2221 	|| REG_NOTE_KIND (link) == REG_EQUIV)
2222       {
2223 	/* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
2224 	   insns that have multiple sets.  Checking single_set to
2225 	   make sure of this is not the proper check, as explained
2226 	   in the comment in set_unique_reg_note.
2227 
2228 	   This should be changed into an assert.  */
2229 	if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
2230 	  return 0;
2231 	return link;
2232       }
2233   return NULL;
2234 }
2235 
2236 /* Check whether INSN is a single_set whose source is known to be
2237    equivalent to a constant.  Return that constant if so, otherwise
2238    return null.  */
2239 
2240 rtx
2241 find_constant_src (const rtx_insn *insn)
2242 {
2243   rtx note, set, x;
2244 
2245   set = single_set (insn);
2246   if (set)
2247     {
2248       x = avoid_constant_pool_reference (SET_SRC (set));
2249       if (CONSTANT_P (x))
2250 	return x;
2251     }
2252 
2253   note = find_reg_equal_equiv_note (insn);
2254   if (note && CONSTANT_P (XEXP (note, 0)))
2255     return XEXP (note, 0);
2256 
2257   return NULL_RTX;
2258 }
2259 
2260 /* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
2261    in the CALL_INSN_FUNCTION_USAGE information of INSN.  */
2262 
2263 int
2264 find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
2265 {
2266   /* If it's not a CALL_INSN, it can't possibly have a
2267      CALL_INSN_FUNCTION_USAGE field, so don't bother checking.  */
2268   if (!CALL_P (insn))
2269     return 0;
2270 
2271   gcc_assert (datum);
2272 
2273   if (!REG_P (datum))
2274     {
2275       rtx link;
2276 
2277       for (link = CALL_INSN_FUNCTION_USAGE (insn);
2278 	   link;
2279 	   link = XEXP (link, 1))
2280 	if (GET_CODE (XEXP (link, 0)) == code
2281 	    && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
2282 	  return 1;
2283     }
2284   else
2285     {
2286       unsigned int regno = REGNO (datum);
2287 
2288       /* CALL_INSN_FUNCTION_USAGE information cannot contain references
2289 	 to pseudo registers, so don't bother checking.  */
2290 
2291       if (regno < FIRST_PSEUDO_REGISTER)
2292 	{
2293 	  unsigned int end_regno = END_REGNO (datum);
2294 	  unsigned int i;
2295 
2296 	  for (i = regno; i < end_regno; i++)
2297 	    if (find_regno_fusage (insn, code, i))
2298 	      return 1;
2299 	}
2300     }
2301 
2302   return 0;
2303 }
2304 
2305 /* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
2306    in the CALL_INSN_FUNCTION_USAGE information of INSN.  */
2307 
2308 int
2309 find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
2310 {
2311   rtx link;
2312 
2313   /* CALL_INSN_FUNCTION_USAGE information cannot contain references
2314      to pseudo registers, so don't bother checking.  */
2315 
2316   if (regno >= FIRST_PSEUDO_REGISTER
2317       || !CALL_P (insn))
2318     return 0;
2319 
2320   for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2321     {
2322       rtx op, reg;
2323 
2324       if (GET_CODE (op = XEXP (link, 0)) == code
2325 	  && REG_P (reg = XEXP (op, 0))
2326 	  && REGNO (reg) <= regno
2327 	  && END_REGNO (reg) > regno)
2328 	return 1;
2329     }
2330 
2331   return 0;
2332 }
2333 
2334 
2335 /* Return true if KIND is an integer REG_NOTE.  */
2336 
2337 static bool
2338 int_reg_note_p (enum reg_note kind)
2339 {
2340   return kind == REG_BR_PROB;
2341 }
2342 
2343 /* Allocate a register note with kind KIND and datum DATUM.  LIST is
2344    stored as the pointer to the next register note.  */
2345 
2346 rtx
2347 alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
2348 {
2349   rtx note;
2350 
2351   gcc_checking_assert (!int_reg_note_p (kind));
2352   switch (kind)
2353     {
2354     case REG_CC_SETTER:
2355     case REG_CC_USER:
2356     case REG_LABEL_TARGET:
2357     case REG_LABEL_OPERAND:
2358     case REG_TM:
2359       /* These types of register notes use an INSN_LIST rather than an
2360 	 EXPR_LIST, so that copying is done right and dumps look
2361 	 better.  */
2362       note = alloc_INSN_LIST (datum, list);
2363       PUT_REG_NOTE_KIND (note, kind);
2364       break;
2365 
2366     default:
2367       note = alloc_EXPR_LIST (kind, datum, list);
2368       break;
2369     }
2370 
2371   return note;
2372 }
2373 
2374 /* Add register note with kind KIND and datum DATUM to INSN.  */
2375 
2376 void
2377 add_reg_note (rtx insn, enum reg_note kind, rtx datum)
2378 {
2379   REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
2380 }
2381 
2382 /* Add an integer register note with kind KIND and datum DATUM to INSN.  */
2383 
2384 void
2385 add_int_reg_note (rtx_insn *insn, enum reg_note kind, int datum)
2386 {
2387   gcc_checking_assert (int_reg_note_p (kind));
2388   REG_NOTES (insn) = gen_rtx_INT_LIST ((machine_mode) kind,
2389 				       datum, REG_NOTES (insn));
2390 }
2391 
2392 /* Add a REG_ARGS_SIZE note to INSN with value VALUE.  */
2393 
2394 void
2395 add_args_size_note (rtx_insn *insn, poly_int64 value)
2396 {
2397   gcc_checking_assert (!find_reg_note (insn, REG_ARGS_SIZE, NULL_RTX));
2398   add_reg_note (insn, REG_ARGS_SIZE, gen_int_mode (value, Pmode));
2399 }
2400 
2401 /* Add a register note like NOTE to INSN.  */
2402 
2403 void
2404 add_shallow_copy_of_reg_note (rtx_insn *insn, rtx note)
2405 {
2406   if (GET_CODE (note) == INT_LIST)
2407     add_int_reg_note (insn, REG_NOTE_KIND (note), XINT (note, 0));
2408   else
2409     add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
2410 }
2411 
2412 /* Duplicate NOTE and return the copy.  */
2413 rtx
2414 duplicate_reg_note (rtx note)
2415 {
2416   reg_note kind = REG_NOTE_KIND (note);
2417 
2418   if (GET_CODE (note) == INT_LIST)
2419     return gen_rtx_INT_LIST ((machine_mode) kind, XINT (note, 0), NULL_RTX);
2420   else if (GET_CODE (note) == EXPR_LIST)
2421     return alloc_reg_note (kind, copy_insn_1 (XEXP (note, 0)), NULL_RTX);
2422   else
2423     return alloc_reg_note (kind, XEXP (note, 0), NULL_RTX);
2424 }
2425 
2426 /* Remove register note NOTE from the REG_NOTES of INSN.  */
2427 
2428 void
2429 remove_note (rtx_insn *insn, const_rtx note)
2430 {
2431   rtx link;
2432 
2433   if (note == NULL_RTX)
2434     return;
2435 
2436   if (REG_NOTES (insn) == note)
2437     REG_NOTES (insn) = XEXP (note, 1);
2438   else
2439     for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2440       if (XEXP (link, 1) == note)
2441 	{
2442 	  XEXP (link, 1) = XEXP (note, 1);
2443 	  break;
2444 	}
2445 
2446   switch (REG_NOTE_KIND (note))
2447     {
2448     case REG_EQUAL:
2449     case REG_EQUIV:
2450       df_notes_rescan (insn);
2451       break;
2452     default:
2453       break;
2454     }
2455 }
2456 
2457 /* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes.
2458    Return true if any note has been removed.  */
2459 
2460 bool
2461 remove_reg_equal_equiv_notes (rtx_insn *insn)
2462 {
2463   rtx *loc;
2464   bool ret = false;
2465 
2466   loc = &REG_NOTES (insn);
2467   while (*loc)
2468     {
2469       enum reg_note kind = REG_NOTE_KIND (*loc);
2470       if (kind == REG_EQUAL || kind == REG_EQUIV)
2471 	{
2472 	  *loc = XEXP (*loc, 1);
2473 	  ret = true;
2474 	}
2475       else
2476 	loc = &XEXP (*loc, 1);
2477     }
2478   return ret;
2479 }
2480 
2481 /* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO.  */
2482 
2483 void
2484 remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
2485 {
2486   df_ref eq_use;
2487 
2488   if (!df)
2489     return;
2490 
2491   /* This loop is a little tricky.  We cannot just go down the chain because
2492      it is being modified by some actions in the loop.  So we just iterate
2493      over the head.  We plan to drain the list anyway.  */
2494   while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
2495     {
2496       rtx_insn *insn = DF_REF_INSN (eq_use);
2497       rtx note = find_reg_equal_equiv_note (insn);
2498 
2499       /* This assert is generally triggered when someone deletes a REG_EQUAL
2500 	 or REG_EQUIV note by hacking the list manually rather than calling
2501 	 remove_note.  */
2502       gcc_assert (note);
2503 
2504       remove_note (insn, note);
2505     }
2506 }
2507 
2508 /* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
2509    return true if it is found.  A simple equality test is used to determine
2510    if NODE matches.  */
2511 
2512 bool
2513 in_insn_list_p (const rtx_insn_list *listp, const rtx_insn *node)
2514 {
2515   const_rtx x;
2516 
2517   for (x = listp; x; x = XEXP (x, 1))
2518     if (node == XEXP (x, 0))
2519       return true;
2520 
2521   return false;
2522 }
2523 
2524 /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
2525    remove that entry from the list if it is found.
2526 
2527    A simple equality test is used to determine if NODE matches.  */
2528 
2529 void
2530 remove_node_from_expr_list (const_rtx node, rtx_expr_list **listp)
2531 {
2532   rtx_expr_list *temp = *listp;
2533   rtx_expr_list *prev = NULL;
2534 
2535   while (temp)
2536     {
2537       if (node == temp->element ())
2538 	{
2539 	  /* Splice the node out of the list.  */
2540 	  if (prev)
2541 	    XEXP (prev, 1) = temp->next ();
2542 	  else
2543 	    *listp = temp->next ();
2544 
2545 	  return;
2546 	}
2547 
2548       prev = temp;
2549       temp = temp->next ();
2550     }
2551 }
2552 
2553 /* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
2554    remove that entry from the list if it is found.
2555 
2556    A simple equality test is used to determine if NODE matches.  */
2557 
2558 void
2559 remove_node_from_insn_list (const rtx_insn *node, rtx_insn_list **listp)
2560 {
2561   rtx_insn_list *temp = *listp;
2562   rtx_insn_list *prev = NULL;
2563 
2564   while (temp)
2565     {
2566       if (node == temp->insn ())
2567 	{
2568 	  /* Splice the node out of the list.  */
2569 	  if (prev)
2570 	    XEXP (prev, 1) = temp->next ();
2571 	  else
2572 	    *listp = temp->next ();
2573 
2574 	  return;
2575 	}
2576 
2577       prev = temp;
2578       temp = temp->next ();
2579     }
2580 }
2581 
2582 /* Nonzero if X contains any volatile instructions.  These are instructions
2583    that may cause unpredictable machine state, and thus no instructions
2584    or register uses should be moved or combined across them.
2585    This includes only volatile asms and UNSPEC_VOLATILE instructions.  */
2586 
2587 int
2588 volatile_insn_p (const_rtx x)
2589 {
2590   const RTX_CODE code = GET_CODE (x);
2591   switch (code)
2592     {
2593     case LABEL_REF:
2594     case SYMBOL_REF:
2595     case CONST:
2596     CASE_CONST_ANY:
2597     case CC0:
2598     case PC:
2599     case REG:
2600     case SCRATCH:
2601     case CLOBBER:
2602     case ADDR_VEC:
2603     case ADDR_DIFF_VEC:
2604     case CALL:
2605     case MEM:
2606       return 0;
2607 
2608     case UNSPEC_VOLATILE:
2609       return 1;
2610 
2611     case ASM_INPUT:
2612     case ASM_OPERANDS:
2613       if (MEM_VOLATILE_P (x))
2614 	return 1;
2615 
2616     default:
2617       break;
2618     }
2619 
2620   /* Recursively scan the operands of this expression.  */
2621 
2622   {
2623     const char *const fmt = GET_RTX_FORMAT (code);
2624     int i;
2625 
2626     for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2627       {
2628 	if (fmt[i] == 'e')
2629 	  {
2630 	    if (volatile_insn_p (XEXP (x, i)))
2631 	      return 1;
2632 	  }
2633 	else if (fmt[i] == 'E')
2634 	  {
2635 	    int j;
2636 	    for (j = 0; j < XVECLEN (x, i); j++)
2637 	      if (volatile_insn_p (XVECEXP (x, i, j)))
2638 		return 1;
2639 	  }
2640       }
2641   }
2642   return 0;
2643 }
2644 
2645 /* Nonzero if X contains any volatile memory references,
2646    UNSPEC_VOLATILE operations, or volatile ASM_OPERANDS expressions.  */
2647 
2648 int
2649 volatile_refs_p (const_rtx x)
2650 {
2651   const RTX_CODE code = GET_CODE (x);
2652   switch (code)
2653     {
2654     case LABEL_REF:
2655     case SYMBOL_REF:
2656     case CONST:
2657     CASE_CONST_ANY:
2658     case CC0:
2659     case PC:
2660     case REG:
2661     case SCRATCH:
2662     case CLOBBER:
2663     case ADDR_VEC:
2664     case ADDR_DIFF_VEC:
2665       return 0;
2666 
2667     case UNSPEC_VOLATILE:
2668       return 1;
2669 
2670     case MEM:
2671     case ASM_INPUT:
2672     case ASM_OPERANDS:
2673       if (MEM_VOLATILE_P (x))
2674 	return 1;
2675 
2676     default:
2677       break;
2678     }
2679 
2680   /* Recursively scan the operands of this expression.  */
2681 
2682   {
2683     const char *const fmt = GET_RTX_FORMAT (code);
2684     int i;
2685 
2686     for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2687       {
2688 	if (fmt[i] == 'e')
2689 	  {
2690 	    if (volatile_refs_p (XEXP (x, i)))
2691 	      return 1;
2692 	  }
2693 	else if (fmt[i] == 'E')
2694 	  {
2695 	    int j;
2696 	    for (j = 0; j < XVECLEN (x, i); j++)
2697 	      if (volatile_refs_p (XVECEXP (x, i, j)))
2698 		return 1;
2699 	  }
2700       }
2701   }
2702   return 0;
2703 }
2704 
2705 /* Similar to above, except that it also rejects register pre- and post-
2706    incrementing.  */
2707 
2708 int
2709 side_effects_p (const_rtx x)
2710 {
2711   const RTX_CODE code = GET_CODE (x);
2712   switch (code)
2713     {
2714     case LABEL_REF:
2715     case SYMBOL_REF:
2716     case CONST:
2717     CASE_CONST_ANY:
2718     case CC0:
2719     case PC:
2720     case REG:
2721     case SCRATCH:
2722     case ADDR_VEC:
2723     case ADDR_DIFF_VEC:
2724     case VAR_LOCATION:
2725       return 0;
2726 
2727     case CLOBBER:
2728       /* Reject CLOBBER with a non-VOID mode.  These are made by combine.c
2729 	 when some combination can't be done.  If we see one, don't think
2730 	 that we can simplify the expression.  */
2731       return (GET_MODE (x) != VOIDmode);
2732 
2733     case PRE_INC:
2734     case PRE_DEC:
2735     case POST_INC:
2736     case POST_DEC:
2737     case PRE_MODIFY:
2738     case POST_MODIFY:
2739     case CALL:
2740     case UNSPEC_VOLATILE:
2741       return 1;
2742 
2743     case MEM:
2744     case ASM_INPUT:
2745     case ASM_OPERANDS:
2746       if (MEM_VOLATILE_P (x))
2747 	return 1;
2748 
2749     default:
2750       break;
2751     }
2752 
2753   /* Recursively scan the operands of this expression.  */
2754 
2755   {
2756     const char *fmt = GET_RTX_FORMAT (code);
2757     int i;
2758 
2759     for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2760       {
2761 	if (fmt[i] == 'e')
2762 	  {
2763 	    if (side_effects_p (XEXP (x, i)))
2764 	      return 1;
2765 	  }
2766 	else if (fmt[i] == 'E')
2767 	  {
2768 	    int j;
2769 	    for (j = 0; j < XVECLEN (x, i); j++)
2770 	      if (side_effects_p (XVECEXP (x, i, j)))
2771 		return 1;
2772 	  }
2773       }
2774   }
2775   return 0;
2776 }
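
/* For example, (mem:SI (pre_inc:SI (reg:SI R))) has a side effect (the
   register increment) even when the MEM itself is not volatile, so
   side_effects_p returns 1 for it while volatile_refs_p returns 0.
   (Illustrative sketch; the modes and register are hypothetical.)  */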
2777 
2778 /* Return nonzero if evaluating rtx X might cause a trap.
2779    FLAGS controls how to consider MEMs.  A nonzero value means the context
2780    of the access may have changed from the original, such that the
2781    address may have become invalid.  */
2782 
2783 int
2784 may_trap_p_1 (const_rtx x, unsigned flags)
2785 {
2786   int i;
2787   enum rtx_code code;
2788   const char *fmt;
2789 
2790   /* We make no distinction currently, but this function is part of
2791      the internal target-hooks ABI so we keep the parameter as
2792      "unsigned flags".  */
2793   bool code_changed = flags != 0;
2794 
2795   if (x == 0)
2796     return 0;
2797   code = GET_CODE (x);
2798   switch (code)
2799     {
2800       /* Handle these cases quickly.  */
2801     CASE_CONST_ANY:
2802     case SYMBOL_REF:
2803     case LABEL_REF:
2804     case CONST:
2805     case PC:
2806     case CC0:
2807     case REG:
2808     case SCRATCH:
2809       return 0;
2810 
2811     case UNSPEC:
2812       return targetm.unspec_may_trap_p (x, flags);
2813 
2814     case UNSPEC_VOLATILE:
2815     case ASM_INPUT:
2816     case TRAP_IF:
2817       return 1;
2818 
2819     case ASM_OPERANDS:
2820       return MEM_VOLATILE_P (x);
2821 
2822       /* Memory ref can trap unless it's a static var or a stack slot.  */
2823     case MEM:
2824       /* Recognize specific pattern of stack checking probes.  */
2825       if (flag_stack_check
2826 	  && MEM_VOLATILE_P (x)
2827 	  && XEXP (x, 0) == stack_pointer_rtx)
2828 	return 1;
2829       if (/* MEM_NOTRAP_P only relates to the actual position of the memory
2830 	     reference; moving it out of context such as when moving code
2831 	     when optimizing, might cause its address to become invalid.  */
2832 	  code_changed
2833 	  || !MEM_NOTRAP_P (x))
2834 	{
2835 	  poly_int64 size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : -1;
2836 	  return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
2837 					GET_MODE (x), code_changed);
2838 	}
2839 
2840       return 0;
2841 
2842       /* Division by a non-constant or by a possibly-zero value might trap.  */
2843     case DIV:
2844     case MOD:
2845     case UDIV:
2846     case UMOD:
2847       if (HONOR_SNANS (x))
2848 	return 1;
2849       if (FLOAT_MODE_P (GET_MODE (x)))
2850 	return flag_trapping_math;
2851       if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
2852 	return 1;
2853       if (GET_CODE (XEXP (x, 1)) == CONST_VECTOR)
2854 	{
2855 	  /* For CONST_VECTOR, return 1 if any element is or might be zero.  */
2856 	  unsigned int n_elts;
2857 	  rtx op = XEXP (x, 1);
2858 	  if (!GET_MODE_NUNITS (GET_MODE (op)).is_constant (&n_elts))
2859 	    {
2860 	      if (!CONST_VECTOR_DUPLICATE_P (op))
2861 		return 1;
2862 	      for (unsigned i = 0; i < (unsigned int) XVECLEN (op, 0); i++)
2863 		if (CONST_VECTOR_ENCODED_ELT (op, i) == const0_rtx)
2864 		  return 1;
2865 	    }
2866 	  else
2867 	    for (unsigned i = 0; i < n_elts; i++)
2868 	      if (CONST_VECTOR_ELT (op, i) == const0_rtx)
2869 		return 1;
2870 	}
2871       break;
2872 
2873     case EXPR_LIST:
2874       /* An EXPR_LIST is used to represent a function call.  This
2875 	 certainly may trap.  */
2876       return 1;
2877 
2878     case GE:
2879     case GT:
2880     case LE:
2881     case LT:
2882     case LTGT:
2883     case COMPARE:
2884       /* Some floating point comparisons may trap.  */
2885       if (!flag_trapping_math)
2886 	break;
2887       /* ??? There is no machine independent way to check for tests that trap
2888 	 when COMPARE is used, though many targets do make this distinction.
2889 	 For instance, sparc uses CCFPE for compares which generate exceptions
2890 	 and CCFP for compares which do not generate exceptions.  */
2891       if (HONOR_NANS (x))
2892 	return 1;
2893       /* But often the compare has some CC mode, so check operand
2894 	 modes as well.  */
2895       if (HONOR_NANS (XEXP (x, 0))
2896 	  || HONOR_NANS (XEXP (x, 1)))
2897 	return 1;
2898       break;
2899 
2900     case EQ:
2901     case NE:
2902       if (HONOR_SNANS (x))
2903 	return 1;
2904       /* Often comparison is CC mode, so check operand modes.  */
2905       if (HONOR_SNANS (XEXP (x, 0))
2906 	  || HONOR_SNANS (XEXP (x, 1)))
2907 	return 1;
2908       break;
2909 
2910     case FIX:
2911       /* Conversion of floating point might trap.  */
2912       if (flag_trapping_math && HONOR_NANS (XEXP (x, 0)))
2913 	return 1;
2914       break;
2915 
2916     case NEG:
2917     case ABS:
2918     case SUBREG:
2919     case VEC_MERGE:
2920     case VEC_SELECT:
2921     case VEC_CONCAT:
2922     case VEC_DUPLICATE:
2923       /* These operations don't trap even with floating point.  */
2924       break;
2925 
2926     default:
2927       /* Any floating arithmetic may trap.  */
2928       if (FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
2929 	return 1;
2930     }
2931 
2932   fmt = GET_RTX_FORMAT (code);
2933   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2934     {
2935       if (fmt[i] == 'e')
2936 	{
2937 	  if (may_trap_p_1 (XEXP (x, i), flags))
2938 	    return 1;
2939 	}
2940       else if (fmt[i] == 'E')
2941 	{
2942 	  int j;
2943 	  for (j = 0; j < XVECLEN (x, i); j++)
2944 	    if (may_trap_p_1 (XVECEXP (x, i, j), flags))
2945 	      return 1;
2946 	}
2947     }
2948   return 0;
2949 }
2950 
2951 /* Return nonzero if evaluating rtx X might cause a trap.  */
2952 
2953 int
2954 may_trap_p (const_rtx x)
2955 {
2956   return may_trap_p_1 (x, 0);
2957 }
2958 
2959 /* Same as above, but additionally return nonzero if evaluating rtx X might
2960    cause a fault.  We define a fault for the purpose of this function as an
2961    erroneous execution condition that cannot be encountered during the normal
2962    execution of a valid program; the typical example is an unaligned memory
2963    access on a strict alignment machine.  The compiler guarantees that it
2964    doesn't generate code that will fault from a valid program, but this
2965    guarantee doesn't mean anything for individual instructions.  Consider
2966    the following example:
2967 
2968       struct S { int d; union { char *cp; int *ip; }; };
2969 
2970       int foo(struct S *s)
2971       {
2972 	if (s->d == 1)
2973 	  return *s->ip;
2974 	else
2975 	  return *s->cp;
2976       }
2977 
2978    on a strict alignment machine.  In a valid program, foo will never be
2979    invoked on a structure for which d is equal to 1 and the underlying
2980    unique field of the union not aligned on a 4-byte boundary, but the
2981    expression *s->ip might cause a fault if considered individually.
2982 
2983    At the RTL level, potentially problematic expressions will almost always
2984    satisfy may_trap_p; for example, the above dereference can be emitted as
2985    (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
2986    However, suppose that foo is inlined in a caller that causes s->cp to
2987    point to a local character variable and guarantees that s->d is not set
2988    to 1; foo may have been effectively translated into pseudo-RTL as:
2989 
2990       if ((reg:SI) == 1)
2991 	(set (reg:SI) (mem:SI (%fp - 7)))
2992       else
2993 	(set (reg:QI) (mem:QI (%fp - 7)))
2994 
2995    Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
2996    memory reference to a stack slot, but it will certainly cause a fault
2997    on a strict alignment machine.  */
2998 
2999 int
3000 may_trap_or_fault_p (const_rtx x)
3001 {
3002   return may_trap_p_1 (x, 1);
3003 }
3004 
3005 /* Return nonzero if X contains a comparison that is not either EQ or NE,
3006    i.e., an inequality.  */
3007 
3008 int
3009 inequality_comparisons_p (const_rtx x)
3010 {
3011   const char *fmt;
3012   int len, i;
3013   const enum rtx_code code = GET_CODE (x);
3014 
3015   switch (code)
3016     {
3017     case REG:
3018     case SCRATCH:
3019     case PC:
3020     case CC0:
3021     CASE_CONST_ANY:
3022     case CONST:
3023     case LABEL_REF:
3024     case SYMBOL_REF:
3025       return 0;
3026 
3027     case LT:
3028     case LTU:
3029     case GT:
3030     case GTU:
3031     case LE:
3032     case LEU:
3033     case GE:
3034     case GEU:
3035       return 1;
3036 
3037     default:
3038       break;
3039     }
3040 
3041   len = GET_RTX_LENGTH (code);
3042   fmt = GET_RTX_FORMAT (code);
3043 
3044   for (i = 0; i < len; i++)
3045     {
3046       if (fmt[i] == 'e')
3047 	{
3048 	  if (inequality_comparisons_p (XEXP (x, i)))
3049 	    return 1;
3050 	}
3051       else if (fmt[i] == 'E')
3052 	{
3053 	  int j;
3054 	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3055 	    if (inequality_comparisons_p (XVECEXP (x, i, j)))
3056 	      return 1;
3057 	}
3058     }
3059 
3060   return 0;
3061 }
3062 
3063 /* Replace any occurrence of FROM in X with TO.  The function does
3064    not descend into CONST_DOUBLE expressions when replacing.
3065 
3066    Note that copying is not done so X must not be shared unless all copies
3067    are to be modified.
3068 
3069    ALL_REGS is true if we want to replace all REGs equal to FROM, not just
3070    those pointer-equal ones.  */
3071 
3072 rtx
3073 replace_rtx (rtx x, rtx from, rtx to, bool all_regs)
3074 {
3075   int i, j;
3076   const char *fmt;
3077 
3078   if (x == from)
3079     return to;
3080 
3081   /* Allow this function to make replacements in EXPR_LISTs.  */
3082   if (x == 0)
3083     return 0;
3084 
3085   if (all_regs
3086       && REG_P (x)
3087       && REG_P (from)
3088       && REGNO (x) == REGNO (from))
3089     {
3090       gcc_assert (GET_MODE (x) == GET_MODE (from));
3091       return to;
3092     }
3093   else if (GET_CODE (x) == SUBREG)
3094     {
3095       rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to, all_regs);
3096 
3097       if (CONST_INT_P (new_rtx))
3098 	{
3099 	  x = simplify_subreg (GET_MODE (x), new_rtx,
3100 			       GET_MODE (SUBREG_REG (x)),
3101 			       SUBREG_BYTE (x));
3102 	  gcc_assert (x);
3103 	}
3104       else
3105 	SUBREG_REG (x) = new_rtx;
3106 
3107       return x;
3108     }
3109   else if (GET_CODE (x) == ZERO_EXTEND)
3110     {
3111       rtx new_rtx = replace_rtx (XEXP (x, 0), from, to, all_regs);
3112 
3113       if (CONST_INT_P (new_rtx))
3114 	{
3115 	  x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
3116 					new_rtx, GET_MODE (XEXP (x, 0)));
3117 	  gcc_assert (x);
3118 	}
3119       else
3120 	XEXP (x, 0) = new_rtx;
3121 
3122       return x;
3123     }
3124 
3125   fmt = GET_RTX_FORMAT (GET_CODE (x));
3126   for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3127     {
3128       if (fmt[i] == 'e')
3129 	XEXP (x, i) = replace_rtx (XEXP (x, i), from, to, all_regs);
3130       else if (fmt[i] == 'E')
3131 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3132 	  XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j),
3133 					   from, to, all_regs);
3134     }
3135 
3136   return x;
3137 }
3138 
3139 /* Replace occurrences of the OLD_LABEL in *LOC with NEW_LABEL.  Also track
3140    the change in LABEL_NUSES if UPDATE_LABEL_NUSES.  */
3141 
3142 void
3143 replace_label (rtx *loc, rtx old_label, rtx new_label, bool update_label_nuses)
3144 {
3145   /* Handle jump tables specially, since ADDR_{DIFF_,}VECs can be long.  */
3146   rtx x = *loc;
3147   if (JUMP_TABLE_DATA_P (x))
3148     {
3149       x = PATTERN (x);
3150       rtvec vec = XVEC (x, GET_CODE (x) == ADDR_DIFF_VEC);
3151       int len = GET_NUM_ELEM (vec);
3152       for (int i = 0; i < len; ++i)
3153 	{
3154 	  rtx ref = RTVEC_ELT (vec, i);
3155 	  if (XEXP (ref, 0) == old_label)
3156 	    {
3157 	      XEXP (ref, 0) = new_label;
3158 	      if (update_label_nuses)
3159 		{
3160 		  ++LABEL_NUSES (new_label);
3161 		  --LABEL_NUSES (old_label);
3162 		}
3163 	    }
3164 	}
3165       return;
3166     }
3167 
3168   /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
3169      field.  This is not handled by the iterator because it doesn't
3170      handle unprinted ('0') fields.  */
3171   if (JUMP_P (x) && JUMP_LABEL (x) == old_label)
3172     JUMP_LABEL (x) = new_label;
3173 
3174   subrtx_ptr_iterator::array_type array;
3175   FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL)
3176     {
3177       rtx *loc = *iter;
3178       if (rtx x = *loc)
3179 	{
3180 	  if (GET_CODE (x) == SYMBOL_REF
3181 	      && CONSTANT_POOL_ADDRESS_P (x))
3182 	    {
3183 	      rtx c = get_pool_constant (x);
3184 	      if (rtx_referenced_p (old_label, c))
3185 		{
3186 		  /* Create a copy of constant C; replace the label inside
3187 		     but do not update LABEL_NUSES because uses in constant pool
3188 		     are not counted.  */
3189 		  rtx new_c = copy_rtx (c);
3190 		  replace_label (&new_c, old_label, new_label, false);
3191 
3192 		  /* Add the new constant NEW_C to constant pool and replace
3193 		     the old reference to constant by new reference.  */
3194 		  rtx new_mem = force_const_mem (get_pool_mode (x), new_c);
3195 		  *loc = replace_rtx (x, x, XEXP (new_mem, 0));
3196 		}
3197 	    }
3198 
3199 	  if ((GET_CODE (x) == LABEL_REF
3200 	       || GET_CODE (x) == INSN_LIST)
3201 	      && XEXP (x, 0) == old_label)
3202 	    {
3203 	      XEXP (x, 0) = new_label;
3204 	      if (update_label_nuses)
3205 		{
3206 		  ++LABEL_NUSES (new_label);
3207 		  --LABEL_NUSES (old_label);
3208 		}
3209 	    }
3210 	}
3211     }
3212 }
3213 
3214 void
3215 replace_label_in_insn (rtx_insn *insn, rtx_insn *old_label,
3216 		       rtx_insn *new_label, bool update_label_nuses)
3217 {
3218   rtx insn_as_rtx = insn;
3219   replace_label (&insn_as_rtx, old_label, new_label, update_label_nuses);
3220   gcc_checking_assert (insn_as_rtx == insn);
3221 }
3222 
3223 /* Return true if X is referenced in BODY.  */
3224 
3225 bool
3226 rtx_referenced_p (const_rtx x, const_rtx body)
3227 {
3228   subrtx_iterator::array_type array;
3229   FOR_EACH_SUBRTX (iter, array, body, ALL)
3230     if (const_rtx y = *iter)
3231       {
3232 	/* Check if a label_ref Y refers to label X.  */
3233 	if (GET_CODE (y) == LABEL_REF
3234 	    && LABEL_P (x)
3235 	    && label_ref_label (y) == x)
3236 	  return true;
3237 
3238 	if (rtx_equal_p (x, y))
3239 	  return true;
3240 
3241 	/* If Y is a reference to pool constant traverse the constant.  */
3242 	if (GET_CODE (y) == SYMBOL_REF
3243 	    && CONSTANT_POOL_ADDRESS_P (y))
3244 	  iter.substitute (get_pool_constant (y));
3245       }
3246   return false;
3247 }
3248 
3249 /* If INSN is a tablejump, return true and store the label preceding the
3250    jump table in *LABELP and the table in *TABLEP.  Both may be NULL.  */
3251 
3252 bool
3253 tablejump_p (const rtx_insn *insn, rtx_insn **labelp,
3254 	     rtx_jump_table_data **tablep)
3255 {
3256   if (!JUMP_P (insn))
3257     return false;
3258 
3259   rtx target = JUMP_LABEL (insn);
3260   if (target == NULL_RTX || ANY_RETURN_P (target))
3261     return false;
3262 
3263   rtx_insn *label = as_a<rtx_insn *> (target);
3264   rtx_insn *table = next_insn (label);
3265   if (table == NULL_RTX || !JUMP_TABLE_DATA_P (table))
3266     return false;
3267 
3268   if (labelp)
3269     *labelp = label;
3270   if (tablep)
3271     *tablep = as_a <rtx_jump_table_data *> (table);
3272   return true;
3273 }
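
/* A minimal usage sketch: fetch the jump table that INSN dispatches
   through, if any.  `handle_jump_table' is a hypothetical helper.

     rtx_insn *label;
     rtx_jump_table_data *table;
     if (tablejump_p (insn, &label, &table))
       handle_jump_table (table);

   PATTERN (table) is then an ADDR_VEC or ADDR_DIFF_VEC whose vector holds
   the case labels.  */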
3274 
3275 /* A subroutine of computed_jump_p, return 1 if X contains a REG, a MEM, or
3276    a constant that is not in the constant pool; anything inside the
3277    condition of an IF_THEN_ELSE is ignored.  */
3278 
3279 static int
3280 computed_jump_p_1 (const_rtx x)
3281 {
3282   const enum rtx_code code = GET_CODE (x);
3283   int i, j;
3284   const char *fmt;
3285 
3286   switch (code)
3287     {
3288     case LABEL_REF:
3289     case PC:
3290       return 0;
3291 
3292     case CONST:
3293     CASE_CONST_ANY:
3294     case SYMBOL_REF:
3295     case REG:
3296       return 1;
3297 
3298     case MEM:
3299       return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
3300 		&& CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));
3301 
3302     case IF_THEN_ELSE:
3303       return (computed_jump_p_1 (XEXP (x, 1))
3304 	      || computed_jump_p_1 (XEXP (x, 2)));
3305 
3306     default:
3307       break;
3308     }
3309 
3310   fmt = GET_RTX_FORMAT (code);
3311   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3312     {
3313       if (fmt[i] == 'e'
3314 	  && computed_jump_p_1 (XEXP (x, i)))
3315 	return 1;
3316 
3317       else if (fmt[i] == 'E')
3318 	for (j = 0; j < XVECLEN (x, i); j++)
3319 	  if (computed_jump_p_1 (XVECEXP (x, i, j)))
3320 	    return 1;
3321     }
3322 
3323   return 0;
3324 }
3325 
3326 /* Return nonzero if INSN is an indirect jump (aka computed jump).
3327 
3328    Tablejumps and casesi insns are not considered indirect jumps;
3329    we can recognize them by a (use (label_ref)).  */
3330 
3331 int
3332 computed_jump_p (const rtx_insn *insn)
3333 {
3334   int i;
3335   if (JUMP_P (insn))
3336     {
3337       rtx pat = PATTERN (insn);
3338 
3339       /* If we have a JUMP_LABEL set, we're not a computed jump.  */
3340       if (JUMP_LABEL (insn) != NULL)
3341 	return 0;
3342 
3343       if (GET_CODE (pat) == PARALLEL)
3344 	{
3345 	  int len = XVECLEN (pat, 0);
3346 	  int has_use_labelref = 0;
3347 
3348 	  for (i = len - 1; i >= 0; i--)
3349 	    if (GET_CODE (XVECEXP (pat, 0, i)) == USE
3350 		&& (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
3351 		    == LABEL_REF))
3352 	      {
3353 	        has_use_labelref = 1;
3354 	        break;
3355 	      }
3356 
3357 	  if (! has_use_labelref)
3358 	    for (i = len - 1; i >= 0; i--)
3359 	      if (GET_CODE (XVECEXP (pat, 0, i)) == SET
3360 		  && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
3361 		  && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
3362 		return 1;
3363 	}
3364       else if (GET_CODE (pat) == SET
3365 	       && SET_DEST (pat) == pc_rtx
3366 	       && computed_jump_p_1 (SET_SRC (pat)))
3367 	return 1;
3368     }
3369   return 0;
3370 }
3371 
3372 
3373 
3374 /* MEM has a PRE/POST-INC/DEC/MODIFY address X.  Extract the operands of
3375    the equivalent add insn and pass the result to FN, using DATA as the
3376    final argument.  */
3377 
3378 static int
3379 for_each_inc_dec_find_inc_dec (rtx mem, for_each_inc_dec_fn fn, void *data)
3380 {
3381   rtx x = XEXP (mem, 0);
3382   switch (GET_CODE (x))
3383     {
3384     case PRE_INC:
3385     case POST_INC:
3386       {
3387 	poly_int64 size = GET_MODE_SIZE (GET_MODE (mem));
3388 	rtx r1 = XEXP (x, 0);
3389 	rtx c = gen_int_mode (size, GET_MODE (r1));
3390 	return fn (mem, x, r1, r1, c, data);
3391       }
3392 
3393     case PRE_DEC:
3394     case POST_DEC:
3395       {
3396 	poly_int64 size = GET_MODE_SIZE (GET_MODE (mem));
3397 	rtx r1 = XEXP (x, 0);
3398 	rtx c = gen_int_mode (-size, GET_MODE (r1));
3399 	return fn (mem, x, r1, r1, c, data);
3400       }
3401 
3402     case PRE_MODIFY:
3403     case POST_MODIFY:
3404       {
3405 	rtx r1 = XEXP (x, 0);
3406 	rtx add = XEXP (x, 1);
3407 	return fn (mem, x, r1, add, NULL, data);
3408       }
3409 
3410     default:
3411       gcc_unreachable ();
3412     }
3413 }
3414 
3415 /* Traverse *LOC looking for MEMs that have autoinc addresses.
3416    For each such autoinc operation found, call FN, passing it
3417    the innermost enclosing MEM, the operation itself, the RTX modified
3418    by the operation, two RTXs (the second may be NULL) that, once
3419    added, represent the value to be held by the modified RTX
3420    afterwards, and DATA.  FN is to return 0 to continue the
3421    traversal or any other value to have it returned to the caller of
3422    for_each_inc_dec.  */
3423 
3424 int
3425 for_each_inc_dec (rtx x,
3426 		  for_each_inc_dec_fn fn,
3427 		  void *data)
3428 {
3429   subrtx_var_iterator::array_type array;
3430   FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
3431     {
3432       rtx mem = *iter;
3433       if (mem
3434 	  && MEM_P (mem)
3435 	  && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
3436 	{
3437 	  int res = for_each_inc_dec_find_inc_dec (mem, fn, data);
3438 	  if (res != 0)
3439 	    return res;
3440 	  iter.skip_subrtxes ();
3441 	}
3442     }
3443   return 0;
3444 }
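
/* A minimal usage sketch, not from this file: collect the registers
   modified by autoinc addresses in an insn.  `note_autoinc' and the
   bitmap `modified_regs' are hypothetical.

     static int
     note_autoinc (rtx mem ATTRIBUTE_UNUSED, rtx op ATTRIBUTE_UNUSED,
		   rtx dest, rtx src ATTRIBUTE_UNUSED,
		   rtx srcoff ATTRIBUTE_UNUSED, void *data)
     {
       bitmap_set_bit ((bitmap) data, REGNO (dest));
       return 0;
     }

     for_each_inc_dec (PATTERN (insn), note_autoinc, modified_regs);  */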
3445 
3446 
3447 /* Search X for any reference to REGNO, returning the rtx of the
3448    reference if one is found.  Otherwise, return NULL_RTX.  */
3449 
3450 rtx
3451 regno_use_in (unsigned int regno, rtx x)
3452 {
3453   const char *fmt;
3454   int i, j;
3455   rtx tem;
3456 
3457   if (REG_P (x) && REGNO (x) == regno)
3458     return x;
3459 
3460   fmt = GET_RTX_FORMAT (GET_CODE (x));
3461   for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3462     {
3463       if (fmt[i] == 'e')
3464 	{
3465 	  if ((tem = regno_use_in (regno, XEXP (x, i))))
3466 	    return tem;
3467 	}
3468       else if (fmt[i] == 'E')
3469 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3470 	  if ((tem = regno_use_in (regno , XVECEXP (x, i, j))))
3471 	    return tem;
3472     }
3473 
3474   return NULL_RTX;
3475 }
3476 
3477 /* Return a value indicating whether OP, an operand of a commutative
3478    operation, is preferred as the first or second operand.  The more
3479    positive the value, the stronger the preference for being the first
3480    operand.  */
3481 
3482 int
3483 commutative_operand_precedence (rtx op)
3484 {
3485   enum rtx_code code = GET_CODE (op);
3486 
3487   /* Constants always become the second operand.  Prefer "nice" constants.  */
3488   if (code == CONST_INT)
3489     return -10;
3490   if (code == CONST_WIDE_INT)
3491     return -9;
3492   if (code == CONST_POLY_INT)
3493     return -8;
3494   if (code == CONST_DOUBLE)
3495     return -8;
3496   if (code == CONST_FIXED)
3497     return -8;
3498   op = avoid_constant_pool_reference (op);
3499   code = GET_CODE (op);
3500 
3501   switch (GET_RTX_CLASS (code))
3502     {
3503     case RTX_CONST_OBJ:
3504       if (code == CONST_INT)
3505 	return -7;
3506       if (code == CONST_WIDE_INT)
3507 	return -6;
3508       if (code == CONST_POLY_INT)
3509 	return -5;
3510       if (code == CONST_DOUBLE)
3511 	return -5;
3512       if (code == CONST_FIXED)
3513 	return -5;
3514       return -4;
3515 
3516     case RTX_EXTRA:
3517       /* SUBREGs of objects should come second.  */
3518       if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
3519         return -3;
3520       return 0;
3521 
3522     case RTX_OBJ:
3523       /* Complex expressions should come first, so decrease the priority
3524          of objects.  Prefer pointer objects over non-pointer objects.  */
3525       if ((REG_P (op) && REG_POINTER (op))
3526 	  || (MEM_P (op) && MEM_POINTER (op)))
3527 	return -1;
3528       return -2;
3529 
3530     case RTX_COMM_ARITH:
3531       /* Prefer operands that are themselves commutative to be first.
3532          This helps to make things linear.  In particular,
3533          (and (and (reg) (reg)) (not (reg))) is canonical.  */
3534       return 4;
3535 
3536     case RTX_BIN_ARITH:
3537       /* If only one operand is a binary expression, it will be the first
3538          operand.  In particular,  (plus (minus (reg) (reg)) (neg (reg)))
3539          is canonical, although it will usually be further simplified.  */
3540       return 2;
3541 
3542     case RTX_UNARY:
3543       /* Then prefer NEG and NOT.  */
3544       if (code == NEG || code == NOT)
3545         return 1;
3546       /* FALLTHRU */
3547 
3548     default:
3549       return 0;
3550     }
3551 }
3552 
3553 /* Return 1 iff it is necessary to swap the operands of a commutative
3554    operation in order to canonicalize the expression.  */
3555 
3556 bool
3557 swap_commutative_operands_p (rtx x, rtx y)
3558 {
3559   return (commutative_operand_precedence (x)
3560 	  < commutative_operand_precedence (y));
3561 }
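
/* The usual pattern when building or simplifying a commutative expression
   is the following sketch, which puts the higher-precedence operand first:

     if (swap_commutative_operands_p (op0, op1))
       std::swap (op0, op1);  */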
3562 
3563 /* Return 1 if X is an autoincrement side effect and the register is
3564    not the stack pointer.  */
3565 int
3566 auto_inc_p (const_rtx x)
3567 {
3568   switch (GET_CODE (x))
3569     {
3570     case PRE_INC:
3571     case POST_INC:
3572     case PRE_DEC:
3573     case POST_DEC:
3574     case PRE_MODIFY:
3575     case POST_MODIFY:
3576       /* There are no REG_INC notes for SP.  */
3577       if (XEXP (x, 0) != stack_pointer_rtx)
3578 	return 1;
3579     default:
3580       break;
3581     }
3582   return 0;
3583 }
3584 
3585 /* Return nonzero if IN contains a piece of rtl that has the address LOC.  */
3586 int
3587 loc_mentioned_in_p (rtx *loc, const_rtx in)
3588 {
3589   enum rtx_code code;
3590   const char *fmt;
3591   int i, j;
3592 
3593   if (!in)
3594     return 0;
3595 
3596   code = GET_CODE (in);
3597   fmt = GET_RTX_FORMAT (code);
3598   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3599     {
3600       if (fmt[i] == 'e')
3601 	{
3602 	  if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
3603 	    return 1;
3604 	}
3605       else if (fmt[i] == 'E')
3606 	for (j = XVECLEN (in, i) - 1; j >= 0; j--)
3607 	  if (loc == &XVECEXP (in, i, j)
3608 	      || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
3609 	    return 1;
3610     }
3611   return 0;
3612 }
3613 
3614 /* Helper function for subreg_lsb.  Given a subreg's OUTER_MODE, INNER_MODE,
3615    and SUBREG_BYTE, return the bit offset where the subreg begins
3616    (counting from the least significant bit of the operand).  */
3617 
3618 poly_uint64
3619 subreg_lsb_1 (machine_mode outer_mode,
3620 	      machine_mode inner_mode,
3621 	      poly_uint64 subreg_byte)
3622 {
3623   poly_uint64 subreg_end, trailing_bytes, byte_pos;
3624 
3625   /* A paradoxical subreg begins at bit position 0.  */
3626   if (paradoxical_subreg_p (outer_mode, inner_mode))
3627     return 0;
3628 
3629   subreg_end = subreg_byte + GET_MODE_SIZE (outer_mode);
3630   trailing_bytes = GET_MODE_SIZE (inner_mode) - subreg_end;
3631   if (WORDS_BIG_ENDIAN && BYTES_BIG_ENDIAN)
3632     byte_pos = trailing_bytes;
3633   else if (!WORDS_BIG_ENDIAN && !BYTES_BIG_ENDIAN)
3634     byte_pos = subreg_byte;
3635   else
3636     {
3637       /* When bytes and words have opposite endianness, we must be able
3638 	 to split offsets into words and bytes at compile time.  */
3639       poly_uint64 leading_word_part
3640 	= force_align_down (subreg_byte, UNITS_PER_WORD);
3641       poly_uint64 trailing_word_part
3642 	= force_align_down (trailing_bytes, UNITS_PER_WORD);
3643       /* If the subreg crosses a word boundary ensure that
3644 	 it also begins and ends on a word boundary.  */
3645       gcc_assert (known_le (subreg_end - leading_word_part,
3646 			    (unsigned int) UNITS_PER_WORD)
3647 		  || (known_eq (leading_word_part, subreg_byte)
3648 		      && known_eq (trailing_word_part, trailing_bytes)));
3649       if (WORDS_BIG_ENDIAN)
3650 	byte_pos = trailing_word_part + (subreg_byte - leading_word_part);
3651       else
3652 	byte_pos = leading_word_part + (trailing_bytes - trailing_word_part);
3653     }
3654 
3655   return byte_pos * BITS_PER_UNIT;
3656 }
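
/* Worked example (illustrative only): on a little-endian target
   (!WORDS_BIG_ENDIAN && !BYTES_BIG_ENDIAN) the bit offset is simply
   SUBREG_BYTE * BITS_PER_UNIT.  */
#if 0
/* (subreg:SI (reg:DI r) 4) starts at bit 32 of the DImode value.  */
poly_uint64 lsb = subreg_lsb_1 (SImode, DImode, 4);  /* == 32 */
#endif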
3657 
3658 /* Given a subreg X, return the bit offset where the subreg begins
3659    (counting from the least significant bit of the reg).  */
3660 
3661 poly_uint64
3662 subreg_lsb (const_rtx x)
3663 {
3664   return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
3665 		       SUBREG_BYTE (x));
3666 }
3667 
3668 /* Return the subreg byte offset for a subreg whose outer value has
3669    OUTER_BYTES bytes, whose inner value has INNER_BYTES bytes, and where
3670    there are LSB_SHIFT *bits* between the lsb of the outer value and the
3671    lsb of the inner value.  This is the inverse of the calculation
3672    performed by subreg_lsb_1 (which converts byte offsets to bit shifts).  */
3673 
3674 poly_uint64
3675 subreg_size_offset_from_lsb (poly_uint64 outer_bytes, poly_uint64 inner_bytes,
3676 			     poly_uint64 lsb_shift)
3677 {
3678   /* A paradoxical subreg begins at bit position 0.  */
3679   gcc_checking_assert (ordered_p (outer_bytes, inner_bytes));
3680   if (maybe_gt (outer_bytes, inner_bytes))
3681     {
3682       gcc_checking_assert (known_eq (lsb_shift, 0U));
3683       return 0;
3684     }
3685 
3686   poly_uint64 lower_bytes = exact_div (lsb_shift, BITS_PER_UNIT);
3687   poly_uint64 upper_bytes = inner_bytes - (lower_bytes + outer_bytes);
3688   if (WORDS_BIG_ENDIAN && BYTES_BIG_ENDIAN)
3689     return upper_bytes;
3690   else if (!WORDS_BIG_ENDIAN && !BYTES_BIG_ENDIAN)
3691     return lower_bytes;
3692   else
3693     {
3694       /* When bytes and words have opposite endianness, we must be able
3695 	 to split offsets into words and bytes at compile time.  */
3696       poly_uint64 lower_word_part = force_align_down (lower_bytes,
3697 						      UNITS_PER_WORD);
3698       poly_uint64 upper_word_part = force_align_down (upper_bytes,
3699 						      UNITS_PER_WORD);
3700       if (WORDS_BIG_ENDIAN)
3701 	return upper_word_part + (lower_bytes - lower_word_part);
3702       else
3703 	return lower_word_part + (upper_bytes - upper_word_part);
3704     }
3705 }
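
/* Illustrative check of the inverse relationship with subreg_lsb_1:
   converting a bit shift back to a byte offset recovers SUBREG_BYTE.  */
#if 0
/* On a little-endian target, an SImode value (4 bytes) inside a DImode
   value (8 bytes) whose lsb is 32 bits up sits at byte offset 4.  */
poly_uint64 byte = subreg_size_offset_from_lsb (4, 8, 32);  /* == 4 */
#endif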
3706 
3707 /* Fill in information about a subreg of a hard register.
3708    xregno - A regno of an inner hard subreg_reg (or what will become one).
3709    xmode  - The mode of xregno.
3710    offset - The byte offset.
3711    ymode  - The mode of a top level SUBREG (or what may become one).
3712    info   - Pointer to structure to fill in.
3713 
3714    Rather than considering one particular inner register (and thus one
3715    particular "outer" register) in isolation, this function really uses
3716    XREGNO as a model for a sequence of isomorphic hard registers.  Thus the
3717    function does not check whether adding INFO->offset to XREGNO gives
3718    a valid hard register; even if INFO->offset + XREGNO is out of range,
3719    there might be another register of the same type that is in range.
3720    Likewise it doesn't check whether targetm.hard_regno_mode_ok accepts
3721    the new register, since that can depend on things like whether the final
3722    register number is even or odd.  Callers that want to check whether
3723    this particular subreg can be replaced by a simple (reg ...) should
3724    use simplify_subreg_regno.  */
3725 
3726 void
3727 subreg_get_info (unsigned int xregno, machine_mode xmode,
3728 		 poly_uint64 offset, machine_mode ymode,
3729 		 struct subreg_info *info)
3730 {
3731   unsigned int nregs_xmode, nregs_ymode;
3732 
3733   gcc_assert (xregno < FIRST_PSEUDO_REGISTER);
3734 
3735   poly_uint64 xsize = GET_MODE_SIZE (xmode);
3736   poly_uint64 ysize = GET_MODE_SIZE (ymode);
3737 
3738   bool rknown = false;
3739 
3740   /* If the register representation of a non-scalar mode has holes in it,
3741      we expect the scalar units to be concatenated together, with the holes
3742      distributed evenly among the scalar units.  Each scalar unit must occupy
3743      at least one register.  */
3744   if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
3745     {
3746       /* As a consequence, we must be dealing with a constant number of
3747 	 scalars, and thus a constant offset and number of units.  */
3748       HOST_WIDE_INT coffset = offset.to_constant ();
3749       HOST_WIDE_INT cysize = ysize.to_constant ();
3750       nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
3751       unsigned int nunits = GET_MODE_NUNITS (xmode).to_constant ();
3752       scalar_mode xmode_unit = GET_MODE_INNER (xmode);
3753       gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
3754       gcc_assert (nregs_xmode
3755 		  == (nunits
3756 		      * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
3757       gcc_assert (hard_regno_nregs (xregno, xmode)
3758 		  == hard_regno_nregs (xregno, xmode_unit) * nunits);
3759 
3760       /* You can only ask for a SUBREG of a value with holes in the middle
3761 	 if you don't cross the holes.  (Such a SUBREG should be done by
3762 	 picking a different register class, or doing it in memory if
3763 	 necessary.)  An example of a value with holes is XCmode on 32-bit
3764 	 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
3765 	 3 for each part, but in memory it's two 128-bit parts.
3766 	 Padding is assumed to be at the end (not necessarily the 'high part')
3767 	 of each unit.  */
3768       if ((coffset / GET_MODE_SIZE (xmode_unit) + 1 < nunits)
3769 	  && (coffset / GET_MODE_SIZE (xmode_unit)
3770 	      != ((coffset + cysize - 1) / GET_MODE_SIZE (xmode_unit))))
3771 	{
3772 	  info->representable_p = false;
3773 	  rknown = true;
3774 	}
3775     }
3776   else
3777     nregs_xmode = hard_regno_nregs (xregno, xmode);
3778 
3779   nregs_ymode = hard_regno_nregs (xregno, ymode);
3780 
3781   /* Subreg sizes must be ordered, so that we can tell whether they are
3782      partial, paradoxical or complete.  */
3783   gcc_checking_assert (ordered_p (xsize, ysize));
3784 
3785   /* Paradoxical subregs are otherwise valid.  */
3786   if (!rknown && known_eq (offset, 0U) && maybe_gt (ysize, xsize))
3787     {
3788       info->representable_p = true;
3789       /* If this is a big endian paradoxical subreg, which uses more
3790 	 actual hard registers than the original register, we must
3791 	 return a negative offset so that we find the proper highpart
3792 	 of the register.
3793 
3794 	 We assume that the ordering of registers within a multi-register
3795 	 value has a consistent endianness: if bytes and register words
3796 	 have different endianness, the hard registers that make up a
3797 	 multi-register value must be at least word-sized.  */
3798       if (REG_WORDS_BIG_ENDIAN)
3799 	info->offset = (int) nregs_xmode - (int) nregs_ymode;
3800       else
3801 	info->offset = 0;
3802       info->nregs = nregs_ymode;
3803       return;
3804     }
3805 
3806   /* If registers store different numbers of bits in the different
3807      modes, we cannot generally form this subreg.  */
3808   poly_uint64 regsize_xmode, regsize_ymode;
3809   if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
3810       && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
3811       && multiple_p (xsize, nregs_xmode, &regsize_xmode)
3812       && multiple_p (ysize, nregs_ymode, &regsize_ymode))
3813     {
3814       if (!rknown
3815 	  && ((nregs_ymode > 1 && maybe_gt (regsize_xmode, regsize_ymode))
3816 	      || (nregs_xmode > 1 && maybe_gt (regsize_ymode, regsize_xmode))))
3817 	{
3818 	  info->representable_p = false;
3819 	  if (!can_div_away_from_zero_p (ysize, regsize_xmode, &info->nregs)
3820 	      || !can_div_trunc_p (offset, regsize_xmode, &info->offset))
3821 	    /* Checked by validate_subreg.  We must know at compile time
3822 	       which inner registers are being accessed.  */
3823 	    gcc_unreachable ();
3824 	  return;
3825 	}
3826       /* It's not valid to extract a subreg of mode YMODE at OFFSET that
3827 	 would go outside of XMODE.  */
3828       if (!rknown && maybe_gt (ysize + offset, xsize))
3829 	{
3830 	  info->representable_p = false;
3831 	  info->nregs = nregs_ymode;
3832 	  if (!can_div_trunc_p (offset, regsize_xmode, &info->offset))
3833 	    /* Checked by validate_subreg.  We must know at compile time
3834 	       which inner registers are being accessed.  */
3835 	    gcc_unreachable ();
3836 	  return;
3837 	}
3838       /* Quick exit for the simple and common case of extracting whole
3839 	 subregisters from a multiregister value.  */
3840       /* ??? It would be better to integrate this into the code below,
3841 	 if we can generalize the concept enough and figure out how
3842 	 odd-sized modes can coexist with the other weird cases we support.  */
3843       HOST_WIDE_INT count;
3844       if (!rknown
3845 	  && WORDS_BIG_ENDIAN == REG_WORDS_BIG_ENDIAN
3846 	  && known_eq (regsize_xmode, regsize_ymode)
3847 	  && constant_multiple_p (offset, regsize_ymode, &count))
3848 	{
3849 	  info->representable_p = true;
3850 	  info->nregs = nregs_ymode;
3851 	  info->offset = count;
3852 	  gcc_assert (info->offset + info->nregs <= (int) nregs_xmode);
3853 	  return;
3854 	}
3855     }
3856 
3857   /* Lowpart subregs are otherwise valid.  */
3858   if (!rknown && known_eq (offset, subreg_lowpart_offset (ymode, xmode)))
3859     {
3860       info->representable_p = true;
3861       rknown = true;
3862 
3863       if (known_eq (offset, 0U) || nregs_xmode == nregs_ymode)
3864 	{
3865 	  info->offset = 0;
3866 	  info->nregs = nregs_ymode;
3867 	  return;
3868 	}
3869     }
3870 
3871   /* Set NUM_BLOCKS to the number of independently-representable YMODE
3872      values there are in (reg:XMODE XREGNO).  We can view the register
3873      as consisting of this number of independent "blocks", where each
3874      block occupies NREGS_YMODE registers and contains exactly one
3875      representable YMODE value.  */
3876   gcc_assert ((nregs_xmode % nregs_ymode) == 0);
3877   unsigned int num_blocks = nregs_xmode / nregs_ymode;
3878 
3879   /* Calculate the number of bytes in each block.  This must always
3880      be exact, otherwise we don't know how to verify the constraint.
3881      These conditions may be relaxed but subreg_regno_offset would
3882      need to be redesigned.  */
3883   poly_uint64 bytes_per_block = exact_div (xsize, num_blocks);
3884 
3885   /* Get the number of the first block that contains the subreg and the byte
3886      offset of the subreg from the start of that block.  */
3887   unsigned int block_number;
3888   poly_uint64 subblock_offset;
3889   if (!can_div_trunc_p (offset, bytes_per_block, &block_number,
3890 			&subblock_offset))
3891     /* Checked by validate_subreg.  We must know at compile time which
3892        inner registers are being accessed.  */
3893     gcc_unreachable ();
3894 
3895   if (!rknown)
3896     {
3897       /* Only the lowpart of each block is representable.  */
3898       info->representable_p
3899 	= known_eq (subblock_offset,
3900 		    subreg_size_lowpart_offset (ysize, bytes_per_block));
3901       rknown = true;
3902     }
3903 
3904   /* We assume that the ordering of registers within a multi-register
3905      value has a consistent endianness: if bytes and register words
3906      have different endianness, the hard registers that make up a
3907      multi-register value must be at least word-sized.  */
3908   if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN)
3909     /* The block number we calculated above followed memory endianness.
3910        Convert it to register endianness by counting back from the end.
3911        (Note that, because of the assumption above, each block must be
3912        at least word-sized.)  */
3913     info->offset = (num_blocks - block_number - 1) * nregs_ymode;
3914   else
3915     info->offset = block_number * nregs_ymode;
3916   info->nregs = nregs_ymode;
3917 }
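
/* Illustrative sketch (hypothetical caller, not part of GCC): querying
   whether the second word of a DImode value that lives in two hard
   registers is accessible as a simple SImode subreg on a 32-bit target.
   REGNO is assumed to be a suitable hard register number.  */
#if 0
struct subreg_info info;
subreg_get_info (regno, DImode, UNITS_PER_WORD, SImode, &info);
if (info.representable_p)
  {
    /* (subreg:SI (reg:DI regno) UNITS_PER_WORD) occupies info.nregs
       hard registers starting at regno + info.offset.  */
    unsigned int first_regno = regno + info.offset;
  }
#endif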
3918 
3919 /* This function returns the regno offset of a subreg expression.
3920    xregno - A regno of an inner hard subreg_reg (or what will become one).
3921    xmode  - The mode of xregno.
3922    offset - The byte offset.
3923    ymode  - The mode of a top level SUBREG (or what may become one).
3924    RETURN - The regno offset which would be used.  */
3925 unsigned int
3926 subreg_regno_offset (unsigned int xregno, machine_mode xmode,
3927 		     poly_uint64 offset, machine_mode ymode)
3928 {
3929   struct subreg_info info;
3930   subreg_get_info (xregno, xmode, offset, ymode, &info);
3931   return info.offset;
3932 }
3933 
3934 /* This function returns true when the offset is representable as a
3935    subreg of the given regno.
3936    xregno - A regno of an inner hard subreg_reg (or what will become one).
3937    xmode  - The mode of xregno.
3938    offset - The byte offset.
3939    ymode  - The mode of a top level SUBREG (or what may become one).
3940    RETURN - Whether the offset is representable.  */
3941 bool
3942 subreg_offset_representable_p (unsigned int xregno, machine_mode xmode,
3943 			       poly_uint64 offset, machine_mode ymode)
3944 {
3945   struct subreg_info info;
3946   subreg_get_info (xregno, xmode, offset, ymode, &info);
3947   return info.representable_p;
3948 }
3949 
3950 /* Return the number of a YMODE register to which
3951 
3952        (subreg:YMODE (reg:XMODE XREGNO) OFFSET)
3953 
3954    can be simplified.  Return -1 if the subreg can't be simplified.
3955 
3956    XREGNO is a hard register number.  */
3957 
3958 int
3959 simplify_subreg_regno (unsigned int xregno, machine_mode xmode,
3960 		       poly_uint64 offset, machine_mode ymode)
3961 {
3962   struct subreg_info info;
3963   unsigned int yregno;
3964 
3965   /* Give the backend a chance to disallow the mode change.  */
3966   if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
3967       && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
3968       && !REG_CAN_CHANGE_MODE_P (xregno, xmode, ymode)
3969       /* We can use mode change in LRA for some transformations.  */
3970       && ! lra_in_progress)
3971     return -1;
3972 
3973   /* We shouldn't simplify stack-related registers.  */
3974   if ((!reload_completed || frame_pointer_needed)
3975       && xregno == FRAME_POINTER_REGNUM)
3976     return -1;
3977 
3978   if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3979       && xregno == ARG_POINTER_REGNUM)
3980     return -1;
3981 
3982   if (xregno == STACK_POINTER_REGNUM
3983       /* We should convert hard stack register in LRA if it is
3984 	 possible.  */
3985       && ! lra_in_progress)
3986     return -1;
3987 
3988   /* Try to get the register offset.  */
3989   subreg_get_info (xregno, xmode, offset, ymode, &info);
3990   if (!info.representable_p)
3991     return -1;
3992 
3993   /* Make sure that the offsetted register value is in range.  */
3994   yregno = xregno + info.offset;
3995   if (!HARD_REGISTER_NUM_P (yregno))
3996     return -1;
3997 
3998   /* See whether (reg:YMODE YREGNO) is valid.
3999 
4000      ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
4001      This is a kludge to work around how complex FP arguments are passed
4002      on IA-64 and should be fixed.  See PR target/49226.  */
4003   if (!targetm.hard_regno_mode_ok (yregno, ymode)
4004       && targetm.hard_regno_mode_ok (xregno, xmode))
4005     return -1;
4006 
4007   return (int) yregno;
4008 }
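
/* Illustrative sketch (hypothetical use, not part of GCC): rewrite a
   highpart subreg of a hard register as a plain REG when the mapping is
   valid.  Assumes a 32-bit target where DImode occupies two registers;
   XREGNO is a hard register number.  */
#if 0
int yregno = simplify_subreg_regno (xregno, DImode, 4, SImode);
if (yregno >= 0)
  /* (subreg:SI (reg:DI xregno) 4) is equivalent to (reg:SI yregno).  */
  x = gen_rtx_REG (SImode, yregno);
#endif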
4009 
4010 /* Return the final regno that a subreg expression refers to.  */
4011 unsigned int
4012 subreg_regno (const_rtx x)
4013 {
4014   unsigned int ret;
4015   rtx subreg = SUBREG_REG (x);
4016   int regno = REGNO (subreg);
4017 
4018   ret = regno + subreg_regno_offset (regno,
4019 				     GET_MODE (subreg),
4020 				     SUBREG_BYTE (x),
4021 				     GET_MODE (x));
4022   return ret;
4023 
4024 }
4025 
4026 /* Return the number of registers that a subreg expression refers
4027    to.  */
4028 unsigned int
4029 subreg_nregs (const_rtx x)
4030 {
4031   return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
4032 }
4033 
4034 /* Return the number of registers that the subreg expression X, whose
4035    inner register has number REGNO, refers to.  This is a copy of
4036    rtlanal.c:subreg_nregs, changed so that the regno can be passed in.  */
4037 
4038 unsigned int
4039 subreg_nregs_with_regno (unsigned int regno, const_rtx x)
4040 {
4041   struct subreg_info info;
4042   rtx subreg = SUBREG_REG (x);
4043 
4044   subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
4045 		   &info);
4046   return info.nregs;
4047 }
4048 
4049 struct parms_set_data
4050 {
4051   int nregs;
4052   HARD_REG_SET regs;
4053 };
4054 
4055 /* Helper function for noticing stores to parameter registers.  */
4056 static void
4057 parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
4058 {
4059   struct parms_set_data *const d = (struct parms_set_data *) data;
4060   if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
4061       && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
4062     {
4063       CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
4064       d->nregs--;
4065     }
4066 }
4067 
4068 /* Look backward for the first parameter to be loaded.
4069    Note that loads of all parameters will not necessarily be
4070    found if CSE has eliminated some of them (e.g., an argument
4071    to the outer function is passed down as a parameter).
4072    Do not skip BOUNDARY.  */
4073 rtx_insn *
4074 find_first_parameter_load (rtx_insn *call_insn, rtx_insn *boundary)
4075 {
4076   struct parms_set_data parm;
4077   rtx p;
4078   rtx_insn *before, *first_set;
4079 
4080   /* Since different machines initialize their parameter registers
4081      in different orders, assume nothing.  Collect the set of all
4082      parameter registers.  */
4083   CLEAR_HARD_REG_SET (parm.regs);
4084   parm.nregs = 0;
4085   for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
4086     if (GET_CODE (XEXP (p, 0)) == USE
4087 	&& REG_P (XEXP (XEXP (p, 0), 0))
4088 	&& !STATIC_CHAIN_REG_P (XEXP (XEXP (p, 0), 0)))
4089       {
4090 	gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);
4091 
4092 	/* We only care about registers which can hold function
4093 	   arguments.  */
4094 	if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
4095 	  continue;
4096 
4097 	SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
4098 	parm.nregs++;
4099       }
4100   before = call_insn;
4101   first_set = call_insn;
4102 
4103   /* Search backward for the first set of a register in this set.  */
4104   while (parm.nregs && before != boundary)
4105     {
4106       before = PREV_INSN (before);
4107 
4108       /* It is possible that some loads got CSEed from one call to
4109          another.  Stop in that case.  */
4110       if (CALL_P (before))
4111 	break;
4112 
4113       /* Our caller must either ensure that we will find all sets
4114          (in case the code has not been optimized yet), or take care
4115          of possible labels by setting BOUNDARY to the preceding
4116          CODE_LABEL.  */
4117       if (LABEL_P (before))
4118 	{
4119 	  gcc_assert (before == boundary);
4120 	  break;
4121 	}
4122 
4123       if (INSN_P (before))
4124 	{
4125 	  int nregs_old = parm.nregs;
4126 	  note_stores (PATTERN (before), parms_set, &parm);
4127 	  /* If we found something that did not set a parameter reg,
4128 	     we're done.  Do not keep going, as that might result
4129 	     in hoisting an insn before the setting of a pseudo
4130 	     that is used by the hoisted insn. */
4131 	  if (nregs_old != parm.nregs)
4132 	    first_set = before;
4133 	  else
4134 	    break;
4135 	}
4136     }
4137   return first_set;
4138 }
4139 
4140 /* Return true if we should avoid inserting code between INSN and preceding
4141    call instruction.  */
4142 
4143 bool
4144 keep_with_call_p (const rtx_insn *insn)
4145 {
4146   rtx set;
4147 
4148   if (INSN_P (insn) && (set = single_set (insn)) != NULL)
4149     {
4150       if (REG_P (SET_DEST (set))
4151 	  && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
4152 	  && fixed_regs[REGNO (SET_DEST (set))]
4153 	  && general_operand (SET_SRC (set), VOIDmode))
4154 	return true;
4155       if (REG_P (SET_SRC (set))
4156 	  && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
4157 	  && REG_P (SET_DEST (set))
4158 	  && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
4159 	return true;
4160       /* There may be a stack pop just after the call and before the store
4161 	 of the return register.  Search for the actual store when deciding
4162 	 if we can break or not.  */
4163       if (SET_DEST (set) == stack_pointer_rtx)
4164 	{
4165 	  /* This CONST_CAST is okay because next_nonnote_insn just
4166 	     returns its argument and we assign it to a const_rtx
4167 	     variable.  */
4168 	  const rtx_insn *i2
4169 	    = next_nonnote_insn (const_cast<rtx_insn *> (insn));
4170 	  if (i2 && keep_with_call_p (i2))
4171 	    return true;
4172 	}
4173     }
4174   return false;
4175 }
4176 
4177 /* Return true if LABEL is a target of JUMP_INSN.  This applies only
4178    to non-complex jumps.  That is, direct unconditional, conditional,
4179    and tablejumps, but not computed jumps or returns.  It also does
4180    not apply to the fallthru case of a conditional jump.  */
4181 
4182 bool
4183 label_is_jump_target_p (const_rtx label, const rtx_insn *jump_insn)
4184 {
4185   rtx tmp = JUMP_LABEL (jump_insn);
4186   rtx_jump_table_data *table;
4187 
4188   if (label == tmp)
4189     return true;
4190 
4191   if (tablejump_p (jump_insn, NULL, &table))
4192     {
4193       rtvec vec = table->get_labels ();
4194       int i, veclen = GET_NUM_ELEM (vec);
4195 
4196       for (i = 0; i < veclen; ++i)
4197 	if (XEXP (RTVEC_ELT (vec, i), 0) == label)
4198 	  return true;
4199     }
4200 
4201   if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
4202     return true;
4203 
4204   return false;
4205 }
4206 
4207 
4208 /* Return an estimate of the cost of computing rtx X.
4209    One use is in cse, to decide which expression to keep in the hash table.
4210    Another is in rtl generation, to pick the cheapest way to multiply.
4211    Other uses like the latter are expected in the future.
4212 
4213    X appears as operand OPNO in an expression with code OUTER_CODE.
4214    SPEED specifies whether costs optimized for speed or size should
4215    be returned.  */
4216 
4217 int
4218 rtx_cost (rtx x, machine_mode mode, enum rtx_code outer_code,
4219 	  int opno, bool speed)
4220 {
4221   int i, j;
4222   enum rtx_code code;
4223   const char *fmt;
4224   int total;
4225   int factor;
4226 
4227   if (x == 0)
4228     return 0;
4229 
4230   if (GET_MODE (x) != VOIDmode)
4231     mode = GET_MODE (x);
4232 
4233   /* A size N times larger than UNITS_PER_WORD likely needs N times as
4234      many insns, taking N times as long.  */
4235   factor = estimated_poly_value (GET_MODE_SIZE (mode)) / UNITS_PER_WORD;
4236   if (factor == 0)
4237     factor = 1;
4238 
4239   /* Compute the default costs of certain things.
4240      Note that targetm.rtx_costs can override the defaults.  */
4241 
4242   code = GET_CODE (x);
4243   switch (code)
4244     {
4245     case MULT:
4246       /* Multiplication has time-complexity O(N*N), where N is the
4247 	 number of units (translated from digits) when using
4248 	 schoolbook long multiplication.  */
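      /* E.g. a TImode multiply on a target with 64-bit words has
	 factor 2, giving a default cost of 2 * 2 * COSTS_N_INSNS (5)
	 == COSTS_N_INSNS (20).  */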
4249       total = factor * factor * COSTS_N_INSNS (5);
4250       break;
4251     case DIV:
4252     case UDIV:
4253     case MOD:
4254     case UMOD:
4255       /* Similarly, complexity for schoolbook long division.  */
4256       total = factor * factor * COSTS_N_INSNS (7);
4257       break;
4258     case USE:
4259       /* Used in combine.c as a marker.  */
4260       total = 0;
4261       break;
4262     case SET:
4263       /* A SET doesn't have a mode, so let's look at the SET_DEST to get
4264 	 the mode for the factor.  */
4265       mode = GET_MODE (SET_DEST (x));
4266       factor = estimated_poly_value (GET_MODE_SIZE (mode)) / UNITS_PER_WORD;
4267       if (factor == 0)
4268 	factor = 1;
4269       /* FALLTHRU */
4270     default:
4271       total = factor * COSTS_N_INSNS (1);
4272     }
4273 
4274   switch (code)
4275     {
4276     case REG:
4277       return 0;
4278 
4279     case SUBREG:
4280       total = 0;
4281       /* If we can't tie these modes, make this expensive.  The larger
4282 	 the mode, the more expensive it is.  */
4283       if (!targetm.modes_tieable_p (mode, GET_MODE (SUBREG_REG (x))))
4284 	return COSTS_N_INSNS (2 + factor);
4285       break;
4286 
4287     case TRUNCATE:
4288       if (targetm.modes_tieable_p (mode, GET_MODE (XEXP (x, 0))))
4289 	{
4290 	  total = 0;
4291 	  break;
4292 	}
4293       /* FALLTHRU */
4294     default:
4295       if (targetm.rtx_costs (x, mode, outer_code, opno, &total, speed))
4296 	return total;
4297       break;
4298     }
4299 
4300   /* Sum the costs of the sub-rtx's, plus cost of this operation,
4301      which is already in total.  */
4302 
4303   fmt = GET_RTX_FORMAT (code);
4304   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4305     if (fmt[i] == 'e')
4306       total += rtx_cost (XEXP (x, i), mode, code, i, speed);
4307     else if (fmt[i] == 'E')
4308       for (j = 0; j < XVECLEN (x, i); j++)
4309 	total += rtx_cost (XVECEXP (x, i, j), mode, code, i, speed);
4310 
4311   return total;
4312 }
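
/* Illustrative sketch (hypothetical, not part of GCC): choosing the
   cheaper of two candidate expressions, in the style of cse.  A, B and
   MODE are assumed to be in scope.  */
#if 0
bool speed = optimize_insn_for_speed_p ();
rtx best = (rtx_cost (a, mode, SET, 1, speed)
	    <= rtx_cost (b, mode, SET, 1, speed)) ? a : b;
#endif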
4313 
4314 /* Fill in the structure C with information about both speed and size rtx
4315    costs for X, which is operand OPNO in an expression with code OUTER.  */
4316 
4317 void
4318 get_full_rtx_cost (rtx x, machine_mode mode, enum rtx_code outer, int opno,
4319 		   struct full_rtx_costs *c)
4320 {
4321   c->speed = rtx_cost (x, mode, outer, opno, true);
4322   c->size = rtx_cost (x, mode, outer, opno, false);
4323 }
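
/* Illustrative sketch (assumed usage, not from this file): comparing
   both the speed and size costs of two forms at once, using the
   costs_lt_p comparator that rtl.h provides alongside full_rtx_costs.
   A and B are assumed to be in scope.  */
#if 0
struct full_rtx_costs cost_a, cost_b;
get_full_rtx_cost (a, word_mode, SET, 1, &cost_a);
get_full_rtx_cost (b, word_mode, SET, 1, &cost_b);
if (costs_lt_p (&cost_a, &cost_b, optimize_function_for_speed_p (cfun)))
  /* A is cheaper under the current optimization goal.  */;
#endif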
4324 
4325 
4326 /* Return the cost of address expression X.
4327    Expect that X is a properly formed address reference.
4328 
4329    The SPEED parameter specifies whether costs optimized for speed or size
4330    should be returned.  */
4331 
4332 int
4333 address_cost (rtx x, machine_mode mode, addr_space_t as, bool speed)
4334 {
4335   /* We may be asked for the cost of various unusual addresses, such as
4336      operands of a push instruction.  It is not worthwhile to complicate
4337      the target hook with such cases.  */
4338 
4339   if (!memory_address_addr_space_p (mode, x, as))
4340     return 1000;
4341 
4342   return targetm.address_cost (x, mode, as, speed);
4343 }
4344 
4345 /* If the target doesn't override, compute the cost as with arithmetic.  */
4346 
4347 int
4348 default_address_cost (rtx x, machine_mode, addr_space_t, bool speed)
4349 {
4350   return rtx_cost (x, Pmode, MEM, 0, speed);
4351 }
4352 
4353 
4354 unsigned HOST_WIDE_INT
4355 nonzero_bits (const_rtx x, machine_mode mode)
4356 {
4357   if (mode == VOIDmode)
4358     mode = GET_MODE (x);
4359   scalar_int_mode int_mode;
4360   if (!is_a <scalar_int_mode> (mode, &int_mode))
4361     return GET_MODE_MASK (mode);
4362   return cached_nonzero_bits (x, int_mode, NULL_RTX, VOIDmode, 0);
4363 }
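
/* Worked example: for X = (and:SI (reg:SI r) (const_int 255)) with
   nothing known about R, nonzero_bits (X, SImode) returns 0xff, proving
   that the upper 24 bits of X are zero.  A pass can use that to remove
   a redundant zero-extension, e.g.:  */
#if 0
if ((nonzero_bits (x, SImode) & ~(unsigned HOST_WIDE_INT) 0xff) == 0)
  /* X already fits in an unsigned byte.  */;
#endif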
4364 
4365 unsigned int
4366 num_sign_bit_copies (const_rtx x, machine_mode mode)
4367 {
4368   if (mode == VOIDmode)
4369     mode = GET_MODE (x);
4370   scalar_int_mode int_mode;
4371   if (!is_a <scalar_int_mode> (mode, &int_mode))
4372     return 1;
4373   return cached_num_sign_bit_copies (x, int_mode, NULL_RTX, VOIDmode, 0);
4374 }
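
/* Worked example: for X = (sign_extend:DI (reg:SI r)),
   num_sign_bit_copies (X, DImode) is at least 64 - 32 + 1 == 33 via the
   SIGN_EXTEND case below, even when nothing is known about R.  */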
4375 
4376 /* Return true if nonzero_bits1 might recurse into both operands
4377    of X.  */
4378 
4379 static inline bool
4380 nonzero_bits_binary_arith_p (const_rtx x)
4381 {
4382   if (!ARITHMETIC_P (x))
4383     return false;
4384   switch (GET_CODE (x))
4385     {
4386     case AND:
4387     case XOR:
4388     case IOR:
4389     case UMIN:
4390     case UMAX:
4391     case SMIN:
4392     case SMAX:
4393     case PLUS:
4394     case MINUS:
4395     case MULT:
4396     case DIV:
4397     case UDIV:
4398     case MOD:
4399     case UMOD:
4400       return true;
4401     default:
4402       return false;
4403     }
4404 }
4405 
4406 /* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
4407    It avoids exponential behavior in nonzero_bits1 when X has
4408    identical subexpressions on the first or the second level.  */
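
/* For example, when the two operands of a PLUS are the same shared rtx
   (x0 == x1 below), nonzero_bits1 is handed the precomputed value for
   that operand, so the shared subexpression is analyzed once instead of
   once per occurrence.  */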
4409 
4410 static unsigned HOST_WIDE_INT
4411 cached_nonzero_bits (const_rtx x, scalar_int_mode mode, const_rtx known_x,
4412 		     machine_mode known_mode,
4413 		     unsigned HOST_WIDE_INT known_ret)
4414 {
4415   if (x == known_x && mode == known_mode)
4416     return known_ret;
4417 
4418   /* Try to find identical subexpressions.  If found call
4419      nonzero_bits1 on X with the subexpressions as KNOWN_X and the
4420      precomputed value for the subexpression as KNOWN_RET.  */
4421 
4422   if (nonzero_bits_binary_arith_p (x))
4423     {
4424       rtx x0 = XEXP (x, 0);
4425       rtx x1 = XEXP (x, 1);
4426 
4427       /* Check the first level.  */
4428       if (x0 == x1)
4429 	return nonzero_bits1 (x, mode, x0, mode,
4430 			      cached_nonzero_bits (x0, mode, known_x,
4431 						   known_mode, known_ret));
4432 
4433       /* Check the second level.  */
4434       if (nonzero_bits_binary_arith_p (x0)
4435 	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4436 	return nonzero_bits1 (x, mode, x1, mode,
4437 			      cached_nonzero_bits (x1, mode, known_x,
4438 						   known_mode, known_ret));
4439 
4440       if (nonzero_bits_binary_arith_p (x1)
4441 	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4442 	return nonzero_bits1 (x, mode, x0, mode,
4443 			      cached_nonzero_bits (x0, mode, known_x,
4444 						   known_mode, known_ret));
4445     }
4446 
4447   return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
4448 }
4449 
4450 /* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
4451    We don't let nonzero_bits recur into num_sign_bit_copies, because that
4452    is less useful.  We can't allow both, because that results in exponential
4453    run time recursion.  There is a nullstone testcase that triggered
4454    this.  This macro avoids accidental uses of num_sign_bit_copies.  */
4455 #define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
4456 
4457 /* Given an expression, X, compute which bits in X can be nonzero.
4458    We don't care about bits outside of those defined in MODE.
4459 
4460    For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X is
4461    an arithmetic operation, we can do better.  */
4462 
4463 static unsigned HOST_WIDE_INT
4464 nonzero_bits1 (const_rtx x, scalar_int_mode mode, const_rtx known_x,
4465 	       machine_mode known_mode,
4466 	       unsigned HOST_WIDE_INT known_ret)
4467 {
4468   unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
4469   unsigned HOST_WIDE_INT inner_nz;
4470   enum rtx_code code = GET_CODE (x);
4471   machine_mode inner_mode;
4472   unsigned int inner_width;
4473   scalar_int_mode xmode;
4474 
4475   unsigned int mode_width = GET_MODE_PRECISION (mode);
4476 
4477   if (CONST_INT_P (x))
4478     {
4479       if (SHORT_IMMEDIATES_SIGN_EXTEND
4480 	  && INTVAL (x) > 0
4481 	  && mode_width < BITS_PER_WORD
4482 	  && (UINTVAL (x) & (HOST_WIDE_INT_1U << (mode_width - 1))) != 0)
4483 	return UINTVAL (x) | (HOST_WIDE_INT_M1U << mode_width);
4484 
4485       return UINTVAL (x);
4486     }
4487 
4488   if (!is_a <scalar_int_mode> (GET_MODE (x), &xmode))
4489     return nonzero;
4490   unsigned int xmode_width = GET_MODE_PRECISION (xmode);
4491 
4492   /* If X is wider than MODE, use its mode instead.  */
4493   if (xmode_width > mode_width)
4494     {
4495       mode = xmode;
4496       nonzero = GET_MODE_MASK (mode);
4497       mode_width = xmode_width;
4498     }
4499 
4500   if (mode_width > HOST_BITS_PER_WIDE_INT)
4501     /* Our only callers in this case look for single bit values.  So
4502        just return the mode mask.  Those tests will then be false.  */
4503     return nonzero;
4504 
4505   /* If MODE is wider than X, but both are a single word for both the host
4506      and target machines, we can compute this from which bits of the object
4507      might be nonzero in its own mode, taking into account the fact that, on
4508      CISC machines, accessing an object in a wider mode generally causes the
4509      high-order bits to become undefined, so they are not known to be zero.
4510      We extend this reasoning to RISC machines for operations that might not
4511      operate on the full registers.  */
4512   if (mode_width > xmode_width
4513       && xmode_width <= BITS_PER_WORD
4514       && xmode_width <= HOST_BITS_PER_WIDE_INT
4515       && !(WORD_REGISTER_OPERATIONS && word_register_operation_p (x)))
4516     {
4517       nonzero &= cached_nonzero_bits (x, xmode,
4518 				      known_x, known_mode, known_ret);
4519       nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (xmode);
4520       return nonzero;
4521     }
4522 
4523   /* Please keep nonzero_bits_binary_arith_p above in sync with
4524      the code in the switch below.  */
4525   switch (code)
4526     {
4527     case REG:
4528 #if defined(POINTERS_EXTEND_UNSIGNED)
4529       /* If pointers extend unsigned and this is a pointer in Pmode, say that
4530 	 all the bits above ptr_mode are known to be zero.  */
4531       /* As we do not know which address space the pointer is referring to,
4532 	 we can do this only if the target does not support different pointer
4533 	 or address modes depending on the address space.  */
4534       if (target_default_pointer_address_modes_p ()
4535 	  && POINTERS_EXTEND_UNSIGNED
4536 	  && xmode == Pmode
4537 	  && REG_POINTER (x)
4538 	  && !targetm.have_ptr_extend ())
4539 	nonzero &= GET_MODE_MASK (ptr_mode);
4540 #endif
4541 
4542       /* Include declared information about alignment of pointers.  */
4543       /* ??? We don't properly preserve REG_POINTER changes across
4544 	 pointer-to-integer casts, so we can't trust it except for
4545 	 things that we know must be pointers.  See execute/960116-1.c.  */
4546       if ((x == stack_pointer_rtx
4547 	   || x == frame_pointer_rtx
4548 	   || x == arg_pointer_rtx)
4549 	  && REGNO_POINTER_ALIGN (REGNO (x)))
4550 	{
4551 	  unsigned HOST_WIDE_INT alignment
4552 	    = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;
4553 
4554 #ifdef PUSH_ROUNDING
4555 	  /* If PUSH_ROUNDING is defined, it is possible for the
4556 	     stack to be momentarily aligned only to that amount,
4557 	     so we pick the least alignment.  */
4558 	  if (x == stack_pointer_rtx && PUSH_ARGS)
4559 	    {
4560 	      poly_uint64 rounded_1 = PUSH_ROUNDING (poly_int64 (1));
4561 	      alignment = MIN (known_alignment (rounded_1), alignment);
4562 	    }
4563 #endif
4564 
4565 	  nonzero &= ~(alignment - 1);
4566 	}
4567 
4568       {
4569 	unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
4570 	rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, xmode, mode,
4571 						  &nonzero_for_hook);
4572 
4573 	if (new_rtx)
4574 	  nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
4575 						   known_mode, known_ret);
4576 
4577 	return nonzero_for_hook;
4578       }
4579 
4580     case MEM:
4581       /* In many, if not most, RISC machines, reading a byte from memory
4582 	 zeros the rest of the register.  Noticing that fact saves a lot
4583 	 of extra zero-extends.  */
4584       if (load_extend_op (xmode) == ZERO_EXTEND)
4585 	nonzero &= GET_MODE_MASK (xmode);
4586       break;
4587 
4588     case EQ:  case NE:
4589     case UNEQ:  case LTGT:
4590     case GT:  case GTU:  case UNGT:
4591     case LT:  case LTU:  case UNLT:
4592     case GE:  case GEU:  case UNGE:
4593     case LE:  case LEU:  case UNLE:
4594     case UNORDERED: case ORDERED:
4595       /* If this produces an integer result, we know which bits are set.
4596 	 Code here used to clear bits outside the mode of X, but that is
4597 	 now done above.  */
4598       /* Mind that MODE is the mode the caller wants to look at this
4599 	 operation in, and not the actual operation mode.  We can wind
4600 	 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
4601 	 that describes the results of a vector compare.  */
4602       if (GET_MODE_CLASS (xmode) == MODE_INT
4603 	  && mode_width <= HOST_BITS_PER_WIDE_INT)
4604 	nonzero = STORE_FLAG_VALUE;
4605       break;
4606 
4607     case NEG:
4608 #if 0
4609       /* Disabled to avoid exponential mutual recursion between nonzero_bits
4610 	 and num_sign_bit_copies.  */
4611       if (num_sign_bit_copies (XEXP (x, 0), xmode) == xmode_width)
4612 	nonzero = 1;
4613 #endif
4614 
4615       if (xmode_width < mode_width)
4616 	nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (xmode));
4617       break;
4618 
4619     case ABS:
4620 #if 0
4621       /* Disabled to avoid exponential mutual recursion between nonzero_bits
4622 	 and num_sign_bit_copies.  */
4623       if (num_sign_bit_copies (XEXP (x, 0), xmode) == xmode_width)
4624 	nonzero = 1;
4625 #endif
4626       break;
4627 
4628     case TRUNCATE:
4629       nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
4630 				       known_x, known_mode, known_ret)
4631 		  & GET_MODE_MASK (mode));
4632       break;
4633 
4634     case ZERO_EXTEND:
4635       nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4636 				      known_x, known_mode, known_ret);
4637       if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4638 	nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4639       break;
4640 
4641     case SIGN_EXTEND:
4642       /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
4643 	 Otherwise, show all the bits in the outer mode but not the inner
4644 	 may be nonzero.  */
4645       inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
4646 				      known_x, known_mode, known_ret);
4647       if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4648 	{
4649 	  inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4650 	  if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
4651 	    inner_nz |= (GET_MODE_MASK (mode)
4652 			 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
4653 	}
4654 
4655       nonzero &= inner_nz;
4656       break;
4657 
4658     case AND:
4659       nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4660 				       known_x, known_mode, known_ret)
4661       		 & cached_nonzero_bits (XEXP (x, 1), mode,
4662 					known_x, known_mode, known_ret);
4663       break;
4664 
4665     case XOR:   case IOR:
4666     case UMIN:  case UMAX:  case SMIN:  case SMAX:
4667       {
4668 	unsigned HOST_WIDE_INT nonzero0
4669 	   = cached_nonzero_bits (XEXP (x, 0), mode,
4670 				  known_x, known_mode, known_ret);
4671 
4672 	/* Don't call nonzero_bits for the second time if it cannot change
4673 	   anything.  */
4674 	if ((nonzero & nonzero0) != nonzero)
4675 	  nonzero &= nonzero0
4676       		     | cached_nonzero_bits (XEXP (x, 1), mode,
4677 					    known_x, known_mode, known_ret);
4678       }
4679       break;
4680 
4681     case PLUS:  case MINUS:
4682     case MULT:
4683     case DIV:   case UDIV:
4684     case MOD:   case UMOD:
4685       /* We can apply the rules of arithmetic to compute the number of
4686 	 high- and low-order zero bits of these operations.  We start by
4687 	 computing the width (position of the highest-order nonzero bit)
4688 	 and the number of low-order zero bits for each value.  */
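      /* For example, adding two 8-bit-wide values whose low two bits are
	 known to be zero gives result_width == 9 and result_low == 2:
	 the sum needs at most MAX (8, 8) + 1 == 9 bits, and the sum of
	 two multiples of 4 is still a multiple of 4.  */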
4689       {
4690 	unsigned HOST_WIDE_INT nz0
4691 	  = cached_nonzero_bits (XEXP (x, 0), mode,
4692 				 known_x, known_mode, known_ret);
4693 	unsigned HOST_WIDE_INT nz1
4694 	  = cached_nonzero_bits (XEXP (x, 1), mode,
4695 				 known_x, known_mode, known_ret);
4696 	int sign_index = xmode_width - 1;
4697 	int width0 = floor_log2 (nz0) + 1;
4698 	int width1 = floor_log2 (nz1) + 1;
4699 	int low0 = ctz_or_zero (nz0);
4700 	int low1 = ctz_or_zero (nz1);
4701 	unsigned HOST_WIDE_INT op0_maybe_minusp
4702 	  = nz0 & (HOST_WIDE_INT_1U << sign_index);
4703 	unsigned HOST_WIDE_INT op1_maybe_minusp
4704 	  = nz1 & (HOST_WIDE_INT_1U << sign_index);
4705 	unsigned int result_width = mode_width;
4706 	int result_low = 0;
4707 
4708 	switch (code)
4709 	  {
4710 	  case PLUS:
4711 	    result_width = MAX (width0, width1) + 1;
4712 	    result_low = MIN (low0, low1);
4713 	    break;
4714 	  case MINUS:
4715 	    result_low = MIN (low0, low1);
4716 	    break;
4717 	  case MULT:
4718 	    result_width = width0 + width1;
4719 	    result_low = low0 + low1;
4720 	    break;
4721 	  case DIV:
4722 	    if (width1 == 0)
4723 	      break;
4724 	    if (!op0_maybe_minusp && !op1_maybe_minusp)
4725 	      result_width = width0;
4726 	    break;
4727 	  case UDIV:
4728 	    if (width1 == 0)
4729 	      break;
4730 	    result_width = width0;
4731 	    break;
4732 	  case MOD:
4733 	    if (width1 == 0)
4734 	      break;
4735 	    if (!op0_maybe_minusp && !op1_maybe_minusp)
4736 	      result_width = MIN (width0, width1);
4737 	    result_low = MIN (low0, low1);
4738 	    break;
4739 	  case UMOD:
4740 	    if (width1 == 0)
4741 	      break;
4742 	    result_width = MIN (width0, width1);
4743 	    result_low = MIN (low0, low1);
4744 	    break;
4745 	  default:
4746 	    gcc_unreachable ();
4747 	  }
4748 
4749 	if (result_width < mode_width)
4750 	  nonzero &= (HOST_WIDE_INT_1U << result_width) - 1;
4751 
4752 	if (result_low > 0)
4753 	  nonzero &= ~((HOST_WIDE_INT_1U << result_low) - 1);
4754       }
4755       break;
4756 
4757     case ZERO_EXTRACT:
4758       if (CONST_INT_P (XEXP (x, 1))
4759 	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
4760 	nonzero &= (HOST_WIDE_INT_1U << INTVAL (XEXP (x, 1))) - 1;
4761       break;
4762 
4763     case SUBREG:
4764       /* If this is a SUBREG formed for a promoted variable that has
4765 	 been zero-extended, we know that at least the high-order bits
4766 	 are zero, though others might be too.  */
4767       if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x))
4768 	nonzero = GET_MODE_MASK (xmode)
4769 		  & cached_nonzero_bits (SUBREG_REG (x), xmode,
4770 					 known_x, known_mode, known_ret);
4771 
4772       /* If the inner mode is a single word for both the host and target
4773 	 machines, we can compute this from which bits of the inner
4774 	 object might be nonzero.  */
4775       inner_mode = GET_MODE (SUBREG_REG (x));
4776       if (GET_MODE_PRECISION (inner_mode).is_constant (&inner_width)
4777 	  && inner_width <= BITS_PER_WORD
4778 	  && inner_width <= HOST_BITS_PER_WIDE_INT)
4779 	{
4780 	  nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
4781 					  known_x, known_mode, known_ret);
4782 
4783           /* On a typical CISC machine, accessing an object in a wider mode
4784 	     causes the high-order bits to become undefined.  So they are
4785 	     not known to be zero.
4786 
4787 	     On a typical RISC machine, we only have to worry about the way
4788 	     loads are extended.  Otherwise, if we get a reload for the inner
4789 	     part, it may be loaded from the stack, and then we may lose all
4790 	     the zero bits that existed before the store to the stack.  */
4791 	  rtx_code extend_op;
4792 	  if ((!WORD_REGISTER_OPERATIONS
4793 	       || ((extend_op = load_extend_op (inner_mode)) == SIGN_EXTEND
4794 		   ? val_signbit_known_set_p (inner_mode, nonzero)
4795 		   : extend_op != ZERO_EXTEND)
4796 	       || (!MEM_P (SUBREG_REG (x)) && !REG_P (SUBREG_REG (x))))
4797 	      && xmode_width > inner_width)
4798 	    nonzero
4799 	      |= (GET_MODE_MASK (GET_MODE (x)) & ~GET_MODE_MASK (inner_mode));
4800 	}
4801       break;
4802 
4803     case ASHIFT:
4804     case ASHIFTRT:
4805     case LSHIFTRT:
4806     case ROTATE:
4807     case ROTATERT:
4808       /* The nonzero bits are in two classes: any bits within MODE
4809 	 that aren't in xmode are always significant.  The rest of the
4810 	 nonzero bits are those that are significant in the operand of
4811 	 the shift when shifted the appropriate number of bits.  This
4812 	 shows that high-order bits are cleared by the right shift and
4813 	 low-order bits by left shifts.  */
4814       if (CONST_INT_P (XEXP (x, 1))
4815 	  && INTVAL (XEXP (x, 1)) >= 0
4816 	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
4817 	  && INTVAL (XEXP (x, 1)) < xmode_width)
4818 	{
4819 	  int count = INTVAL (XEXP (x, 1));
4820 	  unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (xmode);
4821 	  unsigned HOST_WIDE_INT op_nonzero
4822 	    = cached_nonzero_bits (XEXP (x, 0), mode,
4823 				   known_x, known_mode, known_ret);
4824 	  unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
4825 	  unsigned HOST_WIDE_INT outer = 0;
4826 
4827 	  if (mode_width > xmode_width)
4828 	    outer = (op_nonzero & nonzero & ~mode_mask);
4829 
4830 	  switch (code)
4831 	    {
4832 	    case ASHIFT:
4833 	      inner <<= count;
4834 	      break;
4835 
4836 	    case LSHIFTRT:
4837 	      inner >>= count;
4838 	      break;
4839 
4840 	    case ASHIFTRT:
4841 	      inner >>= count;
4842 
4843 	      /* If the sign bit may have been nonzero before the shift, we
4844 		 need to mark all the places it could have been copied to
4845 		 by the shift as possibly nonzero.  */
4846 	      if (inner & (HOST_WIDE_INT_1U << (xmode_width - 1 - count)))
4847 		inner |= (((HOST_WIDE_INT_1U << count) - 1)
4848 			  << (xmode_width - count));
4849 	      break;
4850 
4851 	    case ROTATE:
4852 	      inner = (inner << (count % xmode_width)
4853 		       | (inner >> (xmode_width - (count % xmode_width))))
4854 		      & mode_mask;
4855 	      break;
4856 
4857 	    case ROTATERT:
4858 	      inner = (inner >> (count % xmode_width)
4859 		       | (inner << (xmode_width - (count % xmode_width))))
4860 		      & mode_mask;
4861 	      break;
4862 
4863 	    default:
4864 	      gcc_unreachable ();
4865 	    }
4866 
4867 	  nonzero &= (outer | inner);
4868 	}
4869       break;
4870 
4871     case FFS:
4872     case POPCOUNT:
4873       /* This is at most the number of bits in the mode.  */
4874       nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
4875       break;
4876 
4877     case CLZ:
4878       /* If CLZ has a known value at zero, then the nonzero bits are
4879 	 that value, plus the number of bits in the mode minus one.  */
4880       if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4881 	nonzero
4882 	  |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
4883       else
4884 	nonzero = -1;
4885       break;
4886 
4887     case CTZ:
4888       /* If CTZ has a known value at zero, then the nonzero bits are
4889 	 that value, plus the number of bits in the mode minus one.  */
4890       if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4891 	nonzero
4892 	  |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
4893       else
4894 	nonzero = -1;
4895       break;
4896 
4897     case CLRSB:
4898       /* This is at most the number of bits in the mode minus 1.  */
4899       nonzero = (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
4900       break;
4901 
4902     case PARITY:
4903       nonzero = 1;
4904       break;
4905 
4906     case IF_THEN_ELSE:
4907       {
4908 	unsigned HOST_WIDE_INT nonzero_true
4909 	  = cached_nonzero_bits (XEXP (x, 1), mode,
4910 				 known_x, known_mode, known_ret);
4911 
4912 	/* Don't call nonzero_bits for the second time if it cannot change
4913 	   anything.  */
4914 	if ((nonzero & nonzero_true) != nonzero)
4915 	  nonzero &= nonzero_true
4916       		     | cached_nonzero_bits (XEXP (x, 2), mode,
4917 					    known_x, known_mode, known_ret);
4918       }
4919       break;
4920 
4921     default:
4922       break;
4923     }
4924 
4925   return nonzero;
4926 }
4927 
4928 /* See the macro definition above.  */
4929 #undef cached_num_sign_bit_copies
4930 
4931 
4932 /* Return true if num_sign_bit_copies1 might recurse into both operands
4933    of X.  */
4934 
4935 static inline bool
4936 num_sign_bit_copies_binary_arith_p (const_rtx x)
4937 {
4938   if (!ARITHMETIC_P (x))
4939     return false;
4940   switch (GET_CODE (x))
4941     {
4942     case IOR:
4943     case AND:
4944     case XOR:
4945     case SMIN:
4946     case SMAX:
4947     case UMIN:
4948     case UMAX:
4949     case PLUS:
4950     case MINUS:
4951     case MULT:
4952       return true;
4953     default:
4954       return false;
4955     }
4956 }
4957 
4958 /* The function cached_num_sign_bit_copies is a wrapper around
4959    num_sign_bit_copies1.  It avoids exponential behavior in
4960    num_sign_bit_copies1 when X has identical subexpressions on the
4961    first or the second level.  */
4962 
4963 static unsigned int
4964 cached_num_sign_bit_copies (const_rtx x, scalar_int_mode mode,
4965 			    const_rtx known_x, machine_mode known_mode,
4966 			    unsigned int known_ret)
4967 {
4968   if (x == known_x && mode == known_mode)
4969     return known_ret;
4970 
4971   /* Try to find identical subexpressions.  If found call
4972      num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
4973      the precomputed value for the subexpression as KNOWN_RET.  */
4974 
4975   if (num_sign_bit_copies_binary_arith_p (x))
4976     {
4977       rtx x0 = XEXP (x, 0);
4978       rtx x1 = XEXP (x, 1);
4979 
4980       /* Check the first level.  */
4981       if (x0 == x1)
4982 	return
4983 	  num_sign_bit_copies1 (x, mode, x0, mode,
4984 				cached_num_sign_bit_copies (x0, mode, known_x,
4985 							    known_mode,
4986 							    known_ret));
4987 
4988       /* Check the second level.  */
4989       if (num_sign_bit_copies_binary_arith_p (x0)
4990 	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4991 	return
4992 	  num_sign_bit_copies1 (x, mode, x1, mode,
4993 				cached_num_sign_bit_copies (x1, mode, known_x,
4994 							    known_mode,
4995 							    known_ret));
4996 
4997       if (num_sign_bit_copies_binary_arith_p (x1)
4998 	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4999 	return
5000 	  num_sign_bit_copies1 (x, mode, x0, mode,
5001 				cached_num_sign_bit_copies (x0, mode, known_x,
5002 							    known_mode,
5003 							    known_ret));
5004     }
5005 
5006   return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
5007 }
5008 
5009 /* Return the number of bits at the high-order end of X that are known to
5010    be equal to the sign bit.  X will be used in mode MODE.  The returned
5011    value will always be between 1 and the number of bits in MODE.  */
5012 
5013 static unsigned int
5014 num_sign_bit_copies1 (const_rtx x, scalar_int_mode mode, const_rtx known_x,
5015 		      machine_mode known_mode,
5016 		      unsigned int known_ret)
5017 {
5018   enum rtx_code code = GET_CODE (x);
5019   unsigned int bitwidth = GET_MODE_PRECISION (mode);
5020   int num0, num1, result;
5021   unsigned HOST_WIDE_INT nonzero;
5022 
5023   if (CONST_INT_P (x))
5024     {
5025       /* If the constant is negative, take its 1's complement and remask.
5026 	 Then see how many zero bits we have.  */
5027       nonzero = UINTVAL (x) & GET_MODE_MASK (mode);
5028       if (bitwidth <= HOST_BITS_PER_WIDE_INT
5029 	  && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5030 	nonzero = (~nonzero) & GET_MODE_MASK (mode);
5031 
5032       return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
5033     }
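  /* Worked example: for MODE == SImode and X == (const_int -4), NONZERO
     is first 0xfffffffc; the sign bit is set, so it becomes ~0xfffffffc
     & 0xffffffff == 3, and the result is 32 - floor_log2 (3) - 1 == 30:
     bits 31..2 of 0xfffffffc all equal the sign bit.  */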
5034 
5035   scalar_int_mode xmode, inner_mode;
5036   if (!is_a <scalar_int_mode> (GET_MODE (x), &xmode))
5037     return 1;
5038 
5039   unsigned int xmode_width = GET_MODE_PRECISION (xmode);
5040 
5041   /* For a smaller mode, just ignore the high bits.  */
5042   if (bitwidth < xmode_width)
5043     {
5044       num0 = cached_num_sign_bit_copies (x, xmode,
5045 					 known_x, known_mode, known_ret);
5046       return MAX (1, num0 - (int) (xmode_width - bitwidth));
5047     }

  if (bitwidth > xmode_width)
    {
      /* If this machine does not do all register operations on the entire
	 register and MODE is wider than the mode of X, we can say nothing
	 at all about the high-order bits.  We extend this reasoning to RISC
	 machines for operations that might not operate on full registers.  */
      if (!(WORD_REGISTER_OPERATIONS && word_register_operation_p (x)))
	return 1;

      /* Likewise on machines that do, if the mode of the object is smaller
	 than a word and loads of that size don't sign extend, we can say
	 nothing about the high order bits.  */
      if (xmode_width < BITS_PER_WORD
	  && load_extend_op (xmode) != SIGN_EXTEND)
	return 1;
    }

  /* Please keep num_sign_bit_copies_binary_arith_p above in sync with
     the code in the switch below.  */
  switch (code)
    {
    case REG:

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* If pointers extend signed and this is a pointer in Pmode, say that
	 all the bits above ptr_mode are known to be sign bit copies.  */
      /* As we do not know which address space the pointer is referring to,
	 we can do this only if the target does not support different pointer
	 or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
	  && ! POINTERS_EXTEND_UNSIGNED && xmode == Pmode
	  && mode == Pmode && REG_POINTER (x)
	  && !targetm.have_ptr_extend ())
	return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1;
#endif

      {
	unsigned int copies_for_hook = 1, copies = 1;
	rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, xmode, mode,
							 &copies_for_hook);

	if (new_rtx)
	  copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
					       known_mode, known_ret);

	if (copies > 1 || copies_for_hook > 1)
	  return MAX (copies, copies_for_hook);

	/* Else, use nonzero_bits to guess num_sign_bit_copies (see below).  */
      }
      break;

    case MEM:
      /* Some RISC machines sign-extend all loads of smaller than a word.  */
      if (load_extend_op (xmode) == SIGN_EXTEND)
	return MAX (1, ((int) bitwidth - (int) xmode_width + 1));
      break;

    case SUBREG:
      /* If this is a SUBREG for a promoted object that is sign-extended
	 and we are looking at it in a wider mode, we know that at least the
	 high-order bits are known to be sign bit copies.  */

      if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_SIGNED_P (x))
	{
	  num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
					     known_x, known_mode, known_ret);
	  return MAX ((int) bitwidth - (int) xmode_width + 1, num0);
	}

      if (is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (x)), &inner_mode))
	{
	  /* For a smaller object, just ignore the high bits.  */
	  if (bitwidth <= GET_MODE_PRECISION (inner_mode))
	    {
	      num0 = cached_num_sign_bit_copies (SUBREG_REG (x), inner_mode,
						 known_x, known_mode,
						 known_ret);
	      return MAX (1, num0 - (int) (GET_MODE_PRECISION (inner_mode)
					   - bitwidth));
	    }

	  /* For paradoxical SUBREGs on machines where all register operations
	     affect the entire register, just look inside.  Note that we are
	     passing MODE to the recursive call, so the number of sign bit
	     copies will remain relative to that mode, not the inner mode.

	     This works only if loads sign extend.  Otherwise, if we get a
	     reload for the inner part, it may be loaded from the stack, and
	     then we lose all sign bit copies that existed before the store
	     to the stack.  */
	  if (WORD_REGISTER_OPERATIONS
	      && load_extend_op (inner_mode) == SIGN_EXTEND
	      && paradoxical_subreg_p (x)
	      && MEM_P (SUBREG_REG (x)))
	    return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
					       known_x, known_mode, known_ret);
	}
      break;

    case SIGN_EXTRACT:
      if (CONST_INT_P (XEXP (x, 1)))
	return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
      break;

    case SIGN_EXTEND:
      if (is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
	return (bitwidth - GET_MODE_PRECISION (inner_mode)
		+ cached_num_sign_bit_copies (XEXP (x, 0), inner_mode,
					      known_x, known_mode, known_ret));
      break;

    case TRUNCATE:
      /* For a smaller object, just ignore the high bits.  */
      inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), inner_mode,
					 known_x, known_mode, known_ret);
      return MAX (1, (num0 - (int) (GET_MODE_PRECISION (inner_mode)
				    - bitwidth)));

    case NOT:
      return cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);

    case ROTATE:       case ROTATERT:
      /* If we are rotating left by a number of bits less than the number
	 of sign bit copies, we can just subtract that amount from the
	 number.  */
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) >= 0
	  && INTVAL (XEXP (x, 1)) < (int) bitwidth)
	{
	  num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					     known_x, known_mode, known_ret);
	  return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
				 : (int) bitwidth - INTVAL (XEXP (x, 1))));
	}
      break;

    case NEG:
      /* In general, this subtracts one sign bit copy.  But if the value
	 is known to be positive, the number of sign bit copies is the
	 same as that of the input.  Finally, if the input has just one bit
	 that might be nonzero, all the bits are copies of the sign bit.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
	return num0 > 1 ? num0 - 1 : 1;

      nonzero = nonzero_bits (XEXP (x, 0), mode);
      if (nonzero == 1)
	return bitwidth;

      if (num0 > 1
	  && ((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero))
	num0--;

      return num0;

    case IOR:   case AND:   case XOR:
    case SMIN:  case SMAX:  case UMIN:  case UMAX:
      /* Logical operations will preserve the number of sign-bit copies.
	 MIN and MAX operations always return one of the operands.  */
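      /* For instance, (and X Y) in QImode where X has 3 copies and Y has
	 2 copies keeps at least MIN (3, 2) == 2 copies: each of the top
	 two result bits is computed from bits known to equal the
	 respective sign bits.  */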
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);

      /* If num1 is clearing some of the top bits then regardless of
	 the other term, we are guaranteed to have at least that many
	 high-order zero bits.  */
      if (code == AND
	  && num1 > 1
	  && bitwidth <= HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (XEXP (x, 1))
	  && (UINTVAL (XEXP (x, 1))
	      & (HOST_WIDE_INT_1U << (bitwidth - 1))) == 0)
	return num1;

      /* Similarly for IOR when setting high-order bits.  */
      if (code == IOR
	  && num1 > 1
	  && bitwidth <= HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (XEXP (x, 1))
	  && (UINTVAL (XEXP (x, 1))
	      & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
	return num1;

      return MIN (num0, num1);

    case PLUS:  case MINUS:
      /* For addition and subtraction, we can have a 1-bit carry.  However,
	 if we are subtracting 1 from a positive number, there will not
	 be such a carry.  Furthermore, if the positive number is known to
	 be 0 or 1, we know the result is either -1 or 0.  */
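      /* E.g. in QImode, if both operands are known to fit in 6 signed
	 bits (3 sign-bit copies each), their sum fits in 7 bits,
	 leaving MIN (3, 3) - 1 == 2 copies.  */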

      if (code == PLUS && XEXP (x, 1) == constm1_rtx
	  && bitwidth <= HOST_BITS_PER_WIDE_INT)
	{
	  nonzero = nonzero_bits (XEXP (x, 0), mode);
	  if (((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero) == 0)
	    return (nonzero == 1 || nonzero == 0 ? bitwidth
		    : bitwidth - floor_log2 (nonzero) - 1);
	}

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);
      result = MAX (1, MIN (num0, num1) - 1);

      return result;

    case MULT:
      /* The number of bits of the product is the sum of the number of
	 bits of both terms.  However, unless one of the terms is known
	 to be positive, we must allow for an additional bit since negating
	 a negative number can remove one sign bit copy.  */
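      /* E.g. in QImode, num0 == 6 (X in [-4, 3]) and num1 == 5
	 (Y in [-8, 7]) give result == 8 - 2 - 3 == 3; since neither
	 term is known nonnegative, one more bit is reserved for
	 -4 * -8 == 32, leaving 2 copies.  */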

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);

      result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
      if (result > 0
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (((nonzero_bits (XEXP (x, 0), mode)
		    & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
		  && ((nonzero_bits (XEXP (x, 1), mode)
		       & (HOST_WIDE_INT_1U << (bitwidth - 1)))
		      != 0))))
	result--;

      return MAX (1, result);

    case UDIV:
      /* The result must be <= the first operand.  If the first operand
	 has the high bit set, we know nothing about the number of sign
	 bit copies.  */
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
	return 1;
      else if ((nonzero_bits (XEXP (x, 0), mode)
		& (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
	return 1;
      else
	return cached_num_sign_bit_copies (XEXP (x, 0), mode,
					   known_x, known_mode, known_ret);

    case UMOD:
      /* The result must be <= the second operand.  If the second operand
	 has (or just might have) the high bit set, we know nothing about
	 the number of sign bit copies.  */
      if (bitwidth > HOST_BITS_PER_WIDE_INT)
	return 1;
      else if ((nonzero_bits (XEXP (x, 1), mode)
		& (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
	return 1;
      else
	return cached_num_sign_bit_copies (XEXP (x, 1), mode,
					   known_x, known_mode, known_ret);

    case DIV:
      /* Similar to unsigned division, except that we have to worry about
	 the case where the divisor is negative, in which case we have
	 to add 1.  */
      result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					   known_x, known_mode, known_ret);
      if (result > 1
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (nonzero_bits (XEXP (x, 1), mode)
		  & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
	result--;

      return result;

    case MOD:
      result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					   known_x, known_mode, known_ret);
      if (result > 1
	  && (bitwidth > HOST_BITS_PER_WIDE_INT
	      || (nonzero_bits (XEXP (x, 1), mode)
		  & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
	result--;

      return result;

    case ASHIFTRT:
      /* Shifts by a constant add to the number of bits equal to the
	 sign bit.  */
      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      if (CONST_INT_P (XEXP (x, 1))
	  && INTVAL (XEXP (x, 1)) > 0
	  && INTVAL (XEXP (x, 1)) < xmode_width)
	num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));

      return num0;

    case ASHIFT:
      /* Left shifts destroy copies.  */
      if (!CONST_INT_P (XEXP (x, 1))
	  || INTVAL (XEXP (x, 1)) < 0
	  || INTVAL (XEXP (x, 1)) >= (int) bitwidth
	  || INTVAL (XEXP (x, 1)) >= xmode_width)
	return 1;

      num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
					 known_x, known_mode, known_ret);
      return MAX (1, num0 - INTVAL (XEXP (x, 1)));

    case IF_THEN_ELSE:
      num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
					 known_x, known_mode, known_ret);
      num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
					 known_x, known_mode, known_ret);
      return MIN (num0, num1);

    case EQ:  case NE:  case GE:  case GT:  case LE:  case LT:
    case UNEQ:  case LTGT:  case UNGE:  case UNGT:  case UNLE:  case UNLT:
    case GEU: case GTU: case LEU: case LTU:
    case UNORDERED: case ORDERED:
      /* The result is STORE_FLAG_VALUE or zero.  If that constant is
	 negative, take its 1's complement and remask.  Then see how
	 many zero bits we have.  */
      nonzero = STORE_FLAG_VALUE;
      if (bitwidth <= HOST_BITS_PER_WIDE_INT
	  && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
	nonzero = (~nonzero) & GET_MODE_MASK (mode);

      return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);

    default:
      break;
    }

  /* If we haven't been able to figure it out by one of the above rules,
     see if some of the high-order bits are known to be zero.  If so,
     count those bits and return one less than that amount.  If we can't
     safely compute the mask for this mode, give up and return 1.  */
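  /* E.g. if nonzero_bits returns 0x1f in QImode, the value fits in six
     signed bits, so there are 8 - 4 - 1 == 3 sign-bit copies.  */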

  bitwidth = GET_MODE_PRECISION (mode);
  if (bitwidth > HOST_BITS_PER_WIDE_INT)
    return 1;

  nonzero = nonzero_bits (x, mode);
  return nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))
	 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
}

/* Calculate the rtx_cost of a single instruction pattern.  A return value of
   zero indicates an instruction pattern without a known cost.  */

int
pattern_cost (rtx pat, bool speed)
{
  int i, cost;
  rtx set;

  /* Extract the single set rtx from the instruction pattern.  We
     can't use single_set since we only have the pattern.  We also
     consider PARALLELs of a normal set and a single comparison.  In
     that case we use the cost of the non-comparison SET operation,
     which is most likely to be the real cost of this operation.  */
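  /* For example, a PARALLEL like

       (parallel [(set (reg:SI 0) (plus:SI (reg:SI 1) (reg:SI 2)))
		  (set (reg:CC FLAGS) (compare:CC ...))])

     is costed as the non-comparison SET alone.  */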
  if (GET_CODE (pat) == SET)
    set = pat;
  else if (GET_CODE (pat) == PARALLEL)
    {
      set = NULL_RTX;
      rtx comparison = NULL_RTX;

      for (i = 0; i < XVECLEN (pat, 0); i++)
	{
	  rtx x = XVECEXP (pat, 0, i);
	  if (GET_CODE (x) == SET)
	    {
	      if (GET_CODE (SET_SRC (x)) == COMPARE)
		{
		  if (comparison)
		    return 0;
		  comparison = x;
		}
	      else
		{
		  if (set)
		    return 0;
		  set = x;
		}
	    }
	}

      if (!set && comparison)
	set = comparison;

      if (!set)
	return 0;
    }
  else
    return 0;

  cost = set_src_cost (SET_SRC (set), GET_MODE (SET_DEST (set)), speed);
  return cost > 0 ? cost : COSTS_N_INSNS (1);
}

/* Calculate the cost of a single instruction.  A return value of zero
   indicates an instruction pattern without a known cost.  */

int
insn_cost (rtx_insn *insn, bool speed)
{
  if (targetm.insn_cost)
    return targetm.insn_cost (insn, speed);

  return pattern_cost (PATTERN (insn), speed);
}

/* Return an estimate of the cost of computing SEQ.  */

unsigned
seq_cost (const rtx_insn *seq, bool speed)
{
  unsigned cost = 0;
  rtx set;

  for (; seq; seq = NEXT_INSN (seq))
    {
      set = single_set (seq);
      if (set)
	cost += set_rtx_cost (set, speed);
      else if (NONDEBUG_INSN_P (seq))
	{
	  int this_cost = insn_cost (CONST_CAST_RTX_INSN (seq), speed);
	  if (this_cost > 0)
	    cost += this_cost;
	  else
	    cost++;
	}
    }

  return cost;
}
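
/* A minimal usage sketch (disabled; the generator named below is
   hypothetical): emit a candidate sequence and keep it only when its
   estimated cost fits a budget.  */
#if 0
static rtx_insn *
try_emit_cheap_sequence (bool speed, unsigned max_cost)
{
  start_sequence ();
  emit_candidate_insns ();	/* hypothetical sequence generator */
  rtx_insn *seq = get_insns ();
  end_sequence ();
  /* Keep the sequence only if it is cheap enough.  */
  return seq_cost (seq, speed) <= max_cost ? seq : NULL;
}
#endif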

/* Given an insn INSN and condition COND, return the condition in a
   canonical form to simplify testing by callers.  Specifically:

   (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
   (2) Both operands will be machine operands; (cc0) will have been replaced.
   (3) If an operand is a constant, it will be the second operand.
   (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
       for GE, GEU, and LEU.

   If the condition cannot be understood, or is an inequality floating-point
   comparison which needs to be reversed, 0 will be returned.

   If REVERSE is nonzero, then reverse the condition prior to canonicalizing
   it.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.

   If WANT_REG is nonzero, we wish the condition to be relative to that
   register, if possible.  Therefore, do not canonicalize the condition
   further.  If ALLOW_CC_MODE is nonzero, allow the condition returned
   to be a compare to a CC mode register.

   If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
   and at INSN.  */

rtx
canonicalize_condition (rtx_insn *insn, rtx cond, int reverse,
			rtx_insn **earliest,
			rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
{
  enum rtx_code code;
  rtx_insn *prev = insn;
  const_rtx set;
  rtx tem;
  rtx op0, op1;
  int reverse_code = 0;
  machine_mode mode;
  basic_block bb = BLOCK_FOR_INSN (insn);

  code = GET_CODE (cond);
  mode = GET_MODE (cond);
  op0 = XEXP (cond, 0);
  op1 = XEXP (cond, 1);

  if (reverse)
    code = reversed_comparison_code (cond, insn);
  if (code == UNKNOWN)
    return 0;

  if (earliest)
    *earliest = insn;

  /* If we are comparing a register with zero, see if the register is set
     in the previous insn to a COMPARE or a comparison operation.  Perform
     the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
     in cse.c does.  */

  while ((GET_RTX_CLASS (code) == RTX_COMPARE
	  || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
	 && op1 == CONST0_RTX (GET_MODE (op0))
	 && op0 != want_reg)
    {
      /* Set nonzero when we find something of interest.  */
      rtx x = 0;

      /* If comparison with cc0, import actual comparison from compare
	 insn.  */
      if (op0 == cc0_rtx)
	{
	  if ((prev = prev_nonnote_insn (prev)) == 0
	      || !NONJUMP_INSN_P (prev)
	      || (set = single_set (prev)) == 0
	      || SET_DEST (set) != cc0_rtx)
	    return 0;

	  op0 = SET_SRC (set);
	  op1 = CONST0_RTX (GET_MODE (op0));
	  if (earliest)
	    *earliest = prev;
	}

      /* If this is a COMPARE, pick up the two things being compared.  */
      if (GET_CODE (op0) == COMPARE)
	{
	  op1 = XEXP (op0, 1);
	  op0 = XEXP (op0, 0);
	  continue;
	}
      else if (!REG_P (op0))
	break;

      /* Go back to the previous insn.  Stop if it is not an INSN.  We also
	 stop if it isn't a single set or if it has a REG_INC note because
	 we don't want to bother dealing with it.  */

      prev = prev_nonnote_nondebug_insn (prev);

      if (prev == 0
	  || !NONJUMP_INSN_P (prev)
	  || FIND_REG_INC_NOTE (prev, NULL_RTX)
	  /* In cfglayout mode, there do not have to be labels at the
	     beginning of a block, or jumps at the end, so the previous
	     conditions would not stop us when we reach bb boundary.  */
	  || BLOCK_FOR_INSN (prev) != bb)
	break;

      set = set_of (op0, prev);

      if (set
	  && (GET_CODE (set) != SET
	      || !rtx_equal_p (SET_DEST (set), op0)))
	break;

      /* If this is setting OP0, get what it sets it to if it looks
	 relevant.  */
      if (set)
	{
	  machine_mode inner_mode = GET_MODE (SET_DEST (set));
#ifdef FLOAT_STORE_FLAG_VALUE
	  REAL_VALUE_TYPE fsfv;
#endif

	  /* ??? We may not combine comparisons done in a CCmode with
	     comparisons not done in a CCmode.  This is to aid targets
	     like Alpha that have an IEEE compliant EQ instruction, and
	     a non-IEEE compliant BEQ instruction.  The use of CCmode is
	     actually artificial, simply to prevent the combination, but
	     should not affect other platforms.

	     However, we must allow VOIDmode comparisons to match either
	     CCmode or non-CCmode comparison, because some ports have
	     modeless comparisons inside branch patterns.

	     ??? This mode check should perhaps look more like the mode check
	     in simplify_comparison in combine.  */
	  if (((GET_MODE_CLASS (mode) == MODE_CC)
	       != (GET_MODE_CLASS (inner_mode) == MODE_CC))
	      && mode != VOIDmode
	      && inner_mode != VOIDmode)
	    break;
	  if (GET_CODE (SET_SRC (set)) == COMPARE
	      || (((code == NE
		    || (code == LT
			&& val_signbit_known_set_p (inner_mode,
						    STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
		    || (code == LT
			&& SCALAR_FLOAT_MODE_P (inner_mode)
			&& (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
			    REAL_VALUE_NEGATIVE (fsfv)))
#endif
		    ))
		  && COMPARISON_P (SET_SRC (set))))
	    x = SET_SRC (set);
	  else if (((code == EQ
		     || (code == GE
			 && val_signbit_known_set_p (inner_mode,
						     STORE_FLAG_VALUE))
#ifdef FLOAT_STORE_FLAG_VALUE
		     || (code == GE
			 && SCALAR_FLOAT_MODE_P (inner_mode)
			 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
			     REAL_VALUE_NEGATIVE (fsfv)))
#endif
		     ))
		   && COMPARISON_P (SET_SRC (set)))
	    {
	      reverse_code = 1;
	      x = SET_SRC (set);
	    }
	  else if ((code == EQ || code == NE)
		   && GET_CODE (SET_SRC (set)) == XOR)
	    /* Handle sequences like:

	       (set op0 (xor X Y))
	       ...(eq|ne op0 (const_int 0))...

	       in which case:

	       (eq op0 (const_int 0)) reduces to (eq X Y)
	       (ne op0 (const_int 0)) reduces to (ne X Y)

	       This is the form used by MIPS16, for example.  */
	    x = SET_SRC (set);
	  else
	    break;
	}

      else if (reg_set_p (op0, prev))
	/* If this sets OP0, but not directly, we have to give up.  */
	break;

      if (x)
	{
	  /* If the caller is expecting the condition to be valid at INSN,
	     make sure X doesn't change before INSN.  */
	  if (valid_at_insn_p)
	    if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
	      break;
	  if (COMPARISON_P (x))
	    code = GET_CODE (x);
	  if (reverse_code)
	    {
	      code = reversed_comparison_code (x, prev);
	      if (code == UNKNOWN)
		return 0;
	      reverse_code = 0;
	    }

	  op0 = XEXP (x, 0), op1 = XEXP (x, 1);
	  if (earliest)
	    *earliest = prev;
	}
    }

  /* If constant is first, put it last.  */
  if (CONSTANT_P (op0))
    code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;

  /* If OP0 is the result of a comparison, we weren't able to find what
     was really being compared, so fail.  */
  if (!allow_cc_mode
      && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
    return 0;

  /* Canonicalize any ordered comparison with integers involving equality
     if we can do computations in the relevant mode and we do not
     overflow.  */

  scalar_int_mode op0_mode;
  if (CONST_INT_P (op1)
      && is_a <scalar_int_mode> (GET_MODE (op0), &op0_mode)
      && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT)
    {
      HOST_WIDE_INT const_val = INTVAL (op1);
      unsigned HOST_WIDE_INT uconst_val = const_val;
      unsigned HOST_WIDE_INT max_val
	= (unsigned HOST_WIDE_INT) GET_MODE_MASK (op0_mode);

      switch (code)
	{
	case LE:
	  if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
	    code = LT, op1 = gen_int_mode (const_val + 1, op0_mode);
	  break;

	/* When cross-compiling, const_val might be sign-extended from
	   BITS_PER_WORD to HOST_BITS_PER_WIDE_INT.  */
	case GE:
	  if ((const_val & max_val)
	      != (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (op0_mode) - 1)))
	    code = GT, op1 = gen_int_mode (const_val - 1, op0_mode);
	  break;

	case LEU:
	  if (uconst_val < max_val)
	    code = LTU, op1 = gen_int_mode (uconst_val + 1, op0_mode);
	  break;

	case GEU:
	  if (uconst_val != 0)
	    code = GTU, op1 = gen_int_mode (uconst_val - 1, op0_mode);
	  break;

	default:
	  break;
	}
    }

  /* Never return CC0; return zero instead.  */
  if (CC0_P (op0))
    return 0;

  /* We promised to return a comparison.  */
  rtx ret = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
  if (COMPARISON_P (ret))
    return ret;
  return 0;
}
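
/* For example, rule (4) above rewrites (le (reg:SI 0) (const_int 4))
   as (lt (reg:SI 0) (const_int 5)), and (geu (reg:SI 0) (const_int 1))
   as (gtu (reg:SI 0) (const_int 0)).  */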

/* Given a jump insn JUMP, return the condition that will cause it to branch
   to its JUMP_LABEL.  If the condition cannot be understood, or is an
   inequality floating-point comparison which needs to be reversed, 0 will
   be returned.

   If EARLIEST is nonzero, it is a pointer to a place where the earliest
   insn used in locating the condition was found.  If a replacement test
   of the condition is desired, it should be placed in front of that
   insn and we will be sure that the inputs are still valid.  If EARLIEST
   is null, the returned condition will be valid at INSN.

   If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
   compare CC mode register.

   VALID_AT_INSN_P is the same as for canonicalize_condition.  */

rtx
get_condition (rtx_insn *jump, rtx_insn **earliest, int allow_cc_mode,
	       int valid_at_insn_p)
{
  rtx cond;
  int reverse;
  rtx set;

  /* If this is not a standard conditional jump, we can't parse it.  */
  if (!JUMP_P (jump)
      || ! any_condjump_p (jump))
    return 0;
  set = pc_set (jump);

  cond = XEXP (SET_SRC (set), 0);

  /* If this branches to JUMP_LABEL when the condition is false, reverse
     the condition.  */
  reverse
    = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
      && label_ref_label (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (jump);

  return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
				 allow_cc_mode, valid_at_insn_p);
}

/* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
   TARGET_MODE_REP_EXTENDED.

   Note that we assume that the property of
   TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
   narrower than mode B.  I.e., if A is a mode narrower than B then in
   order to be able to operate on it in mode B, mode A needs to
   satisfy the requirements set by the representation of mode B.  */

static void
init_num_sign_bit_copies_in_rep (void)
{
  opt_scalar_int_mode in_mode_iter;
  scalar_int_mode mode;

  FOR_EACH_MODE_IN_CLASS (in_mode_iter, MODE_INT)
    FOR_EACH_MODE_UNTIL (mode, in_mode_iter.require ())
      {
	scalar_int_mode in_mode = in_mode_iter.require ();
	scalar_int_mode i;

	/* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
	   extends to the next widest mode.  */
	gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
		    || GET_MODE_WIDER_MODE (mode).require () == in_mode);

	/* We are in in_mode.  Count how many bits outside of mode
	   have to be copies of the sign-bit.  */
	FOR_EACH_MODE (i, mode, in_mode)
	  {
	    /* This must always exist (for the last iteration it will be
	       IN_MODE).  */
	    scalar_int_mode wider = GET_MODE_WIDER_MODE (i).require ();

	    if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
		/* We can only check sign-bit copies starting from the
		   top-bit.  In order to be able to check the bits we
		   have already seen we pretend that subsequent bits
		   have to be sign-bit copies too.  */
		|| num_sign_bit_copies_in_rep [in_mode][mode])
	      num_sign_bit_copies_in_rep [in_mode][mode]
		+= GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
	  }
      }
}
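
/* For instance, on a target where SImode values are always kept
   sign-extended in DImode registers (mode_rep_extended (SImode, DImode)
   == SIGN_EXTEND), the loop above records
   num_sign_bit_copies_in_rep[DImode][SImode] == 32.  */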

/* Suppose that truncation from the machine mode of X to MODE is not a
   no-op.  See if there is anything special about X so that we can
   assume it already contains a truncated value of MODE.  */

bool
truncated_to_mode (machine_mode mode, const_rtx x)
{
  /* This register has already been used in MODE without explicit
     truncation.  */
  if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
    return true;

  /* See if we already satisfy the requirements of MODE.  If yes we
     can just switch to MODE.  */
  if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
      && (num_sign_bit_copies (x, GET_MODE (x))
	  >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
    return true;

  return false;
}

/* Return true if RTX code CODE has a single sequence of zero or more
   "e" operands and no rtvec operands.  Initialize its rtx_all_subrtx_bounds
   entry in that case.  */

static bool
setup_reg_subrtx_bounds (unsigned int code)
{
  const char *format = GET_RTX_FORMAT ((enum rtx_code) code);
  unsigned int i = 0;
  for (; format[i] != 'e'; ++i)
    {
      if (!format[i])
	/* No subrtxes.  Leave start and count as 0.  */
	return true;
      if (format[i] == 'E' || format[i] == 'V')
	return false;
    }

  /* Record the sequence of 'e's.  */
  rtx_all_subrtx_bounds[code].start = i;
  do
    ++i;
  while (format[i] == 'e');
  rtx_all_subrtx_bounds[code].count = i - rtx_all_subrtx_bounds[code].start;
  /* rtl-iter.h relies on this.  */
  gcc_checking_assert (rtx_all_subrtx_bounds[code].count <= 3);

  for (; format[i]; ++i)
    if (format[i] == 'E' || format[i] == 'V' || format[i] == 'e')
      return false;

  return true;
}
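
/* E.g. PLUS has format "ee", so it gets start == 0 and count == 2;
   ZERO_EXTRACT ("eee") gets count == 3; PARALLEL contains an rtvec
   ("E"), so setup_reg_subrtx_bounds returns false for it.  */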

/* Initialize rtx_all_subrtx_bounds.  */
void
init_rtlanal (void)
{
  int i;
  for (i = 0; i < NUM_RTX_CODE; i++)
    {
      if (!setup_reg_subrtx_bounds (i))
	rtx_all_subrtx_bounds[i].count = UCHAR_MAX;
      if (GET_RTX_CLASS (i) != RTX_CONST_OBJ)
	rtx_nonconst_subrtx_bounds[i] = rtx_all_subrtx_bounds[i];
    }

  init_num_sign_bit_copies_in_rep ();
}

/* Check whether this is a constant pool constant.  */
bool
constant_pool_constant_p (rtx x)
{
  x = avoid_constant_pool_reference (x);
  return CONST_DOUBLE_P (x);
}

/* If M is a bitmask that selects a field of low-order bits within an item but
   not the entire word, return the length of the field.  Return -1 otherwise.
   M is used in machine mode MODE.  */

int
low_bitmask_len (machine_mode mode, unsigned HOST_WIDE_INT m)
{
  if (mode != VOIDmode)
    {
      if (!HWI_COMPUTABLE_MODE_P (mode))
	return -1;
      m &= GET_MODE_MASK (mode);
    }

  return exact_log2 (m + 1);
}
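
/* E.g. M == 0x7f gives exact_log2 (0x80) == 7, a seven-bit low field,
   while M == 0x06 gives exact_log2 (0x07) == -1 because the mask does
   not start at bit 0.  */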

/* Return the mode of MEM's address.  */

scalar_int_mode
get_address_mode (rtx mem)
{
  machine_mode mode;

  gcc_assert (MEM_P (mem));
  mode = GET_MODE (XEXP (mem, 0));
  if (mode != VOIDmode)
    return as_a <scalar_int_mode> (mode);
  return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
}

/* Split up a CONST_DOUBLE or integer constant rtx
   into two rtx's for single words,
   storing in *FIRST the word that comes first in memory in the target
   and in *SECOND the other.

   TODO: This function needs to be rewritten to work on any size
   integer.  */

void
split_double (rtx value, rtx *first, rtx *second)
{
  if (CONST_INT_P (value))
    {
      if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
	{
	  /* In this case the CONST_INT holds both target words.
	     Extract the bits from it into two word-sized pieces.
	     Sign extend each half to HOST_WIDE_INT.  */
	  unsigned HOST_WIDE_INT low, high;
	  unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
	  unsigned bits_per_word = BITS_PER_WORD;

	  /* Set sign_bit to the most significant bit of a word.  */
	  sign_bit = 1;
	  sign_bit <<= bits_per_word - 1;

	  /* Set mask so that all bits of the word are set.  We could
	     have used 1 << BITS_PER_WORD instead of basing the
	     calculation on sign_bit.  However, on machines where
	     HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
	     compiler warning, even though the code would never be
	     executed.  */
	  mask = sign_bit << 1;
	  mask--;

	  /* Set sign_extend as any remaining bits.  */
	  sign_extend = ~mask;

	  /* Pick the lower word and sign-extend it.  */
	  low = INTVAL (value);
	  low &= mask;
	  if (low & sign_bit)
	    low |= sign_extend;

	  /* Pick the higher word, shifted to the least significant
	     bits, and sign-extend it.  */
	  high = INTVAL (value);
	  high >>= bits_per_word - 1;
	  high >>= 1;
	  high &= mask;
	  if (high & sign_bit)
	    high |= sign_extend;

	  /* Store the words in the target machine order.  */
	  if (WORDS_BIG_ENDIAN)
	    {
	      *first = GEN_INT (high);
	      *second = GEN_INT (low);
	    }
	  else
	    {
	      *first = GEN_INT (low);
	      *second = GEN_INT (high);
	    }
	}
      else
	{
	  /* The rule for using CONST_INT for a wider mode
	     is that we regard the value as signed.
	     So sign-extend it.  */
	  rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
	  if (WORDS_BIG_ENDIAN)
	    {
	      *first = high;
	      *second = value;
	    }
	  else
	    {
	      *first = value;
	      *second = high;
	    }
	}
    }
  else if (GET_CODE (value) == CONST_WIDE_INT)
    {
      /* All of this is scary code and needs to be converted to
	 properly work with any size integer.  */
      gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2);
      if (WORDS_BIG_ENDIAN)
	{
	  *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
	  *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
	}
      else
	{
	  *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
	  *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
	}
    }
  else if (!CONST_DOUBLE_P (value))
    {
      if (WORDS_BIG_ENDIAN)
	{
	  *first = const0_rtx;
	  *second = value;
	}
      else
	{
	  *first = value;
	  *second = const0_rtx;
	}
    }
  else if (GET_MODE (value) == VOIDmode
	   /* This is the old way we did CONST_DOUBLE integers.  */
	   || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
    {
      /* In an integer, the words are defined as most and least significant.
	 So order them by the target's convention.  */
      if (WORDS_BIG_ENDIAN)
	{
	  *first = GEN_INT (CONST_DOUBLE_HIGH (value));
	  *second = GEN_INT (CONST_DOUBLE_LOW (value));
	}
      else
	{
	  *first = GEN_INT (CONST_DOUBLE_LOW (value));
	  *second = GEN_INT (CONST_DOUBLE_HIGH (value));
	}
    }
  else
    {
      long l[2];

      /* Note, this converts the REAL_VALUE_TYPE to the target's
	 format, splits up the floating point double and outputs
	 exactly 32 bits of it into each of l[0] and l[1] --
	 not necessarily BITS_PER_WORD bits.  */
      REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (value), l);

      /* If 32 bits is an entire word for the target, but not for the host,
	 then sign-extend on the host so that the number will look the same
	 way on the host that it would on the target.  See for instance
	 simplify_unary_operation.  The #if is needed to avoid compiler
	 warnings.  */

#if HOST_BITS_PER_LONG > 32
      if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
	{
	  if (l[0] & ((long) 1 << 31))
	    l[0] |= ((unsigned long) (-1) << 32);
	  if (l[1] & ((long) 1 << 31))
	    l[1] |= ((unsigned long) (-1) << 32);
	}
#endif

      *first = GEN_INT (l[0]);
      *second = GEN_INT (l[1]);
    }
}
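
/* For example, with 32-bit words and a 64-bit HOST_WIDE_INT, splitting
   (const_int 0x100000002) on a little-endian target stores
   (const_int 2) in *FIRST and (const_int 1) in *SECOND.  */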

/* Return true if X is a sign_extract or zero_extract from the least
   significant bit.  */

static bool
lsb_bitfield_op_p (rtx x)
{
  if (GET_RTX_CLASS (GET_CODE (x)) == RTX_BITFIELD_OPS)
    {
      machine_mode mode = GET_MODE (XEXP (x, 0));
      HOST_WIDE_INT len = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));
      poly_int64 remaining_bits = GET_MODE_PRECISION (mode) - len;

      return known_eq (pos, BITS_BIG_ENDIAN ? remaining_bits : 0);
    }
  return false;
}

/* Strip outer address "mutations" from LOC and return a pointer to the
   inner value.  If OUTER_CODE is nonnull, store the code of the innermost
   stripped expression there.

   "Mutations" either convert between modes or apply some kind of
   extension, truncation or alignment.  */

rtx *
strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
{
  for (;;)
    {
      enum rtx_code code = GET_CODE (*loc);
      if (GET_RTX_CLASS (code) == RTX_UNARY)
	/* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
	   used to convert between pointer sizes.  */
	loc = &XEXP (*loc, 0);
      else if (lsb_bitfield_op_p (*loc))
	/* A [SIGN|ZERO]_EXTRACT from the least significant bit effectively
	   acts as a combined truncation and extension.  */
	loc = &XEXP (*loc, 0);
      else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
	/* (and ... (const_int -X)) is used to align to X bytes.  */
	loc = &XEXP (*loc, 0);
      else if (code == SUBREG
	       && !OBJECT_P (SUBREG_REG (*loc))
	       && subreg_lowpart_p (*loc))
	/* (subreg (operator ...) ...) inside an address is used for mode
	   conversion too.  */
	loc = &SUBREG_REG (*loc);
      else
	return loc;
      if (outer_code)
	*outer_code = code;
    }
}
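
/* For example, (and:DI (subreg:DI (plus:SI (reg:SI 0) (reg:SI 1)) 0)
   (const_int -4)) strips down to the inner PLUS: the AND is an
   alignment and the lowpart SUBREG a mode conversion.  */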

/* Return true if CODE applies some kind of scale.  The scaled value is
   the first operand and the scale is the second.  */

static bool
binary_scale_code_p (enum rtx_code code)
{
  return (code == MULT
	  || code == ASHIFT
	  /* Needed by ARM targets.  */
	  || code == ASHIFTRT
	  || code == LSHIFTRT
	  || code == ROTATE
	  || code == ROTATERT);
}

/* If *INNER can be interpreted as a base, return a pointer to the inner term
   (see address_info).  Return null otherwise.  */

static rtx *
get_base_term (rtx *inner)
{
  if (GET_CODE (*inner) == LO_SUM)
    inner = strip_address_mutations (&XEXP (*inner, 0));
  if (REG_P (*inner)
      || MEM_P (*inner)
      || GET_CODE (*inner) == SUBREG
      || GET_CODE (*inner) == SCRATCH)
    return inner;
  return 0;
}

/* If *INNER can be interpreted as an index, return a pointer to the inner term
   (see address_info).  Return null otherwise.  */

static rtx *
get_index_term (rtx *inner)
{
  /* At present, only constant scales are allowed.  */
  if (binary_scale_code_p (GET_CODE (*inner)) && CONSTANT_P (XEXP (*inner, 1)))
    inner = strip_address_mutations (&XEXP (*inner, 0));
  if (REG_P (*inner)
      || MEM_P (*inner)
      || GET_CODE (*inner) == SUBREG
      || GET_CODE (*inner) == SCRATCH)
    return inner;
  return 0;
}

/* Set the segment part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->segment);
  info->segment = loc;
  info->segment_term = inner;
}

/* Set the base part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_base (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->base);
  info->base = loc;
  info->base_term = inner;
}

/* Set the index part of address INFO to LOC, given that INNER is the
   unmutated value.  */

static void
set_address_index (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->index);
  info->index = loc;
  info->index_term = inner;
}

/* Set the displacement part of address INFO to LOC, given that INNER
   is the constant term.  */

static void
set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
{
  gcc_assert (!info->disp);
  info->disp = loc;
  info->disp_term = inner;
}

/* INFO->INNER describes a {PRE,POST}_{INC,DEC} address.  Set up the
   rest of INFO accordingly.  */

static void
decompose_incdec_address (struct address_info *info)
{
  info->autoinc_p = true;

  rtx *base = &XEXP (*info->inner, 0);
  set_address_base (info, base, base);
  gcc_checking_assert (info->base == info->base_term);

  /* These addresses are only valid when the size of the addressed
     value is known.  */
  gcc_checking_assert (info->mode != VOIDmode);
}

/* INFO->INNER describes a {PRE,POST}_MODIFY address.  Set up the rest
   of INFO accordingly.  */

static void
decompose_automod_address (struct address_info *info)
{
  info->autoinc_p = true;

  rtx *base = &XEXP (*info->inner, 0);
  set_address_base (info, base, base);
  gcc_checking_assert (info->base == info->base_term);

  rtx plus = XEXP (*info->inner, 1);
  gcc_assert (GET_CODE (plus) == PLUS);

  info->base_term2 = &XEXP (plus, 0);
  gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));

  rtx *step = &XEXP (plus, 1);
  rtx *inner_step = strip_address_mutations (step);
  if (CONSTANT_P (*inner_step))
    set_address_disp (info, step, inner_step);
  else
    set_address_index (info, step, inner_step);
}

/* Treat *LOC as a tree of PLUS operands and store pointers to the summed
   values in [PTR, END).  Return a pointer to the end of the used array.  */

static rtx **
extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
{
  rtx x = *loc;
  if (GET_CODE (x) == PLUS)
    {
      ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
      ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
    }
  else
    {
      gcc_assert (ptr != end);
      *ptr++ = loc;
    }
  return ptr;
}

/* Evaluate the likelihood of X being a base or index value, returning
   positive if it is likely to be a base, negative if it is likely to be
   an index, and 0 if we can't tell.  Make the magnitude of the return
   value reflect the amount of confidence we have in the answer.

   MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1.  */

static int
baseness (rtx x, machine_mode mode, addr_space_t as,
	  enum rtx_code outer_code, enum rtx_code index_code)
{
  /* Believe *_POINTER unless the address shape requires otherwise.  */
  if (REG_P (x) && REG_POINTER (x))
    return 2;
  if (MEM_P (x) && MEM_POINTER (x))
    return 2;

  if (REG_P (x) && HARD_REGISTER_P (x))
    {
      /* X is a hard register.  If it only fits one of the base
	 or index classes, choose that interpretation.  */
      int regno = REGNO (x);
      bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
      bool index_p = REGNO_OK_FOR_INDEX_P (regno);
      if (base_p != index_p)
	return base_p ? 1 : -1;
    }
  return 0;
}

/* INFO->INNER describes a normal, non-automodified address.
   Fill in the rest of INFO accordingly.  */

static void
decompose_normal_address (struct address_info *info)
{
  /* Treat the address as the sum of up to four values.  */
  rtx *ops[4];
  size_t n_ops = extract_plus_operands (info->inner, ops,
					ops + ARRAY_SIZE (ops)) - ops;

  /* If there is more than one component, any base component is in a PLUS.  */
  if (n_ops > 1)
    info->base_outer_code = PLUS;

  /* Try to classify each sum operand now.  Leave those that could be
     either a base or an index in OPS.  */
  rtx *inner_ops[4];
  size_t out = 0;
  for (size_t in = 0; in < n_ops; ++in)
    {
      rtx *loc = ops[in];
      rtx *inner = strip_address_mutations (loc);
      if (CONSTANT_P (*inner))
	set_address_disp (info, loc, inner);
      else if (GET_CODE (*inner) == UNSPEC)
	set_address_segment (info, loc, inner);
      else
	{
	  /* The only other possibilities are a base or an index.  */
	  rtx *base_term = get_base_term (inner);
	  rtx *index_term = get_index_term (inner);
	  gcc_assert (base_term || index_term);
	  if (!base_term)
	    set_address_index (info, loc, index_term);
	  else if (!index_term)
	    set_address_base (info, loc, base_term);
	  else
	    {
	      gcc_assert (base_term == index_term);
	      ops[out] = loc;
	      inner_ops[out] = base_term;
	      ++out;
	    }
	}
    }

  /* Classify the remaining OPS members as bases and indexes.  */
  if (out == 1)
    {
      /* If we haven't seen a base or an index yet, assume that this is
	 the base.  If we were confident that another term was the base
	 or index, treat the remaining operand as the other kind.  */
      if (!info->base)
	set_address_base (info, ops[0], inner_ops[0]);
      else
	set_address_index (info, ops[0], inner_ops[0]);
    }
  else if (out == 2)
    {
      /* In the event of a tie, assume the base comes first.  */
      if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
		    GET_CODE (*ops[1]))
	  >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
		       GET_CODE (*ops[0])))
	{
	  set_address_base (info, ops[0], inner_ops[0]);
	  set_address_index (info, ops[1], inner_ops[1]);
	}
      else
	{
	  set_address_base (info, ops[1], inner_ops[1]);
	  set_address_index (info, ops[0], inner_ops[0]);
	}
    }
  else
    gcc_assert (out == 0);
}
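
/* For example, decomposing (plus (plus (reg:SI 0) (mult:SI (reg:SI 1)
   (const_int 4))) (const_int 12)) records (const_int 12) as the
   displacement, the MULT as the index (with term (reg:SI 1)) and
   (reg:SI 0) as the base.  */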

/* Describe address *LOC in *INFO.  MODE is the mode of the addressed value,
   or VOIDmode if not known.  AS is the address space associated with LOC.
   OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise.  */

void
decompose_address (struct address_info *info, rtx *loc, machine_mode mode,
		   addr_space_t as, enum rtx_code outer_code)
{
  memset (info, 0, sizeof (*info));
  info->mode = mode;
  info->as = as;
  info->addr_outer_code = outer_code;
  info->outer = loc;
  info->inner = strip_address_mutations (loc, &outer_code);
  info->base_outer_code = outer_code;
  switch (GET_CODE (*info->inner))
    {
    case PRE_DEC:
    case PRE_INC:
    case POST_DEC:
    case POST_INC:
      decompose_incdec_address (info);
      break;

    case PRE_MODIFY:
    case POST_MODIFY:
      decompose_automod_address (info);
      break;

    default:
      decompose_normal_address (info);
      break;
    }
}

/* Describe address operand LOC in INFO.  */

void
decompose_lea_address (struct address_info *info, rtx *loc)
{
  decompose_address (info, loc, VOIDmode, ADDR_SPACE_GENERIC, ADDRESS);
}

/* Describe the address of MEM X in INFO.  */

void
decompose_mem_address (struct address_info *info, rtx x)
{
  gcc_assert (MEM_P (x));
  decompose_address (info, &XEXP (x, 0), GET_MODE (x),
		     MEM_ADDR_SPACE (x), MEM);
}
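
/* A minimal usage sketch (disabled; the function name is hypothetical):
   decompose a MEM's address and dump the parts that were found.  */
#if 0
static void
dump_mem_address_parts (rtx mem)
{
  struct address_info info;
  decompose_mem_address (&info, mem);
  if (info.base)
    debug_rtx (*info.base_term);	/* the unmutated base */
  if (info.index)
    debug_rtx (*info.index_term);	/* the unmutated index */
  if (info.disp)
    debug_rtx (*info.disp);		/* the displacement */
}
#endif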

/* Update INFO after a change to the address it describes.  */

void
update_address (struct address_info *info)
{
  decompose_address (info, info->outer, info->mode, info->as,
		     info->addr_outer_code);
}

/* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
   more complicated than that.  */

HOST_WIDE_INT
get_index_scale (const struct address_info *info)
{
  rtx index = *info->index;
  if (GET_CODE (index) == MULT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return INTVAL (XEXP (index, 1));

  if (GET_CODE (index) == ASHIFT
      && CONST_INT_P (XEXP (index, 1))
      && info->index_term == &XEXP (index, 0))
    return HOST_WIDE_INT_1 << INTVAL (XEXP (index, 1));

  if (info->index == info->index_term)
    return 1;

  return 0;
}
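
/* E.g. an index of (mult (reg) (const_int 4)) or (ashift (reg)
   (const_int 2)) yields a scale of 4; a bare (reg) index yields 1.  */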

/* Return the "index code" of INFO, in the form required by
   ok_for_base_p_1.  */

enum rtx_code
get_index_code (const struct address_info *info)
{
  if (info->index)
    return GET_CODE (*info->index);

  if (info->disp)
    return GET_CODE (*info->disp);

  return SCRATCH;
}

/* Return true if RTL X contains a SYMBOL_REF.  */

bool
contains_symbol_ref_p (const_rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (SYMBOL_REF_P (*iter))
      return true;

  return false;
}

/* Return true if RTL X contains a SYMBOL_REF or LABEL_REF.  */

bool
contains_symbolic_reference_p (const_rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (SYMBOL_REF_P (*iter) || GET_CODE (*iter) == LABEL_REF)
      return true;

  return false;
}

/* Return true if RTL X contains a constant pool address.  */

bool
contains_constant_pool_address_p (const_rtx x)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (SYMBOL_REF_P (*iter) && CONSTANT_POOL_ADDRESS_P (*iter))
      return true;

  return false;
}


/* Return true if X contains a thread-local symbol.  */

bool
tls_referenced_p (const_rtx x)
{
  if (!targetm.have_tls)
    return false;

  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, ALL)
    if (GET_CODE (*iter) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (*iter) != 0)
      return true;
  return false;
}

/* Return true if reg REGNO with mode REG_MODE would be clobbered by the
   clobber_high operand in CLOBBER_HIGH_OP.  */

bool
reg_is_clobbered_by_clobber_high (unsigned int regno, machine_mode reg_mode,
				  const_rtx clobber_high_op)
{
  unsigned int clobber_regno = REGNO (clobber_high_op);
  machine_mode clobber_mode = GET_MODE (clobber_high_op);
  unsigned char regno_nregs = hard_regno_nregs (regno, reg_mode);

  /* Clobber high should always span exactly one register.  */
  gcc_assert (REG_NREGS (clobber_high_op) == 1);

  /* Clobber high needs to match with one of the registers in X.  */
  if (clobber_regno < regno || clobber_regno >= regno + regno_nregs)
    return false;

  gcc_assert (reg_mode != BLKmode && clobber_mode != BLKmode);

  if (reg_mode == VOIDmode)
    return clobber_mode != VOIDmode;

  /* Clobber high will clobber if its size might be greater than the size of
     register regno.  */
  return maybe_gt (exact_div (GET_MODE_SIZE (reg_mode), regno_nregs),
		   GET_MODE_SIZE (clobber_mode));
}