1 /* Analyze RTL for GNU compiler.
2 Copyright (C) 1987-2018 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "backend.h"
25 #include "target.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "predict.h"
29 #include "df.h"
30 #include "memmodel.h"
31 #include "tm_p.h"
32 #include "insn-config.h"
33 #include "regs.h"
34 #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
35 #include "recog.h"
36 #include "addresses.h"
37 #include "rtl-iter.h"
38
39 /* Forward declarations */
40 static void set_of_1 (rtx, const_rtx, void *);
41 static bool covers_regno_p (const_rtx, unsigned int);
42 static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
43 static int computed_jump_p_1 (const_rtx);
44 static void parms_set (rtx, const_rtx, void *);
45
46 static unsigned HOST_WIDE_INT cached_nonzero_bits (const_rtx, scalar_int_mode,
47 const_rtx, machine_mode,
48 unsigned HOST_WIDE_INT);
49 static unsigned HOST_WIDE_INT nonzero_bits1 (const_rtx, scalar_int_mode,
50 const_rtx, machine_mode,
51 unsigned HOST_WIDE_INT);
52 static unsigned int cached_num_sign_bit_copies (const_rtx, scalar_int_mode,
53 const_rtx, machine_mode,
54 unsigned int);
55 static unsigned int num_sign_bit_copies1 (const_rtx, scalar_int_mode,
56 const_rtx, machine_mode,
57 unsigned int);
58
59 rtx_subrtx_bound_info rtx_all_subrtx_bounds[NUM_RTX_CODE];
60 rtx_subrtx_bound_info rtx_nonconst_subrtx_bounds[NUM_RTX_CODE];
61
62 /* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
63 If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
64 SIGN_EXTEND then while narrowing we also have to enforce the
65 representation and sign-extend the value to mode DESTINATION_REP.
66
67 If the value is already sign-extended to DESTINATION_REP mode we
68 can just switch to DESTINATION mode on it. For each pair of
69 integral modes SOURCE and DESTINATION, when truncating from SOURCE
70 to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
71 contains the number of high-order bits in SOURCE that have to be
72 copies of the sign-bit so that we can do this mode-switch to
73 DESTINATION. */
74
75 static unsigned int
76 num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
77
78 /* Store X into index I of ARRAY. ARRAY is known to have at least I
79 elements. Return the new base of ARRAY. */
80
81 template <typename T>
82 typename T::value_type *
add_single_to_queue(array_type & array,value_type * base,size_t i,value_type x)83 generic_subrtx_iterator <T>::add_single_to_queue (array_type &array,
84 value_type *base,
85 size_t i, value_type x)
86 {
87 if (base == array.stack)
88 {
89 if (i < LOCAL_ELEMS)
90 {
91 base[i] = x;
92 return base;
93 }
94 gcc_checking_assert (i == LOCAL_ELEMS);
95 /* A previous iteration might also have moved from the stack to the
96 heap, in which case the heap array will already be big enough. */
97 if (vec_safe_length (array.heap) <= i)
98 vec_safe_grow (array.heap, i + 1);
99 base = array.heap->address ();
100 memcpy (base, array.stack, sizeof (array.stack));
101 base[LOCAL_ELEMS] = x;
102 return base;
103 }
104 unsigned int length = array.heap->length ();
105 if (length > i)
106 {
107 gcc_checking_assert (base == array.heap->address ());
108 base[i] = x;
109 return base;
110 }
111 else
112 {
113 gcc_checking_assert (i == length);
114 vec_safe_push (array.heap, x);
115 return array.heap->address ();
116 }
117 }
118
119 /* Add the subrtxes of X to worklist ARRAY, starting at END. Return the
120 number of elements added to the worklist. */
121
122 template <typename T>
123 size_t
add_subrtxes_to_queue(array_type & array,value_type * base,size_t end,rtx_type x)124 generic_subrtx_iterator <T>::add_subrtxes_to_queue (array_type &array,
125 value_type *base,
126 size_t end, rtx_type x)
127 {
128 enum rtx_code code = GET_CODE (x);
129 const char *format = GET_RTX_FORMAT (code);
130 size_t orig_end = end;
131 if (__builtin_expect (INSN_P (x), false))
132 {
133 /* Put the pattern at the top of the queue, since that's what
134 we're likely to want most. It also allows for the SEQUENCE
135 code below. */
136 for (int i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; --i)
137 if (format[i] == 'e')
138 {
139 value_type subx = T::get_value (x->u.fld[i].rt_rtx);
140 if (__builtin_expect (end < LOCAL_ELEMS, true))
141 base[end++] = subx;
142 else
143 base = add_single_to_queue (array, base, end++, subx);
144 }
145 }
146 else
147 for (int i = 0; format[i]; ++i)
148 if (format[i] == 'e')
149 {
150 value_type subx = T::get_value (x->u.fld[i].rt_rtx);
151 if (__builtin_expect (end < LOCAL_ELEMS, true))
152 base[end++] = subx;
153 else
154 base = add_single_to_queue (array, base, end++, subx);
155 }
156 else if (format[i] == 'E')
157 {
158 unsigned int length = GET_NUM_ELEM (x->u.fld[i].rt_rtvec);
159 rtx *vec = x->u.fld[i].rt_rtvec->elem;
160 if (__builtin_expect (end + length <= LOCAL_ELEMS, true))
161 for (unsigned int j = 0; j < length; j++)
162 base[end++] = T::get_value (vec[j]);
163 else
164 for (unsigned int j = 0; j < length; j++)
165 base = add_single_to_queue (array, base, end++,
166 T::get_value (vec[j]));
167 if (code == SEQUENCE && end == length)
168 /* If the subrtxes of the sequence fill the entire array then
169 we know that no other parts of a containing insn are queued.
170 The caller is therefore iterating over the sequence as a
171 PATTERN (...), so we also want the patterns of the
172 subinstructions. */
173 for (unsigned int j = 0; j < length; j++)
174 {
175 typename T::rtx_type x = T::get_rtx (base[j]);
176 if (INSN_P (x))
177 base[j] = T::get_value (PATTERN (x));
178 }
179 }
180 return end - orig_end;
181 }
182
183 template <typename T>
184 void
free_array(array_type & array)185 generic_subrtx_iterator <T>::free_array (array_type &array)
186 {
187 vec_free (array.heap);
188 }
189
190 template <typename T>
191 const size_t generic_subrtx_iterator <T>::LOCAL_ELEMS;
192
193 template class generic_subrtx_iterator <const_rtx_accessor>;
194 template class generic_subrtx_iterator <rtx_var_accessor>;
195 template class generic_subrtx_iterator <rtx_ptr_accessor>;
196
197 /* Return 1 if the value of X is unstable
198 (would be different at a different point in the program).
199 The frame pointer, arg pointer, etc. are considered stable
200 (within one function) and so is anything marked `unchanging'. */
201
202 int
rtx_unstable_p(const_rtx x)203 rtx_unstable_p (const_rtx x)
204 {
205 const RTX_CODE code = GET_CODE (x);
206 int i;
207 const char *fmt;
208
209 switch (code)
210 {
211 case MEM:
212 return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));
213
214 case CONST:
215 CASE_CONST_ANY:
216 case SYMBOL_REF:
217 case LABEL_REF:
218 return 0;
219
220 case REG:
221 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
222 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
223 /* The arg pointer varies if it is not a fixed register. */
224 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
225 return 0;
226 /* ??? When call-clobbered, the value is stable modulo the restore
227 that must happen after a call. This currently screws up local-alloc
228 into believing that the restore is not needed. */
229 if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED && x == pic_offset_table_rtx)
230 return 0;
231 return 1;
232
233 case ASM_OPERANDS:
234 if (MEM_VOLATILE_P (x))
235 return 1;
236
237 /* Fall through. */
238
239 default:
240 break;
241 }
242
243 fmt = GET_RTX_FORMAT (code);
244 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
245 if (fmt[i] == 'e')
246 {
247 if (rtx_unstable_p (XEXP (x, i)))
248 return 1;
249 }
250 else if (fmt[i] == 'E')
251 {
252 int j;
253 for (j = 0; j < XVECLEN (x, i); j++)
254 if (rtx_unstable_p (XVECEXP (x, i, j)))
255 return 1;
256 }
257
258 return 0;
259 }
260
261 /* Return 1 if X has a value that can vary even between two
262 executions of the program. 0 means X can be compared reliably
263 against certain constants or near-constants.
264 FOR_ALIAS is nonzero if we are called from alias analysis; if it is
265 zero, we are slightly more conservative.
266 The frame pointer and the arg pointer are considered constant. */
267
268 bool
rtx_varies_p(const_rtx x,bool for_alias)269 rtx_varies_p (const_rtx x, bool for_alias)
270 {
271 RTX_CODE code;
272 int i;
273 const char *fmt;
274
275 if (!x)
276 return 0;
277
278 code = GET_CODE (x);
279 switch (code)
280 {
281 case MEM:
282 return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);
283
284 case CONST:
285 CASE_CONST_ANY:
286 case SYMBOL_REF:
287 case LABEL_REF:
288 return 0;
289
290 case REG:
291 /* Note that we have to test for the actual rtx used for the frame
292 and arg pointers and not just the register number in case we have
293 eliminated the frame and/or arg pointer and are using it
294 for pseudos. */
295 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
296 /* The arg pointer varies if it is not a fixed register. */
297 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
298 return 0;
299 if (x == pic_offset_table_rtx
300 /* ??? When call-clobbered, the value is stable modulo the restore
301 that must happen after a call. This currently screws up
302 local-alloc into believing that the restore is not needed, so we
303 must return 0 only if we are called from alias analysis. */
304 && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED || for_alias))
305 return 0;
306 return 1;
307
308 case LO_SUM:
309 /* The operand 0 of a LO_SUM is considered constant
310 (in fact it is related specifically to operand 1)
311 during alias analysis. */
312 return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
313 || rtx_varies_p (XEXP (x, 1), for_alias);
314
315 case ASM_OPERANDS:
316 if (MEM_VOLATILE_P (x))
317 return 1;
318
319 /* Fall through. */
320
321 default:
322 break;
323 }
324
325 fmt = GET_RTX_FORMAT (code);
326 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
327 if (fmt[i] == 'e')
328 {
329 if (rtx_varies_p (XEXP (x, i), for_alias))
330 return 1;
331 }
332 else if (fmt[i] == 'E')
333 {
334 int j;
335 for (j = 0; j < XVECLEN (x, i); j++)
336 if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
337 return 1;
338 }
339
340 return 0;
341 }
342
343 /* Compute an approximation for the offset between the register
344 FROM and TO for the current function, as it was at the start
345 of the routine. */
346
347 static poly_int64
get_initial_register_offset(int from,int to)348 get_initial_register_offset (int from, int to)
349 {
350 static const struct elim_table_t
351 {
352 const int from;
353 const int to;
354 } table[] = ELIMINABLE_REGS;
355 poly_int64 offset1, offset2;
356 unsigned int i, j;
357
358 if (to == from)
359 return 0;
360
361 /* It is not safe to call INITIAL_ELIMINATION_OFFSET before the epilogue
362 is completed, but we need to give at least an estimate for the stack
363 pointer based on the frame size. */
364 if (!epilogue_completed)
365 {
366 offset1 = crtl->outgoing_args_size + get_frame_size ();
367 #if !STACK_GROWS_DOWNWARD
368 offset1 = - offset1;
369 #endif
370 if (to == STACK_POINTER_REGNUM)
371 return offset1;
372 else if (from == STACK_POINTER_REGNUM)
373 return - offset1;
374 else
375 return 0;
376 }
377
378 for (i = 0; i < ARRAY_SIZE (table); i++)
379 if (table[i].from == from)
380 {
381 if (table[i].to == to)
382 {
383 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
384 offset1);
385 return offset1;
386 }
387 for (j = 0; j < ARRAY_SIZE (table); j++)
388 {
389 if (table[j].to == to
390 && table[j].from == table[i].to)
391 {
392 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
393 offset1);
394 INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
395 offset2);
396 return offset1 + offset2;
397 }
398 if (table[j].from == to
399 && table[j].to == table[i].to)
400 {
401 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
402 offset1);
403 INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
404 offset2);
405 return offset1 - offset2;
406 }
407 }
408 }
409 else if (table[i].to == from)
410 {
411 if (table[i].from == to)
412 {
413 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
414 offset1);
415 return - offset1;
416 }
417 for (j = 0; j < ARRAY_SIZE (table); j++)
418 {
419 if (table[j].to == to
420 && table[j].from == table[i].from)
421 {
422 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
423 offset1);
424 INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
425 offset2);
426 return - offset1 + offset2;
427 }
428 if (table[j].from == to
429 && table[j].to == table[i].from)
430 {
431 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,
432 offset1);
433 INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,
434 offset2);
435 return - offset1 - offset2;
436 }
437 }
438 }
439
440 /* If the requested register combination was not found,
441 try a different more simple combination. */
442 if (from == ARG_POINTER_REGNUM)
443 return get_initial_register_offset (HARD_FRAME_POINTER_REGNUM, to);
444 else if (to == ARG_POINTER_REGNUM)
445 return get_initial_register_offset (from, HARD_FRAME_POINTER_REGNUM);
446 else if (from == HARD_FRAME_POINTER_REGNUM)
447 return get_initial_register_offset (FRAME_POINTER_REGNUM, to);
448 else if (to == HARD_FRAME_POINTER_REGNUM)
449 return get_initial_register_offset (from, FRAME_POINTER_REGNUM);
450 else
451 return 0;
452 }
453
454 /* Return nonzero if the use of X+OFFSET as an address in a MEM with SIZE
455 bytes can cause a trap. MODE is the mode of the MEM (not that of X) and
456 UNALIGNED_MEMS controls whether nonzero is returned for unaligned memory
457 references on strict alignment machines. */
458
459 static int
rtx_addr_can_trap_p_1(const_rtx x,poly_int64 offset,poly_int64 size,machine_mode mode,bool unaligned_mems)460 rtx_addr_can_trap_p_1 (const_rtx x, poly_int64 offset, poly_int64 size,
461 machine_mode mode, bool unaligned_mems)
462 {
463 enum rtx_code code = GET_CODE (x);
464 gcc_checking_assert (mode == BLKmode || known_size_p (size));
465
466 /* The offset must be a multiple of the mode size if we are considering
467 unaligned memory references on strict alignment machines. */
468 if (STRICT_ALIGNMENT && unaligned_mems && mode != BLKmode)
469 {
470 poly_int64 actual_offset = offset;
471
472 #ifdef SPARC_STACK_BOUNDARY_HACK
473 /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
474 the real alignment of %sp. However, when it does this, the
475 alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY. */
476 if (SPARC_STACK_BOUNDARY_HACK
477 && (x == stack_pointer_rtx || x == hard_frame_pointer_rtx))
478 actual_offset -= STACK_POINTER_OFFSET;
479 #endif
480
481 if (!multiple_p (actual_offset, GET_MODE_SIZE (mode)))
482 return 1;
483 }
484
485 switch (code)
486 {
487 case SYMBOL_REF:
488 if (SYMBOL_REF_WEAK (x))
489 return 1;
490 if (!CONSTANT_POOL_ADDRESS_P (x) && !SYMBOL_REF_FUNCTION_P (x))
491 {
492 tree decl;
493 poly_int64 decl_size;
494
495 if (maybe_lt (offset, 0))
496 return 1;
497 if (!known_size_p (size))
498 return maybe_ne (offset, 0);
499
500 /* If the size of the access or of the symbol is unknown,
501 assume the worst. */
502 decl = SYMBOL_REF_DECL (x);
503
504 /* Else check that the access is in bounds. TODO: restructure
505 expr_size/tree_expr_size/int_expr_size and just use the latter. */
506 if (!decl)
507 decl_size = -1;
508 else if (DECL_P (decl) && DECL_SIZE_UNIT (decl))
509 {
510 if (!poly_int_tree_p (DECL_SIZE_UNIT (decl), &decl_size))
511 decl_size = -1;
512 }
513 else if (TREE_CODE (decl) == STRING_CST)
514 decl_size = TREE_STRING_LENGTH (decl);
515 else if (TYPE_SIZE_UNIT (TREE_TYPE (decl)))
516 decl_size = int_size_in_bytes (TREE_TYPE (decl));
517 else
518 decl_size = -1;
519
520 return (!known_size_p (decl_size) || known_eq (decl_size, 0)
521 ? maybe_ne (offset, 0)
522 : maybe_gt (offset + size, decl_size));
523 }
524
525 return 0;
526
527 case LABEL_REF:
528 return 0;
529
530 case REG:
531 /* Stack references are assumed not to trap, but we need to deal with
532 nonsensical offsets. */
533 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
534 || x == stack_pointer_rtx
535 /* The arg pointer varies if it is not a fixed register. */
536 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
537 {
538 #ifdef RED_ZONE_SIZE
539 poly_int64 red_zone_size = RED_ZONE_SIZE;
540 #else
541 poly_int64 red_zone_size = 0;
542 #endif
543 poly_int64 stack_boundary = PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT;
544 poly_int64 low_bound, high_bound;
545
546 if (!known_size_p (size))
547 return 1;
548
549 if (x == frame_pointer_rtx)
550 {
551 if (FRAME_GROWS_DOWNWARD)
552 {
553 high_bound = targetm.starting_frame_offset ();
554 low_bound = high_bound - get_frame_size ();
555 }
556 else
557 {
558 low_bound = targetm.starting_frame_offset ();
559 high_bound = low_bound + get_frame_size ();
560 }
561 }
562 else if (x == hard_frame_pointer_rtx)
563 {
564 poly_int64 sp_offset
565 = get_initial_register_offset (STACK_POINTER_REGNUM,
566 HARD_FRAME_POINTER_REGNUM);
567 poly_int64 ap_offset
568 = get_initial_register_offset (ARG_POINTER_REGNUM,
569 HARD_FRAME_POINTER_REGNUM);
570
571 #if STACK_GROWS_DOWNWARD
572 low_bound = sp_offset - red_zone_size - stack_boundary;
573 high_bound = ap_offset
574 + FIRST_PARM_OFFSET (current_function_decl)
575 #if !ARGS_GROW_DOWNWARD
576 + crtl->args.size
577 #endif
578 + stack_boundary;
579 #else
580 high_bound = sp_offset + red_zone_size + stack_boundary;
581 low_bound = ap_offset
582 + FIRST_PARM_OFFSET (current_function_decl)
583 #if ARGS_GROW_DOWNWARD
584 - crtl->args.size
585 #endif
586 - stack_boundary;
587 #endif
588 }
589 else if (x == stack_pointer_rtx)
590 {
591 poly_int64 ap_offset
592 = get_initial_register_offset (ARG_POINTER_REGNUM,
593 STACK_POINTER_REGNUM);
594
595 #if STACK_GROWS_DOWNWARD
596 low_bound = - red_zone_size - stack_boundary;
597 high_bound = ap_offset
598 + FIRST_PARM_OFFSET (current_function_decl)
599 #if !ARGS_GROW_DOWNWARD
600 + crtl->args.size
601 #endif
602 + stack_boundary;
603 #else
604 high_bound = red_zone_size + stack_boundary;
605 low_bound = ap_offset
606 + FIRST_PARM_OFFSET (current_function_decl)
607 #if ARGS_GROW_DOWNWARD
608 - crtl->args.size
609 #endif
610 - stack_boundary;
611 #endif
612 }
613 else
614 {
615 /* We assume that accesses are safe to at least the
616 next stack boundary.
617 Examples are varargs and __builtin_return_address. */
618 #if ARGS_GROW_DOWNWARD
619 high_bound = FIRST_PARM_OFFSET (current_function_decl)
620 + stack_boundary;
621 low_bound = FIRST_PARM_OFFSET (current_function_decl)
622 - crtl->args.size - stack_boundary;
623 #else
624 low_bound = FIRST_PARM_OFFSET (current_function_decl)
625 - stack_boundary;
626 high_bound = FIRST_PARM_OFFSET (current_function_decl)
627 + crtl->args.size + stack_boundary;
628 #endif
629 }
630
631 if (known_ge (offset, low_bound)
632 && known_le (offset, high_bound - size))
633 return 0;
634 return 1;
635 }
636 /* All of the virtual frame registers are stack references. */
637 if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
638 && REGNO (x) <= LAST_VIRTUAL_REGISTER)
639 return 0;
640 return 1;
641
642 case CONST:
643 return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
644 mode, unaligned_mems);
645
646 case PLUS:
647 /* An address is assumed not to trap if:
648 - it is the pic register plus a const unspec without offset. */
649 if (XEXP (x, 0) == pic_offset_table_rtx
650 && GET_CODE (XEXP (x, 1)) == CONST
651 && GET_CODE (XEXP (XEXP (x, 1), 0)) == UNSPEC
652 && known_eq (offset, 0))
653 return 0;
654
655 /* - or it is an address that can't trap plus a constant integer. */
656 if (CONST_INT_P (XEXP (x, 1))
657 && !rtx_addr_can_trap_p_1 (XEXP (x, 0), offset + INTVAL (XEXP (x, 1)),
658 size, mode, unaligned_mems))
659 return 0;
660
661 return 1;
662
663 case LO_SUM:
664 case PRE_MODIFY:
665 return rtx_addr_can_trap_p_1 (XEXP (x, 1), offset, size,
666 mode, unaligned_mems);
667
668 case PRE_DEC:
669 case PRE_INC:
670 case POST_DEC:
671 case POST_INC:
672 case POST_MODIFY:
673 return rtx_addr_can_trap_p_1 (XEXP (x, 0), offset, size,
674 mode, unaligned_mems);
675
676 default:
677 break;
678 }
679
680 /* If it isn't one of the case above, it can cause a trap. */
681 return 1;
682 }
683
684 /* Return nonzero if the use of X as an address in a MEM can cause a trap. */
685
686 int
rtx_addr_can_trap_p(const_rtx x)687 rtx_addr_can_trap_p (const_rtx x)
688 {
689 return rtx_addr_can_trap_p_1 (x, 0, -1, BLKmode, false);
690 }
691
692 /* Return true if X contains a MEM subrtx. */
693
694 bool
contains_mem_rtx_p(rtx x)695 contains_mem_rtx_p (rtx x)
696 {
697 subrtx_iterator::array_type array;
698 FOR_EACH_SUBRTX (iter, array, x, ALL)
699 if (MEM_P (*iter))
700 return true;
701
702 return false;
703 }
704
705 /* Return true if X is an address that is known to not be zero. */
706
707 bool
nonzero_address_p(const_rtx x)708 nonzero_address_p (const_rtx x)
709 {
710 const enum rtx_code code = GET_CODE (x);
711
712 switch (code)
713 {
714 case SYMBOL_REF:
715 return flag_delete_null_pointer_checks && !SYMBOL_REF_WEAK (x);
716
717 case LABEL_REF:
718 return true;
719
720 case REG:
721 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
722 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
723 || x == stack_pointer_rtx
724 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
725 return true;
726 /* All of the virtual frame registers are stack references. */
727 if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
728 && REGNO (x) <= LAST_VIRTUAL_REGISTER)
729 return true;
730 return false;
731
732 case CONST:
733 return nonzero_address_p (XEXP (x, 0));
734
735 case PLUS:
736 /* Handle PIC references. */
737 if (XEXP (x, 0) == pic_offset_table_rtx
738 && CONSTANT_P (XEXP (x, 1)))
739 return true;
740 return false;
741
742 case PRE_MODIFY:
743 /* Similar to the above; allow positive offsets. Further, since
744 auto-inc is only allowed in memories, the register must be a
745 pointer. */
746 if (CONST_INT_P (XEXP (x, 1))
747 && INTVAL (XEXP (x, 1)) > 0)
748 return true;
749 return nonzero_address_p (XEXP (x, 0));
750
751 case PRE_INC:
752 /* Similarly. Further, the offset is always positive. */
753 return true;
754
755 case PRE_DEC:
756 case POST_DEC:
757 case POST_INC:
758 case POST_MODIFY:
759 return nonzero_address_p (XEXP (x, 0));
760
761 case LO_SUM:
762 return nonzero_address_p (XEXP (x, 1));
763
764 default:
765 break;
766 }
767
768 /* If it isn't one of the case above, might be zero. */
769 return false;
770 }
771
772 /* Return 1 if X refers to a memory location whose address
773 cannot be compared reliably with constant addresses,
774 or if X refers to a BLKmode memory object.
775 FOR_ALIAS is nonzero if we are called from alias analysis; if it is
776 zero, we are slightly more conservative. */
777
778 bool
rtx_addr_varies_p(const_rtx x,bool for_alias)779 rtx_addr_varies_p (const_rtx x, bool for_alias)
780 {
781 enum rtx_code code;
782 int i;
783 const char *fmt;
784
785 if (x == 0)
786 return 0;
787
788 code = GET_CODE (x);
789 if (code == MEM)
790 return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);
791
792 fmt = GET_RTX_FORMAT (code);
793 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
794 if (fmt[i] == 'e')
795 {
796 if (rtx_addr_varies_p (XEXP (x, i), for_alias))
797 return 1;
798 }
799 else if (fmt[i] == 'E')
800 {
801 int j;
802 for (j = 0; j < XVECLEN (x, i); j++)
803 if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
804 return 1;
805 }
806 return 0;
807 }
808
809 /* Return the CALL in X if there is one. */
810
811 rtx
get_call_rtx_from(rtx x)812 get_call_rtx_from (rtx x)
813 {
814 if (INSN_P (x))
815 x = PATTERN (x);
816 if (GET_CODE (x) == PARALLEL)
817 x = XVECEXP (x, 0, 0);
818 if (GET_CODE (x) == SET)
819 x = SET_SRC (x);
820 if (GET_CODE (x) == CALL && MEM_P (XEXP (x, 0)))
821 return x;
822 return NULL_RTX;
823 }
824
825 /* Return the value of the integer term in X, if one is apparent;
826 otherwise return 0.
827 Only obvious integer terms are detected.
828 This is used in cse.c with the `related_value' field. */
829
830 HOST_WIDE_INT
get_integer_term(const_rtx x)831 get_integer_term (const_rtx x)
832 {
833 if (GET_CODE (x) == CONST)
834 x = XEXP (x, 0);
835
836 if (GET_CODE (x) == MINUS
837 && CONST_INT_P (XEXP (x, 1)))
838 return - INTVAL (XEXP (x, 1));
839 if (GET_CODE (x) == PLUS
840 && CONST_INT_P (XEXP (x, 1)))
841 return INTVAL (XEXP (x, 1));
842 return 0;
843 }
844
845 /* If X is a constant, return the value sans apparent integer term;
846 otherwise return 0.
847 Only obvious integer terms are detected. */
848
849 rtx
get_related_value(const_rtx x)850 get_related_value (const_rtx x)
851 {
852 if (GET_CODE (x) != CONST)
853 return 0;
854 x = XEXP (x, 0);
855 if (GET_CODE (x) == PLUS
856 && CONST_INT_P (XEXP (x, 1)))
857 return XEXP (x, 0);
858 else if (GET_CODE (x) == MINUS
859 && CONST_INT_P (XEXP (x, 1)))
860 return XEXP (x, 0);
861 return 0;
862 }
863
864 /* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
865 to somewhere in the same object or object_block as SYMBOL. */
866
867 bool
offset_within_block_p(const_rtx symbol,HOST_WIDE_INT offset)868 offset_within_block_p (const_rtx symbol, HOST_WIDE_INT offset)
869 {
870 tree decl;
871
872 if (GET_CODE (symbol) != SYMBOL_REF)
873 return false;
874
875 if (offset == 0)
876 return true;
877
878 if (offset > 0)
879 {
880 if (CONSTANT_POOL_ADDRESS_P (symbol)
881 && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
882 return true;
883
884 decl = SYMBOL_REF_DECL (symbol);
885 if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)))
886 return true;
887 }
888
889 if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
890 && SYMBOL_REF_BLOCK (symbol)
891 && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
892 && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
893 < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
894 return true;
895
896 return false;
897 }
898
899 /* Split X into a base and a constant offset, storing them in *BASE_OUT
900 and *OFFSET_OUT respectively. */
901
902 void
split_const(rtx x,rtx * base_out,rtx * offset_out)903 split_const (rtx x, rtx *base_out, rtx *offset_out)
904 {
905 if (GET_CODE (x) == CONST)
906 {
907 x = XEXP (x, 0);
908 if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1)))
909 {
910 *base_out = XEXP (x, 0);
911 *offset_out = XEXP (x, 1);
912 return;
913 }
914 }
915 *base_out = x;
916 *offset_out = const0_rtx;
917 }
918
919 /* Express integer value X as some value Y plus a polynomial offset,
920 where Y is either const0_rtx, X or something within X (as opposed
921 to a new rtx). Return the Y and store the offset in *OFFSET_OUT. */
922
923 rtx
strip_offset(rtx x,poly_int64_pod * offset_out)924 strip_offset (rtx x, poly_int64_pod *offset_out)
925 {
926 rtx base = const0_rtx;
927 rtx test = x;
928 if (GET_CODE (test) == CONST)
929 test = XEXP (test, 0);
930 if (GET_CODE (test) == PLUS)
931 {
932 base = XEXP (test, 0);
933 test = XEXP (test, 1);
934 }
935 if (poly_int_rtx_p (test, offset_out))
936 return base;
937 *offset_out = 0;
938 return x;
939 }
940
941 /* Return the argument size in REG_ARGS_SIZE note X. */
942
943 poly_int64
get_args_size(const_rtx x)944 get_args_size (const_rtx x)
945 {
946 gcc_checking_assert (REG_NOTE_KIND (x) == REG_ARGS_SIZE);
947 return rtx_to_poly_int64 (XEXP (x, 0));
948 }
949
950 /* Return the number of places FIND appears within X. If COUNT_DEST is
951 zero, we do not count occurrences inside the destination of a SET. */
952
953 int
count_occurrences(const_rtx x,const_rtx find,int count_dest)954 count_occurrences (const_rtx x, const_rtx find, int count_dest)
955 {
956 int i, j;
957 enum rtx_code code;
958 const char *format_ptr;
959 int count;
960
961 if (x == find)
962 return 1;
963
964 code = GET_CODE (x);
965
966 switch (code)
967 {
968 case REG:
969 CASE_CONST_ANY:
970 case SYMBOL_REF:
971 case CODE_LABEL:
972 case PC:
973 case CC0:
974 return 0;
975
976 case EXPR_LIST:
977 count = count_occurrences (XEXP (x, 0), find, count_dest);
978 if (XEXP (x, 1))
979 count += count_occurrences (XEXP (x, 1), find, count_dest);
980 return count;
981
982 case MEM:
983 if (MEM_P (find) && rtx_equal_p (x, find))
984 return 1;
985 break;
986
987 case SET:
988 if (SET_DEST (x) == find && ! count_dest)
989 return count_occurrences (SET_SRC (x), find, count_dest);
990 break;
991
992 default:
993 break;
994 }
995
996 format_ptr = GET_RTX_FORMAT (code);
997 count = 0;
998
999 for (i = 0; i < GET_RTX_LENGTH (code); i++)
1000 {
1001 switch (*format_ptr++)
1002 {
1003 case 'e':
1004 count += count_occurrences (XEXP (x, i), find, count_dest);
1005 break;
1006
1007 case 'E':
1008 for (j = 0; j < XVECLEN (x, i); j++)
1009 count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
1010 break;
1011 }
1012 }
1013 return count;
1014 }
1015
1016
1017 /* Return TRUE if OP is a register or subreg of a register that
1018 holds an unsigned quantity. Otherwise, return FALSE. */
1019
1020 bool
unsigned_reg_p(rtx op)1021 unsigned_reg_p (rtx op)
1022 {
1023 if (REG_P (op)
1024 && REG_EXPR (op)
1025 && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op))))
1026 return true;
1027
1028 if (GET_CODE (op) == SUBREG
1029 && SUBREG_PROMOTED_SIGN (op))
1030 return true;
1031
1032 return false;
1033 }
1034
1035
1036 /* Nonzero if register REG appears somewhere within IN.
1037 Also works if REG is not a register; in this case it checks
1038 for a subexpression of IN that is Lisp "equal" to REG. */
1039
1040 int
reg_mentioned_p(const_rtx reg,const_rtx in)1041 reg_mentioned_p (const_rtx reg, const_rtx in)
1042 {
1043 const char *fmt;
1044 int i;
1045 enum rtx_code code;
1046
1047 if (in == 0)
1048 return 0;
1049
1050 if (reg == in)
1051 return 1;
1052
1053 if (GET_CODE (in) == LABEL_REF)
1054 return reg == label_ref_label (in);
1055
1056 code = GET_CODE (in);
1057
1058 switch (code)
1059 {
1060 /* Compare registers by number. */
1061 case REG:
1062 return REG_P (reg) && REGNO (in) == REGNO (reg);
1063
1064 /* These codes have no constituent expressions
1065 and are unique. */
1066 case SCRATCH:
1067 case CC0:
1068 case PC:
1069 return 0;
1070
1071 CASE_CONST_ANY:
1072 /* These are kept unique for a given value. */
1073 return 0;
1074
1075 default:
1076 break;
1077 }
1078
1079 if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
1080 return 1;
1081
1082 fmt = GET_RTX_FORMAT (code);
1083
1084 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1085 {
1086 if (fmt[i] == 'E')
1087 {
1088 int j;
1089 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
1090 if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
1091 return 1;
1092 }
1093 else if (fmt[i] == 'e'
1094 && reg_mentioned_p (reg, XEXP (in, i)))
1095 return 1;
1096 }
1097 return 0;
1098 }
1099
1100 /* Return 1 if in between BEG and END, exclusive of BEG and END, there is
1101 no CODE_LABEL insn. */
1102
1103 int
no_labels_between_p(const rtx_insn * beg,const rtx_insn * end)1104 no_labels_between_p (const rtx_insn *beg, const rtx_insn *end)
1105 {
1106 rtx_insn *p;
1107 if (beg == end)
1108 return 0;
1109 for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
1110 if (LABEL_P (p))
1111 return 0;
1112 return 1;
1113 }
1114
1115 /* Nonzero if register REG is used in an insn between
1116 FROM_INSN and TO_INSN (exclusive of those two). */
1117
1118 int
reg_used_between_p(const_rtx reg,const rtx_insn * from_insn,const rtx_insn * to_insn)1119 reg_used_between_p (const_rtx reg, const rtx_insn *from_insn,
1120 const rtx_insn *to_insn)
1121 {
1122 rtx_insn *insn;
1123
1124 if (from_insn == to_insn)
1125 return 0;
1126
1127 for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
1128 if (NONDEBUG_INSN_P (insn)
1129 && (reg_overlap_mentioned_p (reg, PATTERN (insn))
1130 || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
1131 return 1;
1132 return 0;
1133 }
1134
1135 /* Nonzero if the old value of X, a register, is referenced in BODY. If X
1136 is entirely replaced by a new value and the only use is as a SET_DEST,
1137 we do not consider it a reference. */
1138
1139 int
reg_referenced_p(const_rtx x,const_rtx body)1140 reg_referenced_p (const_rtx x, const_rtx body)
1141 {
1142 int i;
1143
1144 switch (GET_CODE (body))
1145 {
1146 case SET:
1147 if (reg_overlap_mentioned_p (x, SET_SRC (body)))
1148 return 1;
1149
1150 /* If the destination is anything other than CC0, PC, a REG or a SUBREG
1151 of a REG that occupies all of the REG, the insn references X if
1152 it is mentioned in the destination. */
1153 if (GET_CODE (SET_DEST (body)) != CC0
1154 && GET_CODE (SET_DEST (body)) != PC
1155 && !REG_P (SET_DEST (body))
1156 && ! (GET_CODE (SET_DEST (body)) == SUBREG
1157 && REG_P (SUBREG_REG (SET_DEST (body)))
1158 && !read_modify_subreg_p (SET_DEST (body)))
1159 && reg_overlap_mentioned_p (x, SET_DEST (body)))
1160 return 1;
1161 return 0;
1162
1163 case ASM_OPERANDS:
1164 for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
1165 if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
1166 return 1;
1167 return 0;
1168
1169 case CALL:
1170 case USE:
1171 case IF_THEN_ELSE:
1172 return reg_overlap_mentioned_p (x, body);
1173
1174 case TRAP_IF:
1175 return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));
1176
1177 case PREFETCH:
1178 return reg_overlap_mentioned_p (x, XEXP (body, 0));
1179
1180 case UNSPEC:
1181 case UNSPEC_VOLATILE:
1182 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1183 if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
1184 return 1;
1185 return 0;
1186
1187 case PARALLEL:
1188 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1189 if (reg_referenced_p (x, XVECEXP (body, 0, i)))
1190 return 1;
1191 return 0;
1192
1193 case CLOBBER:
1194 if (MEM_P (XEXP (body, 0)))
1195 if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
1196 return 1;
1197 return 0;
1198
1199 case COND_EXEC:
1200 if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
1201 return 1;
1202 return reg_referenced_p (x, COND_EXEC_CODE (body));
1203
1204 default:
1205 return 0;
1206 }
1207 }
1208
1209 /* Nonzero if register REG is set or clobbered in an insn between
1210 FROM_INSN and TO_INSN (exclusive of those two). */
1211
1212 int
reg_set_between_p(const_rtx reg,const rtx_insn * from_insn,const rtx_insn * to_insn)1213 reg_set_between_p (const_rtx reg, const rtx_insn *from_insn,
1214 const rtx_insn *to_insn)
1215 {
1216 const rtx_insn *insn;
1217
1218 if (from_insn == to_insn)
1219 return 0;
1220
1221 for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
1222 if (INSN_P (insn) && reg_set_p (reg, insn))
1223 return 1;
1224 return 0;
1225 }
1226
1227 /* Return true if REG is set or clobbered inside INSN. */
1228
1229 int
reg_set_p(const_rtx reg,const_rtx insn)1230 reg_set_p (const_rtx reg, const_rtx insn)
1231 {
1232 /* After delay slot handling, call and branch insns might be in a
1233 sequence. Check all the elements there. */
1234 if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
1235 {
1236 for (int i = 0; i < XVECLEN (PATTERN (insn), 0); ++i)
1237 if (reg_set_p (reg, XVECEXP (PATTERN (insn), 0, i)))
1238 return true;
1239
1240 return false;
1241 }
1242
1243 /* We can be passed an insn or part of one. If we are passed an insn,
1244 check if a side-effect of the insn clobbers REG. */
1245 if (INSN_P (insn)
1246 && (FIND_REG_INC_NOTE (insn, reg)
1247 || (CALL_P (insn)
1248 && ((REG_P (reg)
1249 && REGNO (reg) < FIRST_PSEUDO_REGISTER
1250 && overlaps_hard_reg_set_p (regs_invalidated_by_call,
1251 GET_MODE (reg), REGNO (reg)))
1252 || MEM_P (reg)
1253 || find_reg_fusage (insn, CLOBBER, reg)))))
1254 return true;
1255
1256 /* There are no REG_INC notes for SP autoinc. */
1257 if (reg == stack_pointer_rtx && INSN_P (insn))
1258 {
1259 subrtx_var_iterator::array_type array;
1260 FOR_EACH_SUBRTX_VAR (iter, array, PATTERN (insn), NONCONST)
1261 {
1262 rtx mem = *iter;
1263 if (mem
1264 && MEM_P (mem)
1265 && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
1266 {
1267 if (XEXP (XEXP (mem, 0), 0) == stack_pointer_rtx)
1268 return true;
1269 iter.skip_subrtxes ();
1270 }
1271 }
1272 }
1273
1274 return set_of (reg, insn) != NULL_RTX;
1275 }
1276
1277 /* Similar to reg_set_between_p, but check all registers in X. Return 0
1278 only if none of them are modified between START and END. Return 1 if
1279 X contains a MEM; this routine does use memory aliasing. */
1280
1281 int
modified_between_p(const_rtx x,const rtx_insn * start,const rtx_insn * end)1282 modified_between_p (const_rtx x, const rtx_insn *start, const rtx_insn *end)
1283 {
1284 const enum rtx_code code = GET_CODE (x);
1285 const char *fmt;
1286 int i, j;
1287 rtx_insn *insn;
1288
1289 if (start == end)
1290 return 0;
1291
1292 switch (code)
1293 {
1294 CASE_CONST_ANY:
1295 case CONST:
1296 case SYMBOL_REF:
1297 case LABEL_REF:
1298 return 0;
1299
1300 case PC:
1301 case CC0:
1302 return 1;
1303
1304 case MEM:
1305 if (modified_between_p (XEXP (x, 0), start, end))
1306 return 1;
1307 if (MEM_READONLY_P (x))
1308 return 0;
1309 for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
1310 if (memory_modified_in_insn_p (x, insn))
1311 return 1;
1312 return 0;
1313
1314 case REG:
1315 return reg_set_between_p (x, start, end);
1316
1317 default:
1318 break;
1319 }
1320
1321 fmt = GET_RTX_FORMAT (code);
1322 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1323 {
1324 if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
1325 return 1;
1326
1327 else if (fmt[i] == 'E')
1328 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1329 if (modified_between_p (XVECEXP (x, i, j), start, end))
1330 return 1;
1331 }
1332
1333 return 0;
1334 }
1335
1336 /* Similar to reg_set_p, but check all registers in X. Return 0 only if none
1337 of them are modified in INSN. Return 1 if X contains a MEM; this routine
1338 does use memory aliasing. */
1339
1340 int
modified_in_p(const_rtx x,const_rtx insn)1341 modified_in_p (const_rtx x, const_rtx insn)
1342 {
1343 const enum rtx_code code = GET_CODE (x);
1344 const char *fmt;
1345 int i, j;
1346
1347 switch (code)
1348 {
1349 CASE_CONST_ANY:
1350 case CONST:
1351 case SYMBOL_REF:
1352 case LABEL_REF:
1353 return 0;
1354
1355 case PC:
1356 case CC0:
1357 return 1;
1358
1359 case MEM:
1360 if (modified_in_p (XEXP (x, 0), insn))
1361 return 1;
1362 if (MEM_READONLY_P (x))
1363 return 0;
1364 if (memory_modified_in_insn_p (x, insn))
1365 return 1;
1366 return 0;
1367
1368 case REG:
1369 return reg_set_p (x, insn);
1370
1371 default:
1372 break;
1373 }
1374
1375 fmt = GET_RTX_FORMAT (code);
1376 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1377 {
1378 if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
1379 return 1;
1380
1381 else if (fmt[i] == 'E')
1382 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1383 if (modified_in_p (XVECEXP (x, i, j), insn))
1384 return 1;
1385 }
1386
1387 return 0;
1388 }
1389
1390 /* Return true if X is a SUBREG and if storing a value to X would
1391 preserve some of its SUBREG_REG. For example, on a normal 32-bit
1392 target, using a SUBREG to store to one half of a DImode REG would
1393 preserve the other half. */
1394
1395 bool
read_modify_subreg_p(const_rtx x)1396 read_modify_subreg_p (const_rtx x)
1397 {
1398 if (GET_CODE (x) != SUBREG)
1399 return false;
1400 poly_uint64 isize = GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)));
1401 poly_uint64 osize = GET_MODE_SIZE (GET_MODE (x));
1402 poly_uint64 regsize = REGMODE_NATURAL_SIZE (GET_MODE (SUBREG_REG (x)));
1403 /* The inner and outer modes of a subreg must be ordered, so that we
1404 can tell whether they're paradoxical or partial. */
1405 gcc_checking_assert (ordered_p (isize, osize));
1406 return (maybe_gt (isize, osize) && maybe_gt (isize, regsize));
1407 }
1408
1409 /* Helper function for set_of. */
1410 struct set_of_data
1411 {
1412 const_rtx found;
1413 const_rtx pat;
1414 };
1415
1416 static void
set_of_1(rtx x,const_rtx pat,void * data1)1417 set_of_1 (rtx x, const_rtx pat, void *data1)
1418 {
1419 struct set_of_data *const data = (struct set_of_data *) (data1);
1420 if (rtx_equal_p (x, data->pat)
1421 || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
1422 data->found = pat;
1423 }
1424
1425 /* Give an INSN, return a SET or CLOBBER expression that does modify PAT
1426 (either directly or via STRICT_LOW_PART and similar modifiers). */
1427 const_rtx
set_of(const_rtx pat,const_rtx insn)1428 set_of (const_rtx pat, const_rtx insn)
1429 {
1430 struct set_of_data data;
1431 data.found = NULL_RTX;
1432 data.pat = pat;
1433 note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
1434 return data.found;
1435 }
1436
1437 /* Add all hard register in X to *PSET. */
1438 void
find_all_hard_regs(const_rtx x,HARD_REG_SET * pset)1439 find_all_hard_regs (const_rtx x, HARD_REG_SET *pset)
1440 {
1441 subrtx_iterator::array_type array;
1442 FOR_EACH_SUBRTX (iter, array, x, NONCONST)
1443 {
1444 const_rtx x = *iter;
1445 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
1446 add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
1447 }
1448 }
1449
1450 /* This function, called through note_stores, collects sets and
1451 clobbers of hard registers in a HARD_REG_SET, which is pointed to
1452 by DATA. */
1453 void
record_hard_reg_sets(rtx x,const_rtx pat ATTRIBUTE_UNUSED,void * data)1454 record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
1455 {
1456 HARD_REG_SET *pset = (HARD_REG_SET *)data;
1457 if (REG_P (x) && HARD_REGISTER_P (x))
1458 add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
1459 }
1460
1461 /* Examine INSN, and compute the set of hard registers written by it.
1462 Store it in *PSET. Should only be called after reload. */
1463 void
find_all_hard_reg_sets(const rtx_insn * insn,HARD_REG_SET * pset,bool implicit)1464 find_all_hard_reg_sets (const rtx_insn *insn, HARD_REG_SET *pset, bool implicit)
1465 {
1466 rtx link;
1467
1468 CLEAR_HARD_REG_SET (*pset);
1469 note_stores (PATTERN (insn), record_hard_reg_sets, pset);
1470 if (CALL_P (insn))
1471 {
1472 if (implicit)
1473 IOR_HARD_REG_SET (*pset, call_used_reg_set);
1474
1475 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
1476 record_hard_reg_sets (XEXP (link, 0), NULL, pset);
1477 }
1478 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1479 if (REG_NOTE_KIND (link) == REG_INC)
1480 record_hard_reg_sets (XEXP (link, 0), NULL, pset);
1481 }
1482
1483 /* Like record_hard_reg_sets, but called through note_uses. */
1484 void
record_hard_reg_uses(rtx * px,void * data)1485 record_hard_reg_uses (rtx *px, void *data)
1486 {
1487 find_all_hard_regs (*px, (HARD_REG_SET *) data);
1488 }
1489
1490 /* Given an INSN, return a SET expression if this insn has only a single SET.
1491 It may also have CLOBBERs, USEs, or SET whose output
1492 will not be used, which we ignore. */
1493
1494 rtx
single_set_2(const rtx_insn * insn,const_rtx pat)1495 single_set_2 (const rtx_insn *insn, const_rtx pat)
1496 {
1497 rtx set = NULL;
1498 int set_verified = 1;
1499 int i;
1500
1501 if (GET_CODE (pat) == PARALLEL)
1502 {
1503 for (i = 0; i < XVECLEN (pat, 0); i++)
1504 {
1505 rtx sub = XVECEXP (pat, 0, i);
1506 switch (GET_CODE (sub))
1507 {
1508 case USE:
1509 case CLOBBER:
1510 break;
1511
1512 case SET:
1513 /* We can consider insns having multiple sets, where all
1514 but one are dead as single set insns. In common case
1515 only single set is present in the pattern so we want
1516 to avoid checking for REG_UNUSED notes unless necessary.
1517
1518 When we reach set first time, we just expect this is
1519 the single set we are looking for and only when more
1520 sets are found in the insn, we check them. */
1521 if (!set_verified)
1522 {
1523 if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
1524 && !side_effects_p (set))
1525 set = NULL;
1526 else
1527 set_verified = 1;
1528 }
1529 if (!set)
1530 set = sub, set_verified = 0;
1531 else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
1532 || side_effects_p (sub))
1533 return NULL_RTX;
1534 break;
1535
1536 default:
1537 return NULL_RTX;
1538 }
1539 }
1540 }
1541 return set;
1542 }
1543
1544 /* Given an INSN, return nonzero if it has more than one SET, else return
1545 zero. */
1546
1547 int
multiple_sets(const_rtx insn)1548 multiple_sets (const_rtx insn)
1549 {
1550 int found;
1551 int i;
1552
1553 /* INSN must be an insn. */
1554 if (! INSN_P (insn))
1555 return 0;
1556
1557 /* Only a PARALLEL can have multiple SETs. */
1558 if (GET_CODE (PATTERN (insn)) == PARALLEL)
1559 {
1560 for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1561 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
1562 {
1563 /* If we have already found a SET, then return now. */
1564 if (found)
1565 return 1;
1566 else
1567 found = 1;
1568 }
1569 }
1570
1571 /* Either zero or one SET. */
1572 return 0;
1573 }
1574
1575 /* Return nonzero if the destination of SET equals the source
1576 and there are no side effects. */
1577
1578 int
set_noop_p(const_rtx set)1579 set_noop_p (const_rtx set)
1580 {
1581 rtx src = SET_SRC (set);
1582 rtx dst = SET_DEST (set);
1583
1584 if (dst == pc_rtx && src == pc_rtx)
1585 return 1;
1586
1587 if (MEM_P (dst) && MEM_P (src))
1588 return rtx_equal_p (dst, src) && !side_effects_p (dst);
1589
1590 if (GET_CODE (dst) == ZERO_EXTRACT)
1591 return rtx_equal_p (XEXP (dst, 0), src)
1592 && !BITS_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
1593 && !side_effects_p (src);
1594
1595 if (GET_CODE (dst) == STRICT_LOW_PART)
1596 dst = XEXP (dst, 0);
1597
1598 if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
1599 {
1600 if (maybe_ne (SUBREG_BYTE (src), SUBREG_BYTE (dst)))
1601 return 0;
1602 src = SUBREG_REG (src);
1603 dst = SUBREG_REG (dst);
1604 }
1605
1606 /* It is a NOOP if destination overlaps with selected src vector
1607 elements. */
1608 if (GET_CODE (src) == VEC_SELECT
1609 && REG_P (XEXP (src, 0)) && REG_P (dst)
1610 && HARD_REGISTER_P (XEXP (src, 0))
1611 && HARD_REGISTER_P (dst))
1612 {
1613 int i;
1614 rtx par = XEXP (src, 1);
1615 rtx src0 = XEXP (src, 0);
1616 int c0 = INTVAL (XVECEXP (par, 0, 0));
1617 HOST_WIDE_INT offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;
1618
1619 for (i = 1; i < XVECLEN (par, 0); i++)
1620 if (INTVAL (XVECEXP (par, 0, i)) != c0 + i)
1621 return 0;
1622 return
1623 simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
1624 offset, GET_MODE (dst)) == (int) REGNO (dst);
1625 }
1626
1627 return (REG_P (src) && REG_P (dst)
1628 && REGNO (src) == REGNO (dst));
1629 }
1630
1631 /* Return nonzero if an insn consists only of SETs, each of which only sets a
1632 value to itself. */
1633
1634 int
noop_move_p(const rtx_insn * insn)1635 noop_move_p (const rtx_insn *insn)
1636 {
1637 rtx pat = PATTERN (insn);
1638
1639 if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
1640 return 1;
1641
1642 /* Insns carrying these notes are useful later on. */
1643 if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
1644 return 0;
1645
1646 /* Check the code to be executed for COND_EXEC. */
1647 if (GET_CODE (pat) == COND_EXEC)
1648 pat = COND_EXEC_CODE (pat);
1649
1650 if (GET_CODE (pat) == SET && set_noop_p (pat))
1651 return 1;
1652
1653 if (GET_CODE (pat) == PARALLEL)
1654 {
1655 int i;
1656 /* If nothing but SETs of registers to themselves,
1657 this insn can also be deleted. */
1658 for (i = 0; i < XVECLEN (pat, 0); i++)
1659 {
1660 rtx tem = XVECEXP (pat, 0, i);
1661
1662 if (GET_CODE (tem) == USE
1663 || GET_CODE (tem) == CLOBBER)
1664 continue;
1665
1666 if (GET_CODE (tem) != SET || ! set_noop_p (tem))
1667 return 0;
1668 }
1669
1670 return 1;
1671 }
1672 return 0;
1673 }
1674
1675
1676 /* Return nonzero if register in range [REGNO, ENDREGNO)
1677 appears either explicitly or implicitly in X
1678 other than being stored into.
1679
1680 References contained within the substructure at LOC do not count.
1681 LOC may be zero, meaning don't ignore anything. */
1682
1683 bool
refers_to_regno_p(unsigned int regno,unsigned int endregno,const_rtx x,rtx * loc)1684 refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
1685 rtx *loc)
1686 {
1687 int i;
1688 unsigned int x_regno;
1689 RTX_CODE code;
1690 const char *fmt;
1691
1692 repeat:
1693 /* The contents of a REG_NONNEG note is always zero, so we must come here
1694 upon repeat in case the last REG_NOTE is a REG_NONNEG note. */
1695 if (x == 0)
1696 return false;
1697
1698 code = GET_CODE (x);
1699
1700 switch (code)
1701 {
1702 case REG:
1703 x_regno = REGNO (x);
1704
1705 /* If we modifying the stack, frame, or argument pointer, it will
1706 clobber a virtual register. In fact, we could be more precise,
1707 but it isn't worth it. */
1708 if ((x_regno == STACK_POINTER_REGNUM
1709 || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1710 && x_regno == ARG_POINTER_REGNUM)
1711 || x_regno == FRAME_POINTER_REGNUM)
1712 && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
1713 return true;
1714
1715 return endregno > x_regno && regno < END_REGNO (x);
1716
1717 case SUBREG:
1718 /* If this is a SUBREG of a hard reg, we can see exactly which
1719 registers are being modified. Otherwise, handle normally. */
1720 if (REG_P (SUBREG_REG (x))
1721 && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
1722 {
1723 unsigned int inner_regno = subreg_regno (x);
1724 unsigned int inner_endregno
1725 = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
1726 ? subreg_nregs (x) : 1);
1727
1728 return endregno > inner_regno && regno < inner_endregno;
1729 }
1730 break;
1731
1732 case CLOBBER:
1733 case SET:
1734 if (&SET_DEST (x) != loc
1735 /* Note setting a SUBREG counts as referring to the REG it is in for
1736 a pseudo but not for hard registers since we can
1737 treat each word individually. */
1738 && ((GET_CODE (SET_DEST (x)) == SUBREG
1739 && loc != &SUBREG_REG (SET_DEST (x))
1740 && REG_P (SUBREG_REG (SET_DEST (x)))
1741 && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
1742 && refers_to_regno_p (regno, endregno,
1743 SUBREG_REG (SET_DEST (x)), loc))
1744 || (!REG_P (SET_DEST (x))
1745 && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
1746 return true;
1747
1748 if (code == CLOBBER || loc == &SET_SRC (x))
1749 return false;
1750 x = SET_SRC (x);
1751 goto repeat;
1752
1753 default:
1754 break;
1755 }
1756
1757 /* X does not match, so try its subexpressions. */
1758
1759 fmt = GET_RTX_FORMAT (code);
1760 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1761 {
1762 if (fmt[i] == 'e' && loc != &XEXP (x, i))
1763 {
1764 if (i == 0)
1765 {
1766 x = XEXP (x, 0);
1767 goto repeat;
1768 }
1769 else
1770 if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
1771 return true;
1772 }
1773 else if (fmt[i] == 'E')
1774 {
1775 int j;
1776 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1777 if (loc != &XVECEXP (x, i, j)
1778 && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
1779 return true;
1780 }
1781 }
1782 return false;
1783 }
1784
1785 /* Nonzero if modifying X will affect IN. If X is a register or a SUBREG,
1786 we check if any register number in X conflicts with the relevant register
1787 numbers. If X is a constant, return 0. If X is a MEM, return 1 iff IN
1788 contains a MEM (we don't bother checking for memory addresses that can't
1789 conflict because we expect this to be a rare case. */
1790
1791 int
reg_overlap_mentioned_p(const_rtx x,const_rtx in)1792 reg_overlap_mentioned_p (const_rtx x, const_rtx in)
1793 {
1794 unsigned int regno, endregno;
1795
1796 /* If either argument is a constant, then modifying X can not
1797 affect IN. Here we look at IN, we can profitably combine
1798 CONSTANT_P (x) with the switch statement below. */
1799 if (CONSTANT_P (in))
1800 return 0;
1801
1802 recurse:
1803 switch (GET_CODE (x))
1804 {
1805 case CLOBBER:
1806 case STRICT_LOW_PART:
1807 case ZERO_EXTRACT:
1808 case SIGN_EXTRACT:
1809 /* Overly conservative. */
1810 x = XEXP (x, 0);
1811 goto recurse;
1812
1813 case SUBREG:
1814 regno = REGNO (SUBREG_REG (x));
1815 if (regno < FIRST_PSEUDO_REGISTER)
1816 regno = subreg_regno (x);
1817 endregno = regno + (regno < FIRST_PSEUDO_REGISTER
1818 ? subreg_nregs (x) : 1);
1819 goto do_reg;
1820
1821 case REG:
1822 regno = REGNO (x);
1823 endregno = END_REGNO (x);
1824 do_reg:
1825 return refers_to_regno_p (regno, endregno, in, (rtx*) 0);
1826
1827 case MEM:
1828 {
1829 const char *fmt;
1830 int i;
1831
1832 if (MEM_P (in))
1833 return 1;
1834
1835 fmt = GET_RTX_FORMAT (GET_CODE (in));
1836 for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
1837 if (fmt[i] == 'e')
1838 {
1839 if (reg_overlap_mentioned_p (x, XEXP (in, i)))
1840 return 1;
1841 }
1842 else if (fmt[i] == 'E')
1843 {
1844 int j;
1845 for (j = XVECLEN (in, i) - 1; j >= 0; --j)
1846 if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
1847 return 1;
1848 }
1849
1850 return 0;
1851 }
1852
1853 case SCRATCH:
1854 case PC:
1855 case CC0:
1856 return reg_mentioned_p (x, in);
1857
1858 case PARALLEL:
1859 {
1860 int i;
1861
1862 /* If any register in here refers to it we return true. */
1863 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1864 if (XEXP (XVECEXP (x, 0, i), 0) != 0
1865 && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
1866 return 1;
1867 return 0;
1868 }
1869
1870 default:
1871 gcc_assert (CONSTANT_P (x));
1872 return 0;
1873 }
1874 }
1875
1876 /* Call FUN on each register or MEM that is stored into or clobbered by X.
1877 (X would be the pattern of an insn). DATA is an arbitrary pointer,
1878 ignored by note_stores, but passed to FUN.
1879
1880 FUN receives three arguments:
1881 1. the REG, MEM, CC0 or PC being stored in or clobbered,
1882 2. the SET or CLOBBER rtx that does the store,
1883 3. the pointer DATA provided to note_stores.
1884
1885 If the item being stored in or clobbered is a SUBREG of a hard register,
1886 the SUBREG will be passed. */
1887
1888 void
note_stores(const_rtx x,void (* fun)(rtx,const_rtx,void *),void * data)1889 note_stores (const_rtx x, void (*fun) (rtx, const_rtx, void *), void *data)
1890 {
1891 int i;
1892
1893 if (GET_CODE (x) == COND_EXEC)
1894 x = COND_EXEC_CODE (x);
1895
1896 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
1897 {
1898 rtx dest = SET_DEST (x);
1899
1900 while ((GET_CODE (dest) == SUBREG
1901 && (!REG_P (SUBREG_REG (dest))
1902 || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
1903 || GET_CODE (dest) == ZERO_EXTRACT
1904 || GET_CODE (dest) == STRICT_LOW_PART)
1905 dest = XEXP (dest, 0);
1906
1907 /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
1908 each of whose first operand is a register. */
1909 if (GET_CODE (dest) == PARALLEL)
1910 {
1911 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
1912 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
1913 (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
1914 }
1915 else
1916 (*fun) (dest, x, data);
1917 }
1918
1919 else if (GET_CODE (x) == PARALLEL)
1920 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1921 note_stores (XVECEXP (x, 0, i), fun, data);
1922 }
1923
1924 /* Like notes_stores, but call FUN for each expression that is being
1925 referenced in PBODY, a pointer to the PATTERN of an insn. We only call
1926 FUN for each expression, not any interior subexpressions. FUN receives a
1927 pointer to the expression and the DATA passed to this function.
1928
1929 Note that this is not quite the same test as that done in reg_referenced_p
1930 since that considers something as being referenced if it is being
1931 partially set, while we do not. */
1932
1933 void
note_uses(rtx * pbody,void (* fun)(rtx *,void *),void * data)1934 note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
1935 {
1936 rtx body = *pbody;
1937 int i;
1938
1939 switch (GET_CODE (body))
1940 {
1941 case COND_EXEC:
1942 (*fun) (&COND_EXEC_TEST (body), data);
1943 note_uses (&COND_EXEC_CODE (body), fun, data);
1944 return;
1945
1946 case PARALLEL:
1947 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1948 note_uses (&XVECEXP (body, 0, i), fun, data);
1949 return;
1950
1951 case SEQUENCE:
1952 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1953 note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
1954 return;
1955
1956 case USE:
1957 (*fun) (&XEXP (body, 0), data);
1958 return;
1959
1960 case ASM_OPERANDS:
1961 for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
1962 (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
1963 return;
1964
1965 case TRAP_IF:
1966 (*fun) (&TRAP_CONDITION (body), data);
1967 return;
1968
1969 case PREFETCH:
1970 (*fun) (&XEXP (body, 0), data);
1971 return;
1972
1973 case UNSPEC:
1974 case UNSPEC_VOLATILE:
1975 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1976 (*fun) (&XVECEXP (body, 0, i), data);
1977 return;
1978
1979 case CLOBBER:
1980 if (MEM_P (XEXP (body, 0)))
1981 (*fun) (&XEXP (XEXP (body, 0), 0), data);
1982 return;
1983
1984 case SET:
1985 {
1986 rtx dest = SET_DEST (body);
1987
1988 /* For sets we replace everything in source plus registers in memory
1989 expression in store and operands of a ZERO_EXTRACT. */
1990 (*fun) (&SET_SRC (body), data);
1991
1992 if (GET_CODE (dest) == ZERO_EXTRACT)
1993 {
1994 (*fun) (&XEXP (dest, 1), data);
1995 (*fun) (&XEXP (dest, 2), data);
1996 }
1997
1998 while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
1999 dest = XEXP (dest, 0);
2000
2001 if (MEM_P (dest))
2002 (*fun) (&XEXP (dest, 0), data);
2003 }
2004 return;
2005
2006 default:
2007 /* All the other possibilities never store. */
2008 (*fun) (pbody, data);
2009 return;
2010 }
2011 }
2012
2013 /* Return nonzero if X's old contents don't survive after INSN.
2014 This will be true if X is (cc0) or if X is a register and
2015 X dies in INSN or because INSN entirely sets X.
2016
2017 "Entirely set" means set directly and not through a SUBREG, or
2018 ZERO_EXTRACT, so no trace of the old contents remains.
2019 Likewise, REG_INC does not count.
2020
2021 REG may be a hard or pseudo reg. Renumbering is not taken into account,
2022 but for this use that makes no difference, since regs don't overlap
2023 during their lifetimes. Therefore, this function may be used
2024 at any time after deaths have been computed.
2025
2026 If REG is a hard reg that occupies multiple machine registers, this
2027 function will only return 1 if each of those registers will be replaced
2028 by INSN. */
2029
2030 int
dead_or_set_p(const rtx_insn * insn,const_rtx x)2031 dead_or_set_p (const rtx_insn *insn, const_rtx x)
2032 {
2033 unsigned int regno, end_regno;
2034 unsigned int i;
2035
2036 /* Can't use cc0_rtx below since this file is used by genattrtab.c. */
2037 if (GET_CODE (x) == CC0)
2038 return 1;
2039
2040 gcc_assert (REG_P (x));
2041
2042 regno = REGNO (x);
2043 end_regno = END_REGNO (x);
2044 for (i = regno; i < end_regno; i++)
2045 if (! dead_or_set_regno_p (insn, i))
2046 return 0;
2047
2048 return 1;
2049 }
2050
2051 /* Return TRUE iff DEST is a register or subreg of a register, is a
2052 complete rather than read-modify-write destination, and contains
2053 register TEST_REGNO. */
2054
2055 static bool
covers_regno_no_parallel_p(const_rtx dest,unsigned int test_regno)2056 covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
2057 {
2058 unsigned int regno, endregno;
2059
2060 if (GET_CODE (dest) == SUBREG && !read_modify_subreg_p (dest))
2061 dest = SUBREG_REG (dest);
2062
2063 if (!REG_P (dest))
2064 return false;
2065
2066 regno = REGNO (dest);
2067 endregno = END_REGNO (dest);
2068 return (test_regno >= regno && test_regno < endregno);
2069 }
2070
2071 /* Like covers_regno_no_parallel_p, but also handles PARALLELs where
2072 any member matches the covers_regno_no_parallel_p criteria. */
2073
2074 static bool
covers_regno_p(const_rtx dest,unsigned int test_regno)2075 covers_regno_p (const_rtx dest, unsigned int test_regno)
2076 {
2077 if (GET_CODE (dest) == PARALLEL)
2078 {
2079 /* Some targets place small structures in registers for return
2080 values of functions, and those registers are wrapped in
2081 PARALLELs that we may see as the destination of a SET. */
2082 int i;
2083
2084 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2085 {
2086 rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
2087 if (inner != NULL_RTX
2088 && covers_regno_no_parallel_p (inner, test_regno))
2089 return true;
2090 }
2091
2092 return false;
2093 }
2094 else
2095 return covers_regno_no_parallel_p (dest, test_regno);
2096 }
2097
2098 /* Utility function for dead_or_set_p to check an individual register. */
2099
2100 int
dead_or_set_regno_p(const rtx_insn * insn,unsigned int test_regno)2101 dead_or_set_regno_p (const rtx_insn *insn, unsigned int test_regno)
2102 {
2103 const_rtx pattern;
2104
2105 /* See if there is a death note for something that includes TEST_REGNO. */
2106 if (find_regno_note (insn, REG_DEAD, test_regno))
2107 return 1;
2108
2109 if (CALL_P (insn)
2110 && find_regno_fusage (insn, CLOBBER, test_regno))
2111 return 1;
2112
2113 pattern = PATTERN (insn);
2114
2115 /* If a COND_EXEC is not executed, the value survives. */
2116 if (GET_CODE (pattern) == COND_EXEC)
2117 return 0;
2118
2119 if (GET_CODE (pattern) == SET || GET_CODE (pattern) == CLOBBER)
2120 return covers_regno_p (SET_DEST (pattern), test_regno);
2121 else if (GET_CODE (pattern) == PARALLEL)
2122 {
2123 int i;
2124
2125 for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
2126 {
2127 rtx body = XVECEXP (pattern, 0, i);
2128
2129 if (GET_CODE (body) == COND_EXEC)
2130 body = COND_EXEC_CODE (body);
2131
2132 if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
2133 && covers_regno_p (SET_DEST (body), test_regno))
2134 return 1;
2135 }
2136 }
2137
2138 return 0;
2139 }
2140
2141 /* Return the reg-note of kind KIND in insn INSN, if there is one.
2142 If DATUM is nonzero, look for one whose datum is DATUM. */
2143
2144 rtx
find_reg_note(const_rtx insn,enum reg_note kind,const_rtx datum)2145 find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
2146 {
2147 rtx link;
2148
2149 gcc_checking_assert (insn);
2150
2151 /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
2152 if (! INSN_P (insn))
2153 return 0;
2154 if (datum == 0)
2155 {
2156 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2157 if (REG_NOTE_KIND (link) == kind)
2158 return link;
2159 return 0;
2160 }
2161
2162 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2163 if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
2164 return link;
2165 return 0;
2166 }
2167
2168 /* Return the reg-note of kind KIND in insn INSN which applies to register
2169 number REGNO, if any. Return 0 if there is no such reg-note. Note that
2170 the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
2171 it might be the case that the note overlaps REGNO. */
2172
2173 rtx
find_regno_note(const_rtx insn,enum reg_note kind,unsigned int regno)2174 find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
2175 {
2176 rtx link;
2177
2178 /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
2179 if (! INSN_P (insn))
2180 return 0;
2181
2182 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2183 if (REG_NOTE_KIND (link) == kind
2184 /* Verify that it is a register, so that scratch and MEM won't cause a
2185 problem here. */
2186 && REG_P (XEXP (link, 0))
2187 && REGNO (XEXP (link, 0)) <= regno
2188 && END_REGNO (XEXP (link, 0)) > regno)
2189 return link;
2190 return 0;
2191 }
2192
2193 /* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
2194 has such a note. */
2195
2196 rtx
find_reg_equal_equiv_note(const_rtx insn)2197 find_reg_equal_equiv_note (const_rtx insn)
2198 {
2199 rtx link;
2200
2201 if (!INSN_P (insn))
2202 return 0;
2203
2204 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2205 if (REG_NOTE_KIND (link) == REG_EQUAL
2206 || REG_NOTE_KIND (link) == REG_EQUIV)
2207 {
2208 /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
2209 insns that have multiple sets. Checking single_set to
2210 make sure of this is not the proper check, as explained
2211 in the comment in set_unique_reg_note.
2212
2213 This should be changed into an assert. */
2214 if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
2215 return 0;
2216 return link;
2217 }
2218 return NULL;
2219 }
2220
2221 /* Check whether INSN is a single_set whose source is known to be
2222 equivalent to a constant. Return that constant if so, otherwise
2223 return null. */
2224
2225 rtx
find_constant_src(const rtx_insn * insn)2226 find_constant_src (const rtx_insn *insn)
2227 {
2228 rtx note, set, x;
2229
2230 set = single_set (insn);
2231 if (set)
2232 {
2233 x = avoid_constant_pool_reference (SET_SRC (set));
2234 if (CONSTANT_P (x))
2235 return x;
2236 }
2237
2238 note = find_reg_equal_equiv_note (insn);
2239 if (note && CONSTANT_P (XEXP (note, 0)))
2240 return XEXP (note, 0);
2241
2242 return NULL_RTX;
2243 }
2244
2245 /* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
2246 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
2247
2248 int
find_reg_fusage(const_rtx insn,enum rtx_code code,const_rtx datum)2249 find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
2250 {
2251 /* If it's not a CALL_INSN, it can't possibly have a
2252 CALL_INSN_FUNCTION_USAGE field, so don't bother checking. */
2253 if (!CALL_P (insn))
2254 return 0;
2255
2256 gcc_assert (datum);
2257
2258 if (!REG_P (datum))
2259 {
2260 rtx link;
2261
2262 for (link = CALL_INSN_FUNCTION_USAGE (insn);
2263 link;
2264 link = XEXP (link, 1))
2265 if (GET_CODE (XEXP (link, 0)) == code
2266 && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
2267 return 1;
2268 }
2269 else
2270 {
2271 unsigned int regno = REGNO (datum);
2272
2273 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
2274 to pseudo registers, so don't bother checking. */
2275
2276 if (regno < FIRST_PSEUDO_REGISTER)
2277 {
2278 unsigned int end_regno = END_REGNO (datum);
2279 unsigned int i;
2280
2281 for (i = regno; i < end_regno; i++)
2282 if (find_regno_fusage (insn, code, i))
2283 return 1;
2284 }
2285 }
2286
2287 return 0;
2288 }
2289
2290 /* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
2291 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
2292
2293 int
find_regno_fusage(const_rtx insn,enum rtx_code code,unsigned int regno)2294 find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
2295 {
2296 rtx link;
2297
2298 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
2299 to pseudo registers, so don't bother checking. */
2300
2301 if (regno >= FIRST_PSEUDO_REGISTER
2302 || !CALL_P (insn) )
2303 return 0;
2304
2305 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2306 {
2307 rtx op, reg;
2308
2309 if (GET_CODE (op = XEXP (link, 0)) == code
2310 && REG_P (reg = XEXP (op, 0))
2311 && REGNO (reg) <= regno
2312 && END_REGNO (reg) > regno)
2313 return 1;
2314 }
2315
2316 return 0;
2317 }
2318
2319
2320 /* Return true if KIND is an integer REG_NOTE. */
2321
2322 static bool
int_reg_note_p(enum reg_note kind)2323 int_reg_note_p (enum reg_note kind)
2324 {
2325 return kind == REG_BR_PROB;
2326 }
2327
2328 /* Allocate a register note with kind KIND and datum DATUM. LIST is
2329 stored as the pointer to the next register note. */
2330
2331 rtx
alloc_reg_note(enum reg_note kind,rtx datum,rtx list)2332 alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
2333 {
2334 rtx note;
2335
2336 gcc_checking_assert (!int_reg_note_p (kind));
2337 switch (kind)
2338 {
2339 case REG_CC_SETTER:
2340 case REG_CC_USER:
2341 case REG_LABEL_TARGET:
2342 case REG_LABEL_OPERAND:
2343 case REG_TM:
2344 /* These types of register notes use an INSN_LIST rather than an
2345 EXPR_LIST, so that copying is done right and dumps look
2346 better. */
2347 note = alloc_INSN_LIST (datum, list);
2348 PUT_REG_NOTE_KIND (note, kind);
2349 break;
2350
2351 default:
2352 note = alloc_EXPR_LIST (kind, datum, list);
2353 break;
2354 }
2355
2356 return note;
2357 }
2358
2359 /* Add register note with kind KIND and datum DATUM to INSN. */
2360
2361 void
add_reg_note(rtx insn,enum reg_note kind,rtx datum)2362 add_reg_note (rtx insn, enum reg_note kind, rtx datum)
2363 {
2364 REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
2365 }
2366
2367 /* Add an integer register note with kind KIND and datum DATUM to INSN. */
2368
2369 void
add_int_reg_note(rtx_insn * insn,enum reg_note kind,int datum)2370 add_int_reg_note (rtx_insn *insn, enum reg_note kind, int datum)
2371 {
2372 gcc_checking_assert (int_reg_note_p (kind));
2373 REG_NOTES (insn) = gen_rtx_INT_LIST ((machine_mode) kind,
2374 datum, REG_NOTES (insn));
2375 }
2376
2377 /* Add a REG_ARGS_SIZE note to INSN with value VALUE. */
2378
2379 void
add_args_size_note(rtx_insn * insn,poly_int64 value)2380 add_args_size_note (rtx_insn *insn, poly_int64 value)
2381 {
2382 gcc_checking_assert (!find_reg_note (insn, REG_ARGS_SIZE, NULL_RTX));
2383 add_reg_note (insn, REG_ARGS_SIZE, gen_int_mode (value, Pmode));
2384 }
2385
2386 /* Add a register note like NOTE to INSN. */
2387
2388 void
add_shallow_copy_of_reg_note(rtx_insn * insn,rtx note)2389 add_shallow_copy_of_reg_note (rtx_insn *insn, rtx note)
2390 {
2391 if (GET_CODE (note) == INT_LIST)
2392 add_int_reg_note (insn, REG_NOTE_KIND (note), XINT (note, 0));
2393 else
2394 add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
2395 }
2396
2397 /* Duplicate NOTE and return the copy. */
2398 rtx
duplicate_reg_note(rtx note)2399 duplicate_reg_note (rtx note)
2400 {
2401 reg_note kind = REG_NOTE_KIND (note);
2402
2403 if (GET_CODE (note) == INT_LIST)
2404 return gen_rtx_INT_LIST ((machine_mode) kind, XINT (note, 0), NULL_RTX);
2405 else if (GET_CODE (note) == EXPR_LIST)
2406 return alloc_reg_note (kind, copy_insn_1 (XEXP (note, 0)), NULL_RTX);
2407 else
2408 return alloc_reg_note (kind, XEXP (note, 0), NULL_RTX);
2409 }
2410
2411 /* Remove register note NOTE from the REG_NOTES of INSN. */
2412
2413 void
remove_note(rtx_insn * insn,const_rtx note)2414 remove_note (rtx_insn *insn, const_rtx note)
2415 {
2416 rtx link;
2417
2418 if (note == NULL_RTX)
2419 return;
2420
2421 if (REG_NOTES (insn) == note)
2422 REG_NOTES (insn) = XEXP (note, 1);
2423 else
2424 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2425 if (XEXP (link, 1) == note)
2426 {
2427 XEXP (link, 1) = XEXP (note, 1);
2428 break;
2429 }
2430
2431 switch (REG_NOTE_KIND (note))
2432 {
2433 case REG_EQUAL:
2434 case REG_EQUIV:
2435 df_notes_rescan (insn);
2436 break;
2437 default:
2438 break;
2439 }
2440 }
2441
2442 /* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes.
2443 Return true if any note has been removed. */
2444
2445 bool
remove_reg_equal_equiv_notes(rtx_insn * insn)2446 remove_reg_equal_equiv_notes (rtx_insn *insn)
2447 {
2448 rtx *loc;
2449 bool ret = false;
2450
2451 loc = ®_NOTES (insn);
2452 while (*loc)
2453 {
2454 enum reg_note kind = REG_NOTE_KIND (*loc);
2455 if (kind == REG_EQUAL || kind == REG_EQUIV)
2456 {
2457 *loc = XEXP (*loc, 1);
2458 ret = true;
2459 }
2460 else
2461 loc = &XEXP (*loc, 1);
2462 }
2463 return ret;
2464 }
2465
2466 /* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO. */
2467
2468 void
remove_reg_equal_equiv_notes_for_regno(unsigned int regno)2469 remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
2470 {
2471 df_ref eq_use;
2472
2473 if (!df)
2474 return;
2475
2476 /* This loop is a little tricky. We cannot just go down the chain because
2477 it is being modified by some actions in the loop. So we just iterate
2478 over the head. We plan to drain the list anyway. */
2479 while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
2480 {
2481 rtx_insn *insn = DF_REF_INSN (eq_use);
2482 rtx note = find_reg_equal_equiv_note (insn);
2483
2484 /* This assert is generally triggered when someone deletes a REG_EQUAL
2485 or REG_EQUIV note by hacking the list manually rather than calling
2486 remove_note. */
2487 gcc_assert (note);
2488
2489 remove_note (insn, note);
2490 }
2491 }
2492
2493 /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
2494 return 1 if it is found. A simple equality test is used to determine if
2495 NODE matches. */
2496
2497 bool
in_insn_list_p(const rtx_insn_list * listp,const rtx_insn * node)2498 in_insn_list_p (const rtx_insn_list *listp, const rtx_insn *node)
2499 {
2500 const_rtx x;
2501
2502 for (x = listp; x; x = XEXP (x, 1))
2503 if (node == XEXP (x, 0))
2504 return true;
2505
2506 return false;
2507 }
2508
2509 /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
2510 remove that entry from the list if it is found.
2511
2512 A simple equality test is used to determine if NODE matches. */
2513
2514 void
remove_node_from_expr_list(const_rtx node,rtx_expr_list ** listp)2515 remove_node_from_expr_list (const_rtx node, rtx_expr_list **listp)
2516 {
2517 rtx_expr_list *temp = *listp;
2518 rtx_expr_list *prev = NULL;
2519
2520 while (temp)
2521 {
2522 if (node == temp->element ())
2523 {
2524 /* Splice the node out of the list. */
2525 if (prev)
2526 XEXP (prev, 1) = temp->next ();
2527 else
2528 *listp = temp->next ();
2529
2530 return;
2531 }
2532
2533 prev = temp;
2534 temp = temp->next ();
2535 }
2536 }
2537
2538 /* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
2539 remove that entry from the list if it is found.
2540
2541 A simple equality test is used to determine if NODE matches. */
2542
2543 void
remove_node_from_insn_list(const rtx_insn * node,rtx_insn_list ** listp)2544 remove_node_from_insn_list (const rtx_insn *node, rtx_insn_list **listp)
2545 {
2546 rtx_insn_list *temp = *listp;
2547 rtx_insn_list *prev = NULL;
2548
2549 while (temp)
2550 {
2551 if (node == temp->insn ())
2552 {
2553 /* Splice the node out of the list. */
2554 if (prev)
2555 XEXP (prev, 1) = temp->next ();
2556 else
2557 *listp = temp->next ();
2558
2559 return;
2560 }
2561
2562 prev = temp;
2563 temp = temp->next ();
2564 }
2565 }
2566
2567 /* Nonzero if X contains any volatile instructions. These are instructions
2568 which may cause unpredictable machine state instructions, and thus no
2569 instructions or register uses should be moved or combined across them.
2570 This includes only volatile asms and UNSPEC_VOLATILE instructions. */
2571
2572 int
volatile_insn_p(const_rtx x)2573 volatile_insn_p (const_rtx x)
2574 {
2575 const RTX_CODE code = GET_CODE (x);
2576 switch (code)
2577 {
2578 case LABEL_REF:
2579 case SYMBOL_REF:
2580 case CONST:
2581 CASE_CONST_ANY:
2582 case CC0:
2583 case PC:
2584 case REG:
2585 case SCRATCH:
2586 case CLOBBER:
2587 case ADDR_VEC:
2588 case ADDR_DIFF_VEC:
2589 case CALL:
2590 case MEM:
2591 return 0;
2592
2593 case UNSPEC_VOLATILE:
2594 return 1;
2595
2596 case ASM_INPUT:
2597 case ASM_OPERANDS:
2598 if (MEM_VOLATILE_P (x))
2599 return 1;
2600
2601 default:
2602 break;
2603 }
2604
2605 /* Recursively scan the operands of this expression. */
2606
2607 {
2608 const char *const fmt = GET_RTX_FORMAT (code);
2609 int i;
2610
2611 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2612 {
2613 if (fmt[i] == 'e')
2614 {
2615 if (volatile_insn_p (XEXP (x, i)))
2616 return 1;
2617 }
2618 else if (fmt[i] == 'E')
2619 {
2620 int j;
2621 for (j = 0; j < XVECLEN (x, i); j++)
2622 if (volatile_insn_p (XVECEXP (x, i, j)))
2623 return 1;
2624 }
2625 }
2626 }
2627 return 0;
2628 }
2629
2630 /* Nonzero if X contains any volatile memory references
2631 UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions. */
2632
2633 int
volatile_refs_p(const_rtx x)2634 volatile_refs_p (const_rtx x)
2635 {
2636 const RTX_CODE code = GET_CODE (x);
2637 switch (code)
2638 {
2639 case LABEL_REF:
2640 case SYMBOL_REF:
2641 case CONST:
2642 CASE_CONST_ANY:
2643 case CC0:
2644 case PC:
2645 case REG:
2646 case SCRATCH:
2647 case CLOBBER:
2648 case ADDR_VEC:
2649 case ADDR_DIFF_VEC:
2650 return 0;
2651
2652 case UNSPEC_VOLATILE:
2653 return 1;
2654
2655 case MEM:
2656 case ASM_INPUT:
2657 case ASM_OPERANDS:
2658 if (MEM_VOLATILE_P (x))
2659 return 1;
2660
2661 default:
2662 break;
2663 }
2664
2665 /* Recursively scan the operands of this expression. */
2666
2667 {
2668 const char *const fmt = GET_RTX_FORMAT (code);
2669 int i;
2670
2671 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2672 {
2673 if (fmt[i] == 'e')
2674 {
2675 if (volatile_refs_p (XEXP (x, i)))
2676 return 1;
2677 }
2678 else if (fmt[i] == 'E')
2679 {
2680 int j;
2681 for (j = 0; j < XVECLEN (x, i); j++)
2682 if (volatile_refs_p (XVECEXP (x, i, j)))
2683 return 1;
2684 }
2685 }
2686 }
2687 return 0;
2688 }
2689
2690 /* Similar to above, except that it also rejects register pre- and post-
2691 incrementing. */
2692
2693 int
side_effects_p(const_rtx x)2694 side_effects_p (const_rtx x)
2695 {
2696 const RTX_CODE code = GET_CODE (x);
2697 switch (code)
2698 {
2699 case LABEL_REF:
2700 case SYMBOL_REF:
2701 case CONST:
2702 CASE_CONST_ANY:
2703 case CC0:
2704 case PC:
2705 case REG:
2706 case SCRATCH:
2707 case ADDR_VEC:
2708 case ADDR_DIFF_VEC:
2709 case VAR_LOCATION:
2710 return 0;
2711
2712 case CLOBBER:
2713 /* Reject CLOBBER with a non-VOID mode. These are made by combine.c
2714 when some combination can't be done. If we see one, don't think
2715 that we can simplify the expression. */
2716 return (GET_MODE (x) != VOIDmode);
2717
2718 case PRE_INC:
2719 case PRE_DEC:
2720 case POST_INC:
2721 case POST_DEC:
2722 case PRE_MODIFY:
2723 case POST_MODIFY:
2724 case CALL:
2725 case UNSPEC_VOLATILE:
2726 return 1;
2727
2728 case MEM:
2729 case ASM_INPUT:
2730 case ASM_OPERANDS:
2731 if (MEM_VOLATILE_P (x))
2732 return 1;
2733
2734 default:
2735 break;
2736 }
2737
2738 /* Recursively scan the operands of this expression. */
2739
2740 {
2741 const char *fmt = GET_RTX_FORMAT (code);
2742 int i;
2743
2744 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2745 {
2746 if (fmt[i] == 'e')
2747 {
2748 if (side_effects_p (XEXP (x, i)))
2749 return 1;
2750 }
2751 else if (fmt[i] == 'E')
2752 {
2753 int j;
2754 for (j = 0; j < XVECLEN (x, i); j++)
2755 if (side_effects_p (XVECEXP (x, i, j)))
2756 return 1;
2757 }
2758 }
2759 }
2760 return 0;
2761 }
2762
2763 /* Return nonzero if evaluating rtx X might cause a trap.
2764 FLAGS controls how to consider MEMs. A nonzero means the context
2765 of the access may have changed from the original, such that the
2766 address may have become invalid. */
2767
2768 int
may_trap_p_1(const_rtx x,unsigned flags)2769 may_trap_p_1 (const_rtx x, unsigned flags)
2770 {
2771 int i;
2772 enum rtx_code code;
2773 const char *fmt;
2774
2775 /* We make no distinction currently, but this function is part of
2776 the internal target-hooks ABI so we keep the parameter as
2777 "unsigned flags". */
2778 bool code_changed = flags != 0;
2779
2780 if (x == 0)
2781 return 0;
2782 code = GET_CODE (x);
2783 switch (code)
2784 {
2785 /* Handle these cases quickly. */
2786 CASE_CONST_ANY:
2787 case SYMBOL_REF:
2788 case LABEL_REF:
2789 case CONST:
2790 case PC:
2791 case CC0:
2792 case REG:
2793 case SCRATCH:
2794 return 0;
2795
2796 case UNSPEC:
2797 return targetm.unspec_may_trap_p (x, flags);
2798
2799 case UNSPEC_VOLATILE:
2800 case ASM_INPUT:
2801 case TRAP_IF:
2802 return 1;
2803
2804 case ASM_OPERANDS:
2805 return MEM_VOLATILE_P (x);
2806
2807 /* Memory ref can trap unless it's a static var or a stack slot. */
2808 case MEM:
2809 /* Recognize specific pattern of stack checking probes. */
2810 if (flag_stack_check
2811 && MEM_VOLATILE_P (x)
2812 && XEXP (x, 0) == stack_pointer_rtx)
2813 return 1;
2814 if (/* MEM_NOTRAP_P only relates to the actual position of the memory
2815 reference; moving it out of context such as when moving code
2816 when optimizing, might cause its address to become invalid. */
2817 code_changed
2818 || !MEM_NOTRAP_P (x))
2819 {
2820 poly_int64 size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : -1;
2821 return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
2822 GET_MODE (x), code_changed);
2823 }
2824
2825 return 0;
2826
2827 /* Division by a non-constant might trap. */
2828 case DIV:
2829 case MOD:
2830 case UDIV:
2831 case UMOD:
2832 if (HONOR_SNANS (x))
2833 return 1;
2834 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
2835 return flag_trapping_math;
2836 if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
2837 return 1;
2838 break;
2839
2840 case EXPR_LIST:
2841 /* An EXPR_LIST is used to represent a function call. This
2842 certainly may trap. */
2843 return 1;
2844
2845 case GE:
2846 case GT:
2847 case LE:
2848 case LT:
2849 case LTGT:
2850 case COMPARE:
2851 /* Some floating point comparisons may trap. */
2852 if (!flag_trapping_math)
2853 break;
2854 /* ??? There is no machine independent way to check for tests that trap
2855 when COMPARE is used, though many targets do make this distinction.
2856 For instance, sparc uses CCFPE for compares which generate exceptions
2857 and CCFP for compares which do not generate exceptions. */
2858 if (HONOR_NANS (x))
2859 return 1;
2860 /* But often the compare has some CC mode, so check operand
2861 modes as well. */
2862 if (HONOR_NANS (XEXP (x, 0))
2863 || HONOR_NANS (XEXP (x, 1)))
2864 return 1;
2865 break;
2866
2867 case EQ:
2868 case NE:
2869 if (HONOR_SNANS (x))
2870 return 1;
2871 /* Often comparison is CC mode, so check operand modes. */
2872 if (HONOR_SNANS (XEXP (x, 0))
2873 || HONOR_SNANS (XEXP (x, 1)))
2874 return 1;
2875 break;
2876
2877 case FIX:
2878 /* Conversion of floating point might trap. */
2879 if (flag_trapping_math && HONOR_NANS (XEXP (x, 0)))
2880 return 1;
2881 break;
2882
2883 case NEG:
2884 case ABS:
2885 case SUBREG:
2886 /* These operations don't trap even with floating point. */
2887 break;
2888
2889 default:
2890 /* Any floating arithmetic may trap. */
2891 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
2892 return 1;
2893 }
2894
2895 fmt = GET_RTX_FORMAT (code);
2896 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2897 {
2898 if (fmt[i] == 'e')
2899 {
2900 if (may_trap_p_1 (XEXP (x, i), flags))
2901 return 1;
2902 }
2903 else if (fmt[i] == 'E')
2904 {
2905 int j;
2906 for (j = 0; j < XVECLEN (x, i); j++)
2907 if (may_trap_p_1 (XVECEXP (x, i, j), flags))
2908 return 1;
2909 }
2910 }
2911 return 0;
2912 }
2913
2914 /* Return nonzero if evaluating rtx X might cause a trap. */
2915
2916 int
may_trap_p(const_rtx x)2917 may_trap_p (const_rtx x)
2918 {
2919 return may_trap_p_1 (x, 0);
2920 }
2921
2922 /* Same as above, but additionally return nonzero if evaluating rtx X might
2923 cause a fault. We define a fault for the purpose of this function as a
2924 erroneous execution condition that cannot be encountered during the normal
2925 execution of a valid program; the typical example is an unaligned memory
2926 access on a strict alignment machine. The compiler guarantees that it
2927 doesn't generate code that will fault from a valid program, but this
2928 guarantee doesn't mean anything for individual instructions. Consider
2929 the following example:
2930
2931 struct S { int d; union { char *cp; int *ip; }; };
2932
2933 int foo(struct S *s)
2934 {
2935 if (s->d == 1)
2936 return *s->ip;
2937 else
2938 return *s->cp;
2939 }
2940
2941 on a strict alignment machine. In a valid program, foo will never be
2942 invoked on a structure for which d is equal to 1 and the underlying
2943 unique field of the union not aligned on a 4-byte boundary, but the
2944 expression *s->ip might cause a fault if considered individually.
2945
2946 At the RTL level, potentially problematic expressions will almost always
2947 verify may_trap_p; for example, the above dereference can be emitted as
2948 (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
2949 However, suppose that foo is inlined in a caller that causes s->cp to
2950 point to a local character variable and guarantees that s->d is not set
2951 to 1; foo may have been effectively translated into pseudo-RTL as:
2952
2953 if ((reg:SI) == 1)
2954 (set (reg:SI) (mem:SI (%fp - 7)))
2955 else
2956 (set (reg:QI) (mem:QI (%fp - 7)))
2957
2958 Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
2959 memory reference to a stack slot, but it will certainly cause a fault
2960 on a strict alignment machine. */
2961
2962 int
may_trap_or_fault_p(const_rtx x)2963 may_trap_or_fault_p (const_rtx x)
2964 {
2965 return may_trap_p_1 (x, 1);
2966 }
2967
2968 /* Return nonzero if X contains a comparison that is not either EQ or NE,
2969 i.e., an inequality. */
2970
2971 int
inequality_comparisons_p(const_rtx x)2972 inequality_comparisons_p (const_rtx x)
2973 {
2974 const char *fmt;
2975 int len, i;
2976 const enum rtx_code code = GET_CODE (x);
2977
2978 switch (code)
2979 {
2980 case REG:
2981 case SCRATCH:
2982 case PC:
2983 case CC0:
2984 CASE_CONST_ANY:
2985 case CONST:
2986 case LABEL_REF:
2987 case SYMBOL_REF:
2988 return 0;
2989
2990 case LT:
2991 case LTU:
2992 case GT:
2993 case GTU:
2994 case LE:
2995 case LEU:
2996 case GE:
2997 case GEU:
2998 return 1;
2999
3000 default:
3001 break;
3002 }
3003
3004 len = GET_RTX_LENGTH (code);
3005 fmt = GET_RTX_FORMAT (code);
3006
3007 for (i = 0; i < len; i++)
3008 {
3009 if (fmt[i] == 'e')
3010 {
3011 if (inequality_comparisons_p (XEXP (x, i)))
3012 return 1;
3013 }
3014 else if (fmt[i] == 'E')
3015 {
3016 int j;
3017 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3018 if (inequality_comparisons_p (XVECEXP (x, i, j)))
3019 return 1;
3020 }
3021 }
3022
3023 return 0;
3024 }
3025
3026 /* Replace any occurrence of FROM in X with TO. The function does
3027 not enter into CONST_DOUBLE for the replace.
3028
3029 Note that copying is not done so X must not be shared unless all copies
3030 are to be modified.
3031
3032 ALL_REGS is true if we want to replace all REGs equal to FROM, not just
3033 those pointer-equal ones. */
3034
3035 rtx
replace_rtx(rtx x,rtx from,rtx to,bool all_regs)3036 replace_rtx (rtx x, rtx from, rtx to, bool all_regs)
3037 {
3038 int i, j;
3039 const char *fmt;
3040
3041 if (x == from)
3042 return to;
3043
3044 /* Allow this function to make replacements in EXPR_LISTs. */
3045 if (x == 0)
3046 return 0;
3047
3048 if (all_regs
3049 && REG_P (x)
3050 && REG_P (from)
3051 && REGNO (x) == REGNO (from))
3052 {
3053 gcc_assert (GET_MODE (x) == GET_MODE (from));
3054 return to;
3055 }
3056 else if (GET_CODE (x) == SUBREG)
3057 {
3058 rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to, all_regs);
3059
3060 if (CONST_INT_P (new_rtx))
3061 {
3062 x = simplify_subreg (GET_MODE (x), new_rtx,
3063 GET_MODE (SUBREG_REG (x)),
3064 SUBREG_BYTE (x));
3065 gcc_assert (x);
3066 }
3067 else
3068 SUBREG_REG (x) = new_rtx;
3069
3070 return x;
3071 }
3072 else if (GET_CODE (x) == ZERO_EXTEND)
3073 {
3074 rtx new_rtx = replace_rtx (XEXP (x, 0), from, to, all_regs);
3075
3076 if (CONST_INT_P (new_rtx))
3077 {
3078 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
3079 new_rtx, GET_MODE (XEXP (x, 0)));
3080 gcc_assert (x);
3081 }
3082 else
3083 XEXP (x, 0) = new_rtx;
3084
3085 return x;
3086 }
3087
3088 fmt = GET_RTX_FORMAT (GET_CODE (x));
3089 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3090 {
3091 if (fmt[i] == 'e')
3092 XEXP (x, i) = replace_rtx (XEXP (x, i), from, to, all_regs);
3093 else if (fmt[i] == 'E')
3094 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3095 XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j),
3096 from, to, all_regs);
3097 }
3098
3099 return x;
3100 }
3101
3102 /* Replace occurrences of the OLD_LABEL in *LOC with NEW_LABEL. Also track
3103 the change in LABEL_NUSES if UPDATE_LABEL_NUSES. */
3104
3105 void
replace_label(rtx * loc,rtx old_label,rtx new_label,bool update_label_nuses)3106 replace_label (rtx *loc, rtx old_label, rtx new_label, bool update_label_nuses)
3107 {
3108 /* Handle jump tables specially, since ADDR_{DIFF_,}VECs can be long. */
3109 rtx x = *loc;
3110 if (JUMP_TABLE_DATA_P (x))
3111 {
3112 x = PATTERN (x);
3113 rtvec vec = XVEC (x, GET_CODE (x) == ADDR_DIFF_VEC);
3114 int len = GET_NUM_ELEM (vec);
3115 for (int i = 0; i < len; ++i)
3116 {
3117 rtx ref = RTVEC_ELT (vec, i);
3118 if (XEXP (ref, 0) == old_label)
3119 {
3120 XEXP (ref, 0) = new_label;
3121 if (update_label_nuses)
3122 {
3123 ++LABEL_NUSES (new_label);
3124 --LABEL_NUSES (old_label);
3125 }
3126 }
3127 }
3128 return;
3129 }
3130
3131 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
3132 field. This is not handled by the iterator because it doesn't
3133 handle unprinted ('0') fields. */
3134 if (JUMP_P (x) && JUMP_LABEL (x) == old_label)
3135 JUMP_LABEL (x) = new_label;
3136
3137 subrtx_ptr_iterator::array_type array;
3138 FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL)
3139 {
3140 rtx *loc = *iter;
3141 if (rtx x = *loc)
3142 {
3143 if (GET_CODE (x) == SYMBOL_REF
3144 && CONSTANT_POOL_ADDRESS_P (x))
3145 {
3146 rtx c = get_pool_constant (x);
3147 if (rtx_referenced_p (old_label, c))
3148 {
3149 /* Create a copy of constant C; replace the label inside
3150 but do not update LABEL_NUSES because uses in constant pool
3151 are not counted. */
3152 rtx new_c = copy_rtx (c);
3153 replace_label (&new_c, old_label, new_label, false);
3154
3155 /* Add the new constant NEW_C to constant pool and replace
3156 the old reference to constant by new reference. */
3157 rtx new_mem = force_const_mem (get_pool_mode (x), new_c);
3158 *loc = replace_rtx (x, x, XEXP (new_mem, 0));
3159 }
3160 }
3161
3162 if ((GET_CODE (x) == LABEL_REF
3163 || GET_CODE (x) == INSN_LIST)
3164 && XEXP (x, 0) == old_label)
3165 {
3166 XEXP (x, 0) = new_label;
3167 if (update_label_nuses)
3168 {
3169 ++LABEL_NUSES (new_label);
3170 --LABEL_NUSES (old_label);
3171 }
3172 }
3173 }
3174 }
3175 }
3176
3177 void
replace_label_in_insn(rtx_insn * insn,rtx_insn * old_label,rtx_insn * new_label,bool update_label_nuses)3178 replace_label_in_insn (rtx_insn *insn, rtx_insn *old_label,
3179 rtx_insn *new_label, bool update_label_nuses)
3180 {
3181 rtx insn_as_rtx = insn;
3182 replace_label (&insn_as_rtx, old_label, new_label, update_label_nuses);
3183 gcc_checking_assert (insn_as_rtx == insn);
3184 }
3185
3186 /* Return true if X is referenced in BODY. */
3187
3188 bool
rtx_referenced_p(const_rtx x,const_rtx body)3189 rtx_referenced_p (const_rtx x, const_rtx body)
3190 {
3191 subrtx_iterator::array_type array;
3192 FOR_EACH_SUBRTX (iter, array, body, ALL)
3193 if (const_rtx y = *iter)
3194 {
3195 /* Check if a label_ref Y refers to label X. */
3196 if (GET_CODE (y) == LABEL_REF
3197 && LABEL_P (x)
3198 && label_ref_label (y) == x)
3199 return true;
3200
3201 if (rtx_equal_p (x, y))
3202 return true;
3203
3204 /* If Y is a reference to pool constant traverse the constant. */
3205 if (GET_CODE (y) == SYMBOL_REF
3206 && CONSTANT_POOL_ADDRESS_P (y))
3207 iter.substitute (get_pool_constant (y));
3208 }
3209 return false;
3210 }
3211
3212 /* If INSN is a tablejump return true and store the label (before jump table) to
3213 *LABELP and the jump table to *TABLEP. LABELP and TABLEP may be NULL. */
3214
3215 bool
tablejump_p(const rtx_insn * insn,rtx_insn ** labelp,rtx_jump_table_data ** tablep)3216 tablejump_p (const rtx_insn *insn, rtx_insn **labelp,
3217 rtx_jump_table_data **tablep)
3218 {
3219 if (!JUMP_P (insn))
3220 return false;
3221
3222 rtx target = JUMP_LABEL (insn);
3223 if (target == NULL_RTX || ANY_RETURN_P (target))
3224 return false;
3225
3226 rtx_insn *label = as_a<rtx_insn *> (target);
3227 rtx_insn *table = next_insn (label);
3228 if (table == NULL_RTX || !JUMP_TABLE_DATA_P (table))
3229 return false;
3230
3231 if (labelp)
3232 *labelp = label;
3233 if (tablep)
3234 *tablep = as_a <rtx_jump_table_data *> (table);
3235 return true;
3236 }
3237
3238 /* A subroutine of computed_jump_p, return 1 if X contains a REG or MEM or
3239 constant that is not in the constant pool and not in the condition
3240 of an IF_THEN_ELSE. */
3241
3242 static int
computed_jump_p_1(const_rtx x)3243 computed_jump_p_1 (const_rtx x)
3244 {
3245 const enum rtx_code code = GET_CODE (x);
3246 int i, j;
3247 const char *fmt;
3248
3249 switch (code)
3250 {
3251 case LABEL_REF:
3252 case PC:
3253 return 0;
3254
3255 case CONST:
3256 CASE_CONST_ANY:
3257 case SYMBOL_REF:
3258 case REG:
3259 return 1;
3260
3261 case MEM:
3262 return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
3263 && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));
3264
3265 case IF_THEN_ELSE:
3266 return (computed_jump_p_1 (XEXP (x, 1))
3267 || computed_jump_p_1 (XEXP (x, 2)));
3268
3269 default:
3270 break;
3271 }
3272
3273 fmt = GET_RTX_FORMAT (code);
3274 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3275 {
3276 if (fmt[i] == 'e'
3277 && computed_jump_p_1 (XEXP (x, i)))
3278 return 1;
3279
3280 else if (fmt[i] == 'E')
3281 for (j = 0; j < XVECLEN (x, i); j++)
3282 if (computed_jump_p_1 (XVECEXP (x, i, j)))
3283 return 1;
3284 }
3285
3286 return 0;
3287 }
3288
3289 /* Return nonzero if INSN is an indirect jump (aka computed jump).
3290
3291 Tablejumps and casesi insns are not considered indirect jumps;
3292 we can recognize them by a (use (label_ref)). */
3293
3294 int
computed_jump_p(const rtx_insn * insn)3295 computed_jump_p (const rtx_insn *insn)
3296 {
3297 int i;
3298 if (JUMP_P (insn))
3299 {
3300 rtx pat = PATTERN (insn);
3301
3302 /* If we have a JUMP_LABEL set, we're not a computed jump. */
3303 if (JUMP_LABEL (insn) != NULL)
3304 return 0;
3305
3306 if (GET_CODE (pat) == PARALLEL)
3307 {
3308 int len = XVECLEN (pat, 0);
3309 int has_use_labelref = 0;
3310
3311 for (i = len - 1; i >= 0; i--)
3312 if (GET_CODE (XVECEXP (pat, 0, i)) == USE
3313 && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
3314 == LABEL_REF))
3315 {
3316 has_use_labelref = 1;
3317 break;
3318 }
3319
3320 if (! has_use_labelref)
3321 for (i = len - 1; i >= 0; i--)
3322 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
3323 && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
3324 && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
3325 return 1;
3326 }
3327 else if (GET_CODE (pat) == SET
3328 && SET_DEST (pat) == pc_rtx
3329 && computed_jump_p_1 (SET_SRC (pat)))
3330 return 1;
3331 }
3332 return 0;
3333 }
3334
3335
3336
3337 /* MEM has a PRE/POST-INC/DEC/MODIFY address X. Extract the operands of
3338 the equivalent add insn and pass the result to FN, using DATA as the
3339 final argument. */
3340
3341 static int
for_each_inc_dec_find_inc_dec(rtx mem,for_each_inc_dec_fn fn,void * data)3342 for_each_inc_dec_find_inc_dec (rtx mem, for_each_inc_dec_fn fn, void *data)
3343 {
3344 rtx x = XEXP (mem, 0);
3345 switch (GET_CODE (x))
3346 {
3347 case PRE_INC:
3348 case POST_INC:
3349 {
3350 poly_int64 size = GET_MODE_SIZE (GET_MODE (mem));
3351 rtx r1 = XEXP (x, 0);
3352 rtx c = gen_int_mode (size, GET_MODE (r1));
3353 return fn (mem, x, r1, r1, c, data);
3354 }
3355
3356 case PRE_DEC:
3357 case POST_DEC:
3358 {
3359 poly_int64 size = GET_MODE_SIZE (GET_MODE (mem));
3360 rtx r1 = XEXP (x, 0);
3361 rtx c = gen_int_mode (-size, GET_MODE (r1));
3362 return fn (mem, x, r1, r1, c, data);
3363 }
3364
3365 case PRE_MODIFY:
3366 case POST_MODIFY:
3367 {
3368 rtx r1 = XEXP (x, 0);
3369 rtx add = XEXP (x, 1);
3370 return fn (mem, x, r1, add, NULL, data);
3371 }
3372
3373 default:
3374 gcc_unreachable ();
3375 }
3376 }
3377
3378 /* Traverse *LOC looking for MEMs that have autoinc addresses.
3379 For each such autoinc operation found, call FN, passing it
3380 the innermost enclosing MEM, the operation itself, the RTX modified
3381 by the operation, two RTXs (the second may be NULL) that, once
3382 added, represent the value to be held by the modified RTX
3383 afterwards, and DATA. FN is to return 0 to continue the
3384 traversal or any other value to have it returned to the caller of
3385 for_each_inc_dec. */
3386
3387 int
for_each_inc_dec(rtx x,for_each_inc_dec_fn fn,void * data)3388 for_each_inc_dec (rtx x,
3389 for_each_inc_dec_fn fn,
3390 void *data)
3391 {
3392 subrtx_var_iterator::array_type array;
3393 FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
3394 {
3395 rtx mem = *iter;
3396 if (mem
3397 && MEM_P (mem)
3398 && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
3399 {
3400 int res = for_each_inc_dec_find_inc_dec (mem, fn, data);
3401 if (res != 0)
3402 return res;
3403 iter.skip_subrtxes ();
3404 }
3405 }
3406 return 0;
3407 }
3408
3409
3410 /* Searches X for any reference to REGNO, returning the rtx of the
3411 reference found if any. Otherwise, returns NULL_RTX. */
3412
3413 rtx
regno_use_in(unsigned int regno,rtx x)3414 regno_use_in (unsigned int regno, rtx x)
3415 {
3416 const char *fmt;
3417 int i, j;
3418 rtx tem;
3419
3420 if (REG_P (x) && REGNO (x) == regno)
3421 return x;
3422
3423 fmt = GET_RTX_FORMAT (GET_CODE (x));
3424 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3425 {
3426 if (fmt[i] == 'e')
3427 {
3428 if ((tem = regno_use_in (regno, XEXP (x, i))))
3429 return tem;
3430 }
3431 else if (fmt[i] == 'E')
3432 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3433 if ((tem = regno_use_in (regno , XVECEXP (x, i, j))))
3434 return tem;
3435 }
3436
3437 return NULL_RTX;
3438 }
3439
3440 /* Return a value indicating whether OP, an operand of a commutative
3441 operation, is preferred as the first or second operand. The more
3442 positive the value, the stronger the preference for being the first
3443 operand. */
3444
3445 int
commutative_operand_precedence(rtx op)3446 commutative_operand_precedence (rtx op)
3447 {
3448 enum rtx_code code = GET_CODE (op);
3449
3450 /* Constants always become the second operand. Prefer "nice" constants. */
3451 if (code == CONST_INT)
3452 return -10;
3453 if (code == CONST_WIDE_INT)
3454 return -9;
3455 if (code == CONST_POLY_INT)
3456 return -8;
3457 if (code == CONST_DOUBLE)
3458 return -8;
3459 if (code == CONST_FIXED)
3460 return -8;
3461 op = avoid_constant_pool_reference (op);
3462 code = GET_CODE (op);
3463
3464 switch (GET_RTX_CLASS (code))
3465 {
3466 case RTX_CONST_OBJ:
3467 if (code == CONST_INT)
3468 return -7;
3469 if (code == CONST_WIDE_INT)
3470 return -6;
3471 if (code == CONST_POLY_INT)
3472 return -5;
3473 if (code == CONST_DOUBLE)
3474 return -5;
3475 if (code == CONST_FIXED)
3476 return -5;
3477 return -4;
3478
3479 case RTX_EXTRA:
3480 /* SUBREGs of objects should come second. */
3481 if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
3482 return -3;
3483 return 0;
3484
3485 case RTX_OBJ:
3486 /* Complex expressions should be the first, so decrease priority
3487 of objects. Prefer pointer objects over non pointer objects. */
3488 if ((REG_P (op) && REG_POINTER (op))
3489 || (MEM_P (op) && MEM_POINTER (op)))
3490 return -1;
3491 return -2;
3492
3493 case RTX_COMM_ARITH:
3494 /* Prefer operands that are themselves commutative to be first.
3495 This helps to make things linear. In particular,
3496 (and (and (reg) (reg)) (not (reg))) is canonical. */
3497 return 4;
3498
3499 case RTX_BIN_ARITH:
3500 /* If only one operand is a binary expression, it will be the first
3501 operand. In particular, (plus (minus (reg) (reg)) (neg (reg)))
3502 is canonical, although it will usually be further simplified. */
3503 return 2;
3504
3505 case RTX_UNARY:
3506 /* Then prefer NEG and NOT. */
3507 if (code == NEG || code == NOT)
3508 return 1;
3509 /* FALLTHRU */
3510
3511 default:
3512 return 0;
3513 }
3514 }
3515
3516 /* Return 1 iff it is necessary to swap operands of commutative operation
3517 in order to canonicalize expression. */
3518
3519 bool
swap_commutative_operands_p(rtx x,rtx y)3520 swap_commutative_operands_p (rtx x, rtx y)
3521 {
3522 return (commutative_operand_precedence (x)
3523 < commutative_operand_precedence (y));
3524 }
3525
3526 /* Return 1 if X is an autoincrement side effect and the register is
3527 not the stack pointer. */
3528 int
auto_inc_p(const_rtx x)3529 auto_inc_p (const_rtx x)
3530 {
3531 switch (GET_CODE (x))
3532 {
3533 case PRE_INC:
3534 case POST_INC:
3535 case PRE_DEC:
3536 case POST_DEC:
3537 case PRE_MODIFY:
3538 case POST_MODIFY:
3539 /* There are no REG_INC notes for SP. */
3540 if (XEXP (x, 0) != stack_pointer_rtx)
3541 return 1;
3542 default:
3543 break;
3544 }
3545 return 0;
3546 }
3547
3548 /* Return nonzero if IN contains a piece of rtl that has the address LOC. */
3549 int
loc_mentioned_in_p(rtx * loc,const_rtx in)3550 loc_mentioned_in_p (rtx *loc, const_rtx in)
3551 {
3552 enum rtx_code code;
3553 const char *fmt;
3554 int i, j;
3555
3556 if (!in)
3557 return 0;
3558
3559 code = GET_CODE (in);
3560 fmt = GET_RTX_FORMAT (code);
3561 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3562 {
3563 if (fmt[i] == 'e')
3564 {
3565 if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
3566 return 1;
3567 }
3568 else if (fmt[i] == 'E')
3569 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
3570 if (loc == &XVECEXP (in, i, j)
3571 || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
3572 return 1;
3573 }
3574 return 0;
3575 }
3576
3577 /* Helper function for subreg_lsb. Given a subreg's OUTER_MODE, INNER_MODE,
3578 and SUBREG_BYTE, return the bit offset where the subreg begins
3579 (counting from the least significant bit of the operand). */
3580
3581 poly_uint64
subreg_lsb_1(machine_mode outer_mode,machine_mode inner_mode,poly_uint64 subreg_byte)3582 subreg_lsb_1 (machine_mode outer_mode,
3583 machine_mode inner_mode,
3584 poly_uint64 subreg_byte)
3585 {
3586 poly_uint64 subreg_end, trailing_bytes, byte_pos;
3587
3588 /* A paradoxical subreg begins at bit position 0. */
3589 if (paradoxical_subreg_p (outer_mode, inner_mode))
3590 return 0;
3591
3592 subreg_end = subreg_byte + GET_MODE_SIZE (outer_mode);
3593 trailing_bytes = GET_MODE_SIZE (inner_mode) - subreg_end;
3594 if (WORDS_BIG_ENDIAN && BYTES_BIG_ENDIAN)
3595 byte_pos = trailing_bytes;
3596 else if (!WORDS_BIG_ENDIAN && !BYTES_BIG_ENDIAN)
3597 byte_pos = subreg_byte;
3598 else
3599 {
3600 /* When bytes and words have opposite endianness, we must be able
3601 to split offsets into words and bytes at compile time. */
3602 poly_uint64 leading_word_part
3603 = force_align_down (subreg_byte, UNITS_PER_WORD);
3604 poly_uint64 trailing_word_part
3605 = force_align_down (trailing_bytes, UNITS_PER_WORD);
3606 /* If the subreg crosses a word boundary ensure that
3607 it also begins and ends on a word boundary. */
3608 gcc_assert (known_le (subreg_end - leading_word_part,
3609 (unsigned int) UNITS_PER_WORD)
3610 || (known_eq (leading_word_part, subreg_byte)
3611 && known_eq (trailing_word_part, trailing_bytes)));
3612 if (WORDS_BIG_ENDIAN)
3613 byte_pos = trailing_word_part + (subreg_byte - leading_word_part);
3614 else
3615 byte_pos = leading_word_part + (trailing_bytes - trailing_word_part);
3616 }
3617
3618 return byte_pos * BITS_PER_UNIT;
3619 }
3620
3621 /* Given a subreg X, return the bit offset where the subreg begins
3622 (counting from the least significant bit of the reg). */
3623
3624 poly_uint64
subreg_lsb(const_rtx x)3625 subreg_lsb (const_rtx x)
3626 {
3627 return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
3628 SUBREG_BYTE (x));
3629 }
3630
3631 /* Return the subreg byte offset for a subreg whose outer value has
3632 OUTER_BYTES bytes, whose inner value has INNER_BYTES bytes, and where
3633 there are LSB_SHIFT *bits* between the lsb of the outer value and the
3634 lsb of the inner value. This is the inverse of the calculation
3635 performed by subreg_lsb_1 (which converts byte offsets to bit shifts). */
3636
3637 poly_uint64
subreg_size_offset_from_lsb(poly_uint64 outer_bytes,poly_uint64 inner_bytes,poly_uint64 lsb_shift)3638 subreg_size_offset_from_lsb (poly_uint64 outer_bytes, poly_uint64 inner_bytes,
3639 poly_uint64 lsb_shift)
3640 {
3641 /* A paradoxical subreg begins at bit position 0. */
3642 gcc_checking_assert (ordered_p (outer_bytes, inner_bytes));
3643 if (maybe_gt (outer_bytes, inner_bytes))
3644 {
3645 gcc_checking_assert (known_eq (lsb_shift, 0U));
3646 return 0;
3647 }
3648
3649 poly_uint64 lower_bytes = exact_div (lsb_shift, BITS_PER_UNIT);
3650 poly_uint64 upper_bytes = inner_bytes - (lower_bytes + outer_bytes);
3651 if (WORDS_BIG_ENDIAN && BYTES_BIG_ENDIAN)
3652 return upper_bytes;
3653 else if (!WORDS_BIG_ENDIAN && !BYTES_BIG_ENDIAN)
3654 return lower_bytes;
3655 else
3656 {
3657 /* When bytes and words have opposite endianness, we must be able
3658 to split offsets into words and bytes at compile time. */
3659 poly_uint64 lower_word_part = force_align_down (lower_bytes,
3660 UNITS_PER_WORD);
3661 poly_uint64 upper_word_part = force_align_down (upper_bytes,
3662 UNITS_PER_WORD);
3663 if (WORDS_BIG_ENDIAN)
3664 return upper_word_part + (lower_bytes - lower_word_part);
3665 else
3666 return lower_word_part + (upper_bytes - upper_word_part);
3667 }
3668 }
3669
3670 /* Fill in information about a subreg of a hard register.
3671 xregno - A regno of an inner hard subreg_reg (or what will become one).
3672 xmode - The mode of xregno.
3673 offset - The byte offset.
3674 ymode - The mode of a top level SUBREG (or what may become one).
3675 info - Pointer to structure to fill in.
3676
3677 Rather than considering one particular inner register (and thus one
3678 particular "outer" register) in isolation, this function really uses
3679 XREGNO as a model for a sequence of isomorphic hard registers. Thus the
3680 function does not check whether adding INFO->offset to XREGNO gives
3681 a valid hard register; even if INFO->offset + XREGNO is out of range,
3682 there might be another register of the same type that is in range.
3683 Likewise it doesn't check whether targetm.hard_regno_mode_ok accepts
3684 the new register, since that can depend on things like whether the final
3685 register number is even or odd. Callers that want to check whether
3686 this particular subreg can be replaced by a simple (reg ...) should
3687 use simplify_subreg_regno. */
3688
3689 void
subreg_get_info(unsigned int xregno,machine_mode xmode,poly_uint64 offset,machine_mode ymode,struct subreg_info * info)3690 subreg_get_info (unsigned int xregno, machine_mode xmode,
3691 poly_uint64 offset, machine_mode ymode,
3692 struct subreg_info *info)
3693 {
3694 unsigned int nregs_xmode, nregs_ymode;
3695
3696 gcc_assert (xregno < FIRST_PSEUDO_REGISTER);
3697
3698 poly_uint64 xsize = GET_MODE_SIZE (xmode);
3699 poly_uint64 ysize = GET_MODE_SIZE (ymode);
3700
3701 bool rknown = false;
3702
3703 /* If the register representation of a non-scalar mode has holes in it,
3704 we expect the scalar units to be concatenated together, with the holes
3705 distributed evenly among the scalar units. Each scalar unit must occupy
3706 at least one register. */
3707 if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
3708 {
3709 /* As a consequence, we must be dealing with a constant number of
3710 scalars, and thus a constant offset and number of units. */
3711 HOST_WIDE_INT coffset = offset.to_constant ();
3712 HOST_WIDE_INT cysize = ysize.to_constant ();
3713 nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
3714 unsigned int nunits = GET_MODE_NUNITS (xmode).to_constant ();
3715 scalar_mode xmode_unit = GET_MODE_INNER (xmode);
3716 gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
3717 gcc_assert (nregs_xmode
3718 == (nunits
3719 * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
3720 gcc_assert (hard_regno_nregs (xregno, xmode)
3721 == hard_regno_nregs (xregno, xmode_unit) * nunits);
3722
3723 /* You can only ask for a SUBREG of a value with holes in the middle
3724 if you don't cross the holes. (Such a SUBREG should be done by
3725 picking a different register class, or doing it in memory if
3726 necessary.) An example of a value with holes is XCmode on 32-bit
3727 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
3728 3 for each part, but in memory it's two 128-bit parts.
3729 Padding is assumed to be at the end (not necessarily the 'high part')
3730 of each unit. */
3731 if ((coffset / GET_MODE_SIZE (xmode_unit) + 1 < nunits)
3732 && (coffset / GET_MODE_SIZE (xmode_unit)
3733 != ((coffset + cysize - 1) / GET_MODE_SIZE (xmode_unit))))
3734 {
3735 info->representable_p = false;
3736 rknown = true;
3737 }
3738 }
3739 else
3740 nregs_xmode = hard_regno_nregs (xregno, xmode);
3741
3742 nregs_ymode = hard_regno_nregs (xregno, ymode);
3743
3744 /* Subreg sizes must be ordered, so that we can tell whether they are
3745 partial, paradoxical or complete. */
3746 gcc_checking_assert (ordered_p (xsize, ysize));
3747
3748 /* Paradoxical subregs are otherwise valid. */
3749 if (!rknown && known_eq (offset, 0U) && maybe_gt (ysize, xsize))
3750 {
3751 info->representable_p = true;
3752 /* If this is a big endian paradoxical subreg, which uses more
3753 actual hard registers than the original register, we must
3754 return a negative offset so that we find the proper highpart
3755 of the register.
3756
3757 We assume that the ordering of registers within a multi-register
3758 value has a consistent endianness: if bytes and register words
3759 have different endianness, the hard registers that make up a
3760 multi-register value must be at least word-sized. */
3761 if (REG_WORDS_BIG_ENDIAN)
3762 info->offset = (int) nregs_xmode - (int) nregs_ymode;
3763 else
3764 info->offset = 0;
3765 info->nregs = nregs_ymode;
3766 return;
3767 }
3768
3769 /* If registers store different numbers of bits in the different
3770 modes, we cannot generally form this subreg. */
3771 poly_uint64 regsize_xmode, regsize_ymode;
3772 if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
3773 && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
3774 && multiple_p (xsize, nregs_xmode, ®size_xmode)
3775 && multiple_p (ysize, nregs_ymode, ®size_ymode))
3776 {
3777 if (!rknown
3778 && ((nregs_ymode > 1 && maybe_gt (regsize_xmode, regsize_ymode))
3779 || (nregs_xmode > 1 && maybe_gt (regsize_ymode, regsize_xmode))))
3780 {
3781 info->representable_p = false;
3782 if (!can_div_away_from_zero_p (ysize, regsize_xmode, &info->nregs)
3783 || !can_div_trunc_p (offset, regsize_xmode, &info->offset))
3784 /* Checked by validate_subreg. We must know at compile time
3785 which inner registers are being accessed. */
3786 gcc_unreachable ();
3787 return;
3788 }
3789 /* It's not valid to extract a subreg of mode YMODE at OFFSET that
3790 would go outside of XMODE. */
3791 if (!rknown && maybe_gt (ysize + offset, xsize))
3792 {
3793 info->representable_p = false;
3794 info->nregs = nregs_ymode;
3795 if (!can_div_trunc_p (offset, regsize_xmode, &info->offset))
3796 /* Checked by validate_subreg. We must know at compile time
3797 which inner registers are being accessed. */
3798 gcc_unreachable ();
3799 return;
3800 }
3801 /* Quick exit for the simple and common case of extracting whole
3802 subregisters from a multiregister value. */
3803 /* ??? It would be better to integrate this into the code below,
3804 if we can generalize the concept enough and figure out how
3805 odd-sized modes can coexist with the other weird cases we support. */
3806 HOST_WIDE_INT count;
3807 if (!rknown
3808 && WORDS_BIG_ENDIAN == REG_WORDS_BIG_ENDIAN
3809 && known_eq (regsize_xmode, regsize_ymode)
3810 && constant_multiple_p (offset, regsize_ymode, &count))
3811 {
3812 info->representable_p = true;
3813 info->nregs = nregs_ymode;
3814 info->offset = count;
3815 gcc_assert (info->offset + info->nregs <= (int) nregs_xmode);
3816 return;
3817 }
3818 }
3819
3820 /* Lowpart subregs are otherwise valid. */
3821 if (!rknown && known_eq (offset, subreg_lowpart_offset (ymode, xmode)))
3822 {
3823 info->representable_p = true;
3824 rknown = true;
3825
3826 if (known_eq (offset, 0U) || nregs_xmode == nregs_ymode)
3827 {
3828 info->offset = 0;
3829 info->nregs = nregs_ymode;
3830 return;
3831 }
3832 }
3833
3834 /* Set NUM_BLOCKS to the number of independently-representable YMODE
3835 values there are in (reg:XMODE XREGNO). We can view the register
3836 as consisting of this number of independent "blocks", where each
3837 block occupies NREGS_YMODE registers and contains exactly one
3838 representable YMODE value. */
3839 gcc_assert ((nregs_xmode % nregs_ymode) == 0);
3840 unsigned int num_blocks = nregs_xmode / nregs_ymode;
3841
3842 /* Calculate the number of bytes in each block. This must always
3843 be exact, otherwise we don't know how to verify the constraint.
3844 These conditions may be relaxed but subreg_regno_offset would
3845 need to be redesigned. */
3846 poly_uint64 bytes_per_block = exact_div (xsize, num_blocks);
3847
3848 /* Get the number of the first block that contains the subreg and the byte
3849 offset of the subreg from the start of that block. */
3850 unsigned int block_number;
3851 poly_uint64 subblock_offset;
3852 if (!can_div_trunc_p (offset, bytes_per_block, &block_number,
3853 &subblock_offset))
3854 /* Checked by validate_subreg. We must know at compile time which
3855 inner registers are being accessed. */
3856 gcc_unreachable ();
3857
3858 if (!rknown)
3859 {
3860 /* Only the lowpart of each block is representable. */
3861 info->representable_p
3862 = known_eq (subblock_offset,
3863 subreg_size_lowpart_offset (ysize, bytes_per_block));
3864 rknown = true;
3865 }
3866
3867 /* We assume that the ordering of registers within a multi-register
3868 value has a consistent endianness: if bytes and register words
3869 have different endianness, the hard registers that make up a
3870 multi-register value must be at least word-sized. */
3871 if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN)
3872 /* The block number we calculated above followed memory endianness.
3873 Convert it to register endianness by counting back from the end.
3874 (Note that, because of the assumption above, each block must be
3875 at least word-sized.) */
3876 info->offset = (num_blocks - block_number - 1) * nregs_ymode;
3877 else
3878 info->offset = block_number * nregs_ymode;
3879 info->nregs = nregs_ymode;
3880 }
3881
3882 /* This function returns the regno offset of a subreg expression.
3883 xregno - A regno of an inner hard subreg_reg (or what will become one).
3884 xmode - The mode of xregno.
3885 offset - The byte offset.
3886 ymode - The mode of a top level SUBREG (or what may become one).
3887 RETURN - The regno offset which would be used. */
3888 unsigned int
subreg_regno_offset(unsigned int xregno,machine_mode xmode,poly_uint64 offset,machine_mode ymode)3889 subreg_regno_offset (unsigned int xregno, machine_mode xmode,
3890 poly_uint64 offset, machine_mode ymode)
3891 {
3892 struct subreg_info info;
3893 subreg_get_info (xregno, xmode, offset, ymode, &info);
3894 return info.offset;
3895 }
3896
3897 /* This function returns true when the offset is representable via
3898 subreg_offset in the given regno.
3899 xregno - A regno of an inner hard subreg_reg (or what will become one).
3900 xmode - The mode of xregno.
3901 offset - The byte offset.
3902 ymode - The mode of a top level SUBREG (or what may become one).
3903 RETURN - Whether the offset is representable. */
3904 bool
subreg_offset_representable_p(unsigned int xregno,machine_mode xmode,poly_uint64 offset,machine_mode ymode)3905 subreg_offset_representable_p (unsigned int xregno, machine_mode xmode,
3906 poly_uint64 offset, machine_mode ymode)
3907 {
3908 struct subreg_info info;
3909 subreg_get_info (xregno, xmode, offset, ymode, &info);
3910 return info.representable_p;
3911 }
3912
3913 /* Return the number of a YMODE register to which
3914
3915 (subreg:YMODE (reg:XMODE XREGNO) OFFSET)
3916
3917 can be simplified. Return -1 if the subreg can't be simplified.
3918
3919 XREGNO is a hard register number. */
3920
3921 int
simplify_subreg_regno(unsigned int xregno,machine_mode xmode,poly_uint64 offset,machine_mode ymode)3922 simplify_subreg_regno (unsigned int xregno, machine_mode xmode,
3923 poly_uint64 offset, machine_mode ymode)
3924 {
3925 struct subreg_info info;
3926 unsigned int yregno;
3927
3928 /* Give the backend a chance to disallow the mode change. */
3929 if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
3930 && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
3931 && !REG_CAN_CHANGE_MODE_P (xregno, xmode, ymode)
3932 /* We can use mode change in LRA for some transformations. */
3933 && ! lra_in_progress)
3934 return -1;
3935
3936 /* We shouldn't simplify stack-related registers. */
3937 if ((!reload_completed || frame_pointer_needed)
3938 && xregno == FRAME_POINTER_REGNUM)
3939 return -1;
3940
3941 if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
3942 && xregno == ARG_POINTER_REGNUM)
3943 return -1;
3944
3945 if (xregno == STACK_POINTER_REGNUM
3946 /* We should convert hard stack register in LRA if it is
3947 possible. */
3948 && ! lra_in_progress)
3949 return -1;
3950
3951 /* Try to get the register offset. */
3952 subreg_get_info (xregno, xmode, offset, ymode, &info);
3953 if (!info.representable_p)
3954 return -1;
3955
3956 /* Make sure that the offsetted register value is in range. */
3957 yregno = xregno + info.offset;
3958 if (!HARD_REGISTER_NUM_P (yregno))
3959 return -1;
3960
3961 /* See whether (reg:YMODE YREGNO) is valid.
3962
3963 ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
3964 This is a kludge to work around how complex FP arguments are passed
3965 on IA-64 and should be fixed. See PR target/49226. */
3966 if (!targetm.hard_regno_mode_ok (yregno, ymode)
3967 && targetm.hard_regno_mode_ok (xregno, xmode))
3968 return -1;
3969
3970 return (int) yregno;
3971 }
3972
3973 /* Return the final regno that a subreg expression refers to. */
3974 unsigned int
subreg_regno(const_rtx x)3975 subreg_regno (const_rtx x)
3976 {
3977 unsigned int ret;
3978 rtx subreg = SUBREG_REG (x);
3979 int regno = REGNO (subreg);
3980
3981 ret = regno + subreg_regno_offset (regno,
3982 GET_MODE (subreg),
3983 SUBREG_BYTE (x),
3984 GET_MODE (x));
3985 return ret;
3986
3987 }
3988
3989 /* Return the number of registers that a subreg expression refers
3990 to. */
3991 unsigned int
subreg_nregs(const_rtx x)3992 subreg_nregs (const_rtx x)
3993 {
3994 return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
3995 }
3996
3997 /* Return the number of registers that a subreg REG with REGNO
3998 expression refers to. This is a copy of the rtlanal.c:subreg_nregs
3999 changed so that the regno can be passed in. */
4000
4001 unsigned int
subreg_nregs_with_regno(unsigned int regno,const_rtx x)4002 subreg_nregs_with_regno (unsigned int regno, const_rtx x)
4003 {
4004 struct subreg_info info;
4005 rtx subreg = SUBREG_REG (x);
4006
4007 subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
4008 &info);
4009 return info.nregs;
4010 }
4011
4012 struct parms_set_data
4013 {
4014 int nregs;
4015 HARD_REG_SET regs;
4016 };
4017
4018 /* Helper function for noticing stores to parameter registers. */
4019 static void
parms_set(rtx x,const_rtx pat ATTRIBUTE_UNUSED,void * data)4020 parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
4021 {
4022 struct parms_set_data *const d = (struct parms_set_data *) data;
4023 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
4024 && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
4025 {
4026 CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
4027 d->nregs--;
4028 }
4029 }
4030
4031 /* Look backward for first parameter to be loaded.
4032 Note that loads of all parameters will not necessarily be
4033 found if CSE has eliminated some of them (e.g., an argument
4034 to the outer function is passed down as a parameter).
4035 Do not skip BOUNDARY. */
4036 rtx_insn *
find_first_parameter_load(rtx_insn * call_insn,rtx_insn * boundary)4037 find_first_parameter_load (rtx_insn *call_insn, rtx_insn *boundary)
4038 {
4039 struct parms_set_data parm;
4040 rtx p;
4041 rtx_insn *before, *first_set;
4042
4043 /* Since different machines initialize their parameter registers
4044 in different orders, assume nothing. Collect the set of all
4045 parameter registers. */
4046 CLEAR_HARD_REG_SET (parm.regs);
4047 parm.nregs = 0;
4048 for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
4049 if (GET_CODE (XEXP (p, 0)) == USE
4050 && REG_P (XEXP (XEXP (p, 0), 0))
4051 && !STATIC_CHAIN_REG_P (XEXP (XEXP (p, 0), 0)))
4052 {
4053 gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);
4054
4055 /* We only care about registers which can hold function
4056 arguments. */
4057 if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
4058 continue;
4059
4060 SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
4061 parm.nregs++;
4062 }
4063 before = call_insn;
4064 first_set = call_insn;
4065
4066 /* Search backward for the first set of a register in this set. */
4067 while (parm.nregs && before != boundary)
4068 {
4069 before = PREV_INSN (before);
4070
4071 /* It is possible that some loads got CSEed from one call to
4072 another. Stop in that case. */
4073 if (CALL_P (before))
4074 break;
4075
4076 /* Our caller needs either ensure that we will find all sets
4077 (in case code has not been optimized yet), or take care
4078 for possible labels in a way by setting boundary to preceding
4079 CODE_LABEL. */
4080 if (LABEL_P (before))
4081 {
4082 gcc_assert (before == boundary);
4083 break;
4084 }
4085
4086 if (INSN_P (before))
4087 {
4088 int nregs_old = parm.nregs;
4089 note_stores (PATTERN (before), parms_set, &parm);
4090 /* If we found something that did not set a parameter reg,
4091 we're done. Do not keep going, as that might result
4092 in hoisting an insn before the setting of a pseudo
4093 that is used by the hoisted insn. */
4094 if (nregs_old != parm.nregs)
4095 first_set = before;
4096 else
4097 break;
4098 }
4099 }
4100 return first_set;
4101 }
4102
4103 /* Return true if we should avoid inserting code between INSN and preceding
4104 call instruction. */
4105
4106 bool
keep_with_call_p(const rtx_insn * insn)4107 keep_with_call_p (const rtx_insn *insn)
4108 {
4109 rtx set;
4110
4111 if (INSN_P (insn) && (set = single_set (insn)) != NULL)
4112 {
4113 if (REG_P (SET_DEST (set))
4114 && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
4115 && fixed_regs[REGNO (SET_DEST (set))]
4116 && general_operand (SET_SRC (set), VOIDmode))
4117 return true;
4118 if (REG_P (SET_SRC (set))
4119 && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
4120 && REG_P (SET_DEST (set))
4121 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
4122 return true;
4123 /* There may be a stack pop just after the call and before the store
4124 of the return register. Search for the actual store when deciding
4125 if we can break or not. */
4126 if (SET_DEST (set) == stack_pointer_rtx)
4127 {
4128 /* This CONST_CAST is okay because next_nonnote_insn just
4129 returns its argument and we assign it to a const_rtx
4130 variable. */
4131 const rtx_insn *i2
4132 = next_nonnote_insn (const_cast<rtx_insn *> (insn));
4133 if (i2 && keep_with_call_p (i2))
4134 return true;
4135 }
4136 }
4137 return false;
4138 }
4139
4140 /* Return true if LABEL is a target of JUMP_INSN. This applies only
4141 to non-complex jumps. That is, direct unconditional, conditional,
4142 and tablejumps, but not computed jumps or returns. It also does
4143 not apply to the fallthru case of a conditional jump. */
4144
4145 bool
label_is_jump_target_p(const_rtx label,const rtx_insn * jump_insn)4146 label_is_jump_target_p (const_rtx label, const rtx_insn *jump_insn)
4147 {
4148 rtx tmp = JUMP_LABEL (jump_insn);
4149 rtx_jump_table_data *table;
4150
4151 if (label == tmp)
4152 return true;
4153
4154 if (tablejump_p (jump_insn, NULL, &table))
4155 {
4156 rtvec vec = table->get_labels ();
4157 int i, veclen = GET_NUM_ELEM (vec);
4158
4159 for (i = 0; i < veclen; ++i)
4160 if (XEXP (RTVEC_ELT (vec, i), 0) == label)
4161 return true;
4162 }
4163
4164 if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
4165 return true;
4166
4167 return false;
4168 }
4169
4170
4171 /* Return an estimate of the cost of computing rtx X.
4172 One use is in cse, to decide which expression to keep in the hash table.
4173 Another is in rtl generation, to pick the cheapest way to multiply.
4174 Other uses like the latter are expected in the future.
4175
4176 X appears as operand OPNO in an expression with code OUTER_CODE.
4177 SPEED specifies whether costs optimized for speed or size should
4178 be returned. */
4179
4180 int
rtx_cost(rtx x,machine_mode mode,enum rtx_code outer_code,int opno,bool speed)4181 rtx_cost (rtx x, machine_mode mode, enum rtx_code outer_code,
4182 int opno, bool speed)
4183 {
4184 int i, j;
4185 enum rtx_code code;
4186 const char *fmt;
4187 int total;
4188 int factor;
4189
4190 if (x == 0)
4191 return 0;
4192
4193 if (GET_MODE (x) != VOIDmode)
4194 mode = GET_MODE (x);
4195
4196 /* A size N times larger than UNITS_PER_WORD likely needs N times as
4197 many insns, taking N times as long. */
4198 factor = estimated_poly_value (GET_MODE_SIZE (mode)) / UNITS_PER_WORD;
4199 if (factor == 0)
4200 factor = 1;
4201
4202 /* Compute the default costs of certain things.
4203 Note that targetm.rtx_costs can override the defaults. */
4204
4205 code = GET_CODE (x);
4206 switch (code)
4207 {
4208 case MULT:
4209 /* Multiplication has time-complexity O(N*N), where N is the
4210 number of units (translated from digits) when using
4211 schoolbook long multiplication. */
4212 total = factor * factor * COSTS_N_INSNS (5);
4213 break;
4214 case DIV:
4215 case UDIV:
4216 case MOD:
4217 case UMOD:
4218 /* Similarly, complexity for schoolbook long division. */
4219 total = factor * factor * COSTS_N_INSNS (7);
4220 break;
4221 case USE:
4222 /* Used in combine.c as a marker. */
4223 total = 0;
4224 break;
4225 case SET:
4226 /* A SET doesn't have a mode, so let's look at the SET_DEST to get
4227 the mode for the factor. */
4228 mode = GET_MODE (SET_DEST (x));
4229 factor = estimated_poly_value (GET_MODE_SIZE (mode)) / UNITS_PER_WORD;
4230 if (factor == 0)
4231 factor = 1;
4232 /* FALLTHRU */
4233 default:
4234 total = factor * COSTS_N_INSNS (1);
4235 }
4236
4237 switch (code)
4238 {
4239 case REG:
4240 return 0;
4241
4242 case SUBREG:
4243 total = 0;
4244 /* If we can't tie these modes, make this expensive. The larger
4245 the mode, the more expensive it is. */
4246 if (!targetm.modes_tieable_p (mode, GET_MODE (SUBREG_REG (x))))
4247 return COSTS_N_INSNS (2 + factor);
4248 break;
4249
4250 case TRUNCATE:
4251 if (targetm.modes_tieable_p (mode, GET_MODE (XEXP (x, 0))))
4252 {
4253 total = 0;
4254 break;
4255 }
4256 /* FALLTHRU */
4257 default:
4258 if (targetm.rtx_costs (x, mode, outer_code, opno, &total, speed))
4259 return total;
4260 break;
4261 }
4262
4263 /* Sum the costs of the sub-rtx's, plus cost of this operation,
4264 which is already in total. */
4265
4266 fmt = GET_RTX_FORMAT (code);
4267 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4268 if (fmt[i] == 'e')
4269 total += rtx_cost (XEXP (x, i), mode, code, i, speed);
4270 else if (fmt[i] == 'E')
4271 for (j = 0; j < XVECLEN (x, i); j++)
4272 total += rtx_cost (XVECEXP (x, i, j), mode, code, i, speed);
4273
4274 return total;
4275 }
4276
4277 /* Fill in the structure C with information about both speed and size rtx
4278 costs for X, which is operand OPNO in an expression with code OUTER. */
4279
4280 void
get_full_rtx_cost(rtx x,machine_mode mode,enum rtx_code outer,int opno,struct full_rtx_costs * c)4281 get_full_rtx_cost (rtx x, machine_mode mode, enum rtx_code outer, int opno,
4282 struct full_rtx_costs *c)
4283 {
4284 c->speed = rtx_cost (x, mode, outer, opno, true);
4285 c->size = rtx_cost (x, mode, outer, opno, false);
4286 }
4287
4288
4289 /* Return cost of address expression X.
4290 Expect that X is properly formed address reference.
4291
4292 SPEED parameter specify whether costs optimized for speed or size should
4293 be returned. */
4294
4295 int
address_cost(rtx x,machine_mode mode,addr_space_t as,bool speed)4296 address_cost (rtx x, machine_mode mode, addr_space_t as, bool speed)
4297 {
4298 /* We may be asked for cost of various unusual addresses, such as operands
4299 of push instruction. It is not worthwhile to complicate writing
4300 of the target hook by such cases. */
4301
4302 if (!memory_address_addr_space_p (mode, x, as))
4303 return 1000;
4304
4305 return targetm.address_cost (x, mode, as, speed);
4306 }
4307
4308 /* If the target doesn't override, compute the cost as with arithmetic. */
4309
4310 int
default_address_cost(rtx x,machine_mode,addr_space_t,bool speed)4311 default_address_cost (rtx x, machine_mode, addr_space_t, bool speed)
4312 {
4313 return rtx_cost (x, Pmode, MEM, 0, speed);
4314 }
4315
4316
4317 unsigned HOST_WIDE_INT
nonzero_bits(const_rtx x,machine_mode mode)4318 nonzero_bits (const_rtx x, machine_mode mode)
4319 {
4320 if (mode == VOIDmode)
4321 mode = GET_MODE (x);
4322 scalar_int_mode int_mode;
4323 if (!is_a <scalar_int_mode> (mode, &int_mode))
4324 return GET_MODE_MASK (mode);
4325 return cached_nonzero_bits (x, int_mode, NULL_RTX, VOIDmode, 0);
4326 }
4327
4328 unsigned int
num_sign_bit_copies(const_rtx x,machine_mode mode)4329 num_sign_bit_copies (const_rtx x, machine_mode mode)
4330 {
4331 if (mode == VOIDmode)
4332 mode = GET_MODE (x);
4333 scalar_int_mode int_mode;
4334 if (!is_a <scalar_int_mode> (mode, &int_mode))
4335 return 1;
4336 return cached_num_sign_bit_copies (x, int_mode, NULL_RTX, VOIDmode, 0);
4337 }
4338
4339 /* Return true if nonzero_bits1 might recurse into both operands
4340 of X. */
4341
4342 static inline bool
nonzero_bits_binary_arith_p(const_rtx x)4343 nonzero_bits_binary_arith_p (const_rtx x)
4344 {
4345 if (!ARITHMETIC_P (x))
4346 return false;
4347 switch (GET_CODE (x))
4348 {
4349 case AND:
4350 case XOR:
4351 case IOR:
4352 case UMIN:
4353 case UMAX:
4354 case SMIN:
4355 case SMAX:
4356 case PLUS:
4357 case MINUS:
4358 case MULT:
4359 case DIV:
4360 case UDIV:
4361 case MOD:
4362 case UMOD:
4363 return true;
4364 default:
4365 return false;
4366 }
4367 }
4368
4369 /* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
4370 It avoids exponential behavior in nonzero_bits1 when X has
4371 identical subexpressions on the first or the second level. */
4372
4373 static unsigned HOST_WIDE_INT
cached_nonzero_bits(const_rtx x,scalar_int_mode mode,const_rtx known_x,machine_mode known_mode,unsigned HOST_WIDE_INT known_ret)4374 cached_nonzero_bits (const_rtx x, scalar_int_mode mode, const_rtx known_x,
4375 machine_mode known_mode,
4376 unsigned HOST_WIDE_INT known_ret)
4377 {
4378 if (x == known_x && mode == known_mode)
4379 return known_ret;
4380
4381 /* Try to find identical subexpressions. If found call
4382 nonzero_bits1 on X with the subexpressions as KNOWN_X and the
4383 precomputed value for the subexpression as KNOWN_RET. */
4384
4385 if (nonzero_bits_binary_arith_p (x))
4386 {
4387 rtx x0 = XEXP (x, 0);
4388 rtx x1 = XEXP (x, 1);
4389
4390 /* Check the first level. */
4391 if (x0 == x1)
4392 return nonzero_bits1 (x, mode, x0, mode,
4393 cached_nonzero_bits (x0, mode, known_x,
4394 known_mode, known_ret));
4395
4396 /* Check the second level. */
4397 if (nonzero_bits_binary_arith_p (x0)
4398 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4399 return nonzero_bits1 (x, mode, x1, mode,
4400 cached_nonzero_bits (x1, mode, known_x,
4401 known_mode, known_ret));
4402
4403 if (nonzero_bits_binary_arith_p (x1)
4404 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4405 return nonzero_bits1 (x, mode, x0, mode,
4406 cached_nonzero_bits (x0, mode, known_x,
4407 known_mode, known_ret));
4408 }
4409
4410 return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
4411 }
4412
4413 /* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
4414 We don't let nonzero_bits recur into num_sign_bit_copies, because that
4415 is less useful. We can't allow both, because that results in exponential
4416 run time recursion. There is a nullstone testcase that triggered
4417 this. This macro avoids accidental uses of num_sign_bit_copies. */
4418 #define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
4419
4420 /* Given an expression, X, compute which bits in X can be nonzero.
4421 We don't care about bits outside of those defined in MODE.
4422
4423 For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X is
4424 an arithmetic operation, we can do better. */
4425
4426 static unsigned HOST_WIDE_INT
nonzero_bits1(const_rtx x,scalar_int_mode mode,const_rtx known_x,machine_mode known_mode,unsigned HOST_WIDE_INT known_ret)4427 nonzero_bits1 (const_rtx x, scalar_int_mode mode, const_rtx known_x,
4428 machine_mode known_mode,
4429 unsigned HOST_WIDE_INT known_ret)
4430 {
4431 unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
4432 unsigned HOST_WIDE_INT inner_nz;
4433 enum rtx_code code = GET_CODE (x);
4434 machine_mode inner_mode;
4435 unsigned int inner_width;
4436 scalar_int_mode xmode;
4437
4438 unsigned int mode_width = GET_MODE_PRECISION (mode);
4439
4440 if (CONST_INT_P (x))
4441 {
4442 if (SHORT_IMMEDIATES_SIGN_EXTEND
4443 && INTVAL (x) > 0
4444 && mode_width < BITS_PER_WORD
4445 && (UINTVAL (x) & (HOST_WIDE_INT_1U << (mode_width - 1))) != 0)
4446 return UINTVAL (x) | (HOST_WIDE_INT_M1U << mode_width);
4447
4448 return UINTVAL (x);
4449 }
4450
4451 if (!is_a <scalar_int_mode> (GET_MODE (x), &xmode))
4452 return nonzero;
4453 unsigned int xmode_width = GET_MODE_PRECISION (xmode);
4454
4455 /* If X is wider than MODE, use its mode instead. */
4456 if (xmode_width > mode_width)
4457 {
4458 mode = xmode;
4459 nonzero = GET_MODE_MASK (mode);
4460 mode_width = xmode_width;
4461 }
4462
4463 if (mode_width > HOST_BITS_PER_WIDE_INT)
4464 /* Our only callers in this case look for single bit values. So
4465 just return the mode mask. Those tests will then be false. */
4466 return nonzero;
4467
4468 /* If MODE is wider than X, but both are a single word for both the host
4469 and target machines, we can compute this from which bits of the object
4470 might be nonzero in its own mode, taking into account the fact that, on
4471 CISC machines, accessing an object in a wider mode generally causes the
4472 high-order bits to become undefined, so they are not known to be zero.
4473 We extend this reasoning to RISC machines for operations that might not
4474 operate on the full registers. */
4475 if (mode_width > xmode_width
4476 && xmode_width <= BITS_PER_WORD
4477 && xmode_width <= HOST_BITS_PER_WIDE_INT
4478 && !(WORD_REGISTER_OPERATIONS && word_register_operation_p (x)))
4479 {
4480 nonzero &= cached_nonzero_bits (x, xmode,
4481 known_x, known_mode, known_ret);
4482 nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (xmode);
4483 return nonzero;
4484 }
4485
4486 /* Please keep nonzero_bits_binary_arith_p above in sync with
4487 the code in the switch below. */
4488 switch (code)
4489 {
4490 case REG:
4491 #if defined(POINTERS_EXTEND_UNSIGNED)
4492 /* If pointers extend unsigned and this is a pointer in Pmode, say that
4493 all the bits above ptr_mode are known to be zero. */
4494 /* As we do not know which address space the pointer is referring to,
4495 we can do this only if the target does not support different pointer
4496 or address modes depending on the address space. */
4497 if (target_default_pointer_address_modes_p ()
4498 && POINTERS_EXTEND_UNSIGNED
4499 && xmode == Pmode
4500 && REG_POINTER (x)
4501 && !targetm.have_ptr_extend ())
4502 nonzero &= GET_MODE_MASK (ptr_mode);
4503 #endif
4504
4505 /* Include declared information about alignment of pointers. */
4506 /* ??? We don't properly preserve REG_POINTER changes across
4507 pointer-to-integer casts, so we can't trust it except for
4508 things that we know must be pointers. See execute/960116-1.c. */
4509 if ((x == stack_pointer_rtx
4510 || x == frame_pointer_rtx
4511 || x == arg_pointer_rtx)
4512 && REGNO_POINTER_ALIGN (REGNO (x)))
4513 {
4514 unsigned HOST_WIDE_INT alignment
4515 = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;
4516
4517 #ifdef PUSH_ROUNDING
4518 /* If PUSH_ROUNDING is defined, it is possible for the
4519 stack to be momentarily aligned only to that amount,
4520 so we pick the least alignment. */
4521 if (x == stack_pointer_rtx && PUSH_ARGS)
4522 {
4523 poly_uint64 rounded_1 = PUSH_ROUNDING (poly_int64 (1));
4524 alignment = MIN (known_alignment (rounded_1), alignment);
4525 }
4526 #endif
4527
4528 nonzero &= ~(alignment - 1);
4529 }
4530
4531 {
4532 unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
4533 rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, xmode, mode,
4534 &nonzero_for_hook);
4535
4536 if (new_rtx)
4537 nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
4538 known_mode, known_ret);
4539
4540 return nonzero_for_hook;
4541 }
4542
4543 case MEM:
4544 /* In many, if not most, RISC machines, reading a byte from memory
4545 zeros the rest of the register. Noticing that fact saves a lot
4546 of extra zero-extends. */
4547 if (load_extend_op (xmode) == ZERO_EXTEND)
4548 nonzero &= GET_MODE_MASK (xmode);
4549 break;
4550
4551 case EQ: case NE:
4552 case UNEQ: case LTGT:
4553 case GT: case GTU: case UNGT:
4554 case LT: case LTU: case UNLT:
4555 case GE: case GEU: case UNGE:
4556 case LE: case LEU: case UNLE:
4557 case UNORDERED: case ORDERED:
4558 /* If this produces an integer result, we know which bits are set.
4559 Code here used to clear bits outside the mode of X, but that is
4560 now done above. */
4561 /* Mind that MODE is the mode the caller wants to look at this
4562 operation in, and not the actual operation mode. We can wind
4563 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
4564 that describes the results of a vector compare. */
4565 if (GET_MODE_CLASS (xmode) == MODE_INT
4566 && mode_width <= HOST_BITS_PER_WIDE_INT)
4567 nonzero = STORE_FLAG_VALUE;
4568 break;
4569
4570 case NEG:
4571 #if 0
4572 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4573 and num_sign_bit_copies. */
4574 if (num_sign_bit_copies (XEXP (x, 0), xmode) == xmode_width)
4575 nonzero = 1;
4576 #endif
4577
4578 if (xmode_width < mode_width)
4579 nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (xmode));
4580 break;
4581
4582 case ABS:
4583 #if 0
4584 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4585 and num_sign_bit_copies. */
4586 if (num_sign_bit_copies (XEXP (x, 0), xmode) == xmode_width)
4587 nonzero = 1;
4588 #endif
4589 break;
4590
4591 case TRUNCATE:
4592 nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
4593 known_x, known_mode, known_ret)
4594 & GET_MODE_MASK (mode));
4595 break;
4596
4597 case ZERO_EXTEND:
4598 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4599 known_x, known_mode, known_ret);
4600 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4601 nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4602 break;
4603
4604 case SIGN_EXTEND:
4605 /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
4606 Otherwise, show all the bits in the outer mode but not the inner
4607 may be nonzero. */
4608 inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
4609 known_x, known_mode, known_ret);
4610 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4611 {
4612 inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4613 if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
4614 inner_nz |= (GET_MODE_MASK (mode)
4615 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
4616 }
4617
4618 nonzero &= inner_nz;
4619 break;
4620
4621 case AND:
4622 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4623 known_x, known_mode, known_ret)
4624 & cached_nonzero_bits (XEXP (x, 1), mode,
4625 known_x, known_mode, known_ret);
4626 break;
4627
4628 case XOR: case IOR:
4629 case UMIN: case UMAX: case SMIN: case SMAX:
4630 {
4631 unsigned HOST_WIDE_INT nonzero0
4632 = cached_nonzero_bits (XEXP (x, 0), mode,
4633 known_x, known_mode, known_ret);
4634
4635 /* Don't call nonzero_bits for the second time if it cannot change
4636 anything. */
4637 if ((nonzero & nonzero0) != nonzero)
4638 nonzero &= nonzero0
4639 | cached_nonzero_bits (XEXP (x, 1), mode,
4640 known_x, known_mode, known_ret);
4641 }
4642 break;
4643
4644 case PLUS: case MINUS:
4645 case MULT:
4646 case DIV: case UDIV:
4647 case MOD: case UMOD:
4648 /* We can apply the rules of arithmetic to compute the number of
4649 high- and low-order zero bits of these operations. We start by
4650 computing the width (position of the highest-order nonzero bit)
4651 and the number of low-order zero bits for each value. */
4652 {
4653 unsigned HOST_WIDE_INT nz0
4654 = cached_nonzero_bits (XEXP (x, 0), mode,
4655 known_x, known_mode, known_ret);
4656 unsigned HOST_WIDE_INT nz1
4657 = cached_nonzero_bits (XEXP (x, 1), mode,
4658 known_x, known_mode, known_ret);
4659 int sign_index = xmode_width - 1;
4660 int width0 = floor_log2 (nz0) + 1;
4661 int width1 = floor_log2 (nz1) + 1;
4662 int low0 = ctz_or_zero (nz0);
4663 int low1 = ctz_or_zero (nz1);
4664 unsigned HOST_WIDE_INT op0_maybe_minusp
4665 = nz0 & (HOST_WIDE_INT_1U << sign_index);
4666 unsigned HOST_WIDE_INT op1_maybe_minusp
4667 = nz1 & (HOST_WIDE_INT_1U << sign_index);
4668 unsigned int result_width = mode_width;
4669 int result_low = 0;
4670
4671 switch (code)
4672 {
4673 case PLUS:
4674 result_width = MAX (width0, width1) + 1;
4675 result_low = MIN (low0, low1);
4676 break;
4677 case MINUS:
4678 result_low = MIN (low0, low1);
4679 break;
4680 case MULT:
4681 result_width = width0 + width1;
4682 result_low = low0 + low1;
4683 break;
4684 case DIV:
4685 if (width1 == 0)
4686 break;
4687 if (!op0_maybe_minusp && !op1_maybe_minusp)
4688 result_width = width0;
4689 break;
4690 case UDIV:
4691 if (width1 == 0)
4692 break;
4693 result_width = width0;
4694 break;
4695 case MOD:
4696 if (width1 == 0)
4697 break;
4698 if (!op0_maybe_minusp && !op1_maybe_minusp)
4699 result_width = MIN (width0, width1);
4700 result_low = MIN (low0, low1);
4701 break;
4702 case UMOD:
4703 if (width1 == 0)
4704 break;
4705 result_width = MIN (width0, width1);
4706 result_low = MIN (low0, low1);
4707 break;
4708 default:
4709 gcc_unreachable ();
4710 }
4711
4712 if (result_width < mode_width)
4713 nonzero &= (HOST_WIDE_INT_1U << result_width) - 1;
4714
4715 if (result_low > 0)
4716 nonzero &= ~((HOST_WIDE_INT_1U << result_low) - 1);
4717 }
4718 break;
4719
4720 case ZERO_EXTRACT:
4721 if (CONST_INT_P (XEXP (x, 1))
4722 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
4723 nonzero &= (HOST_WIDE_INT_1U << INTVAL (XEXP (x, 1))) - 1;
4724 break;
4725
4726 case SUBREG:
4727 /* If this is a SUBREG formed for a promoted variable that has
4728 been zero-extended, we know that at least the high-order bits
4729 are zero, though others might be too. */
4730 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x))
4731 nonzero = GET_MODE_MASK (xmode)
4732 & cached_nonzero_bits (SUBREG_REG (x), xmode,
4733 known_x, known_mode, known_ret);
4734
4735 /* If the inner mode is a single word for both the host and target
4736 machines, we can compute this from which bits of the inner
4737 object might be nonzero. */
4738 inner_mode = GET_MODE (SUBREG_REG (x));
4739 if (GET_MODE_PRECISION (inner_mode).is_constant (&inner_width)
4740 && inner_width <= BITS_PER_WORD
4741 && inner_width <= HOST_BITS_PER_WIDE_INT)
4742 {
4743 nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
4744 known_x, known_mode, known_ret);
4745
4746 /* On a typical CISC machine, accessing an object in a wider mode
4747 causes the high-order bits to become undefined. So they are
4748 not known to be zero.
4749
4750 On a typical RISC machine, we only have to worry about the way
4751 loads are extended. Otherwise, if we get a reload for the inner
4752 part, it may be loaded from the stack, and then we may lose all
4753 the zero bits that existed before the store to the stack. */
4754 rtx_code extend_op;
4755 if ((!WORD_REGISTER_OPERATIONS
4756 || ((extend_op = load_extend_op (inner_mode)) == SIGN_EXTEND
4757 ? val_signbit_known_set_p (inner_mode, nonzero)
4758 : extend_op != ZERO_EXTEND)
4759 || !MEM_P (SUBREG_REG (x)))
4760 && xmode_width > inner_width)
4761 nonzero
4762 |= (GET_MODE_MASK (GET_MODE (x)) & ~GET_MODE_MASK (inner_mode));
4763 }
4764 break;
4765
4766 case ASHIFT:
4767 case ASHIFTRT:
4768 case LSHIFTRT:
4769 case ROTATE:
4770 case ROTATERT:
4771 /* The nonzero bits are in two classes: any bits within MODE
4772 that aren't in xmode are always significant. The rest of the
4773 nonzero bits are those that are significant in the operand of
4774 the shift when shifted the appropriate number of bits. This
4775 shows that high-order bits are cleared by the right shift and
4776 low-order bits by left shifts. */
4777 if (CONST_INT_P (XEXP (x, 1))
4778 && INTVAL (XEXP (x, 1)) >= 0
4779 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
4780 && INTVAL (XEXP (x, 1)) < xmode_width)
4781 {
4782 int count = INTVAL (XEXP (x, 1));
4783 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (xmode);
4784 unsigned HOST_WIDE_INT op_nonzero
4785 = cached_nonzero_bits (XEXP (x, 0), mode,
4786 known_x, known_mode, known_ret);
4787 unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
4788 unsigned HOST_WIDE_INT outer = 0;
4789
4790 if (mode_width > xmode_width)
4791 outer = (op_nonzero & nonzero & ~mode_mask);
4792
4793 switch (code)
4794 {
4795 case ASHIFT:
4796 inner <<= count;
4797 break;
4798
4799 case LSHIFTRT:
4800 inner >>= count;
4801 break;
4802
4803 case ASHIFTRT:
4804 inner >>= count;
4805
4806 /* If the sign bit may have been nonzero before the shift, we
4807 need to mark all the places it could have been copied to
4808 by the shift as possibly nonzero. */
4809 if (inner & (HOST_WIDE_INT_1U << (xmode_width - 1 - count)))
4810 inner |= (((HOST_WIDE_INT_1U << count) - 1)
4811 << (xmode_width - count));
4812 break;
4813
4814 case ROTATE:
4815 inner = (inner << (count % xmode_width)
4816 | (inner >> (xmode_width - (count % xmode_width))))
4817 & mode_mask;
4818 break;
4819
4820 case ROTATERT:
4821 inner = (inner >> (count % xmode_width)
4822 | (inner << (xmode_width - (count % xmode_width))))
4823 & mode_mask;
4824 break;
4825
4826 default:
4827 gcc_unreachable ();
4828 }
4829
4830 nonzero &= (outer | inner);
4831 }
4832 break;
4833
4834 case FFS:
4835 case POPCOUNT:
4836 /* This is at most the number of bits in the mode. */
4837 nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
4838 break;
4839
4840 case CLZ:
4841 /* If CLZ has a known value at zero, then the nonzero bits are
4842 that value, plus the number of bits in the mode minus one. */
4843 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4844 nonzero
4845 |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
4846 else
4847 nonzero = -1;
4848 break;
4849
4850 case CTZ:
4851 /* If CTZ has a known value at zero, then the nonzero bits are
4852 that value, plus the number of bits in the mode minus one. */
4853 if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
4854 nonzero
4855 |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
4856 else
4857 nonzero = -1;
4858 break;
4859
4860 case CLRSB:
4861 /* This is at most the number of bits in the mode minus 1. */
4862 nonzero = (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
4863 break;
4864
4865 case PARITY:
4866 nonzero = 1;
4867 break;
4868
4869 case IF_THEN_ELSE:
4870 {
4871 unsigned HOST_WIDE_INT nonzero_true
4872 = cached_nonzero_bits (XEXP (x, 1), mode,
4873 known_x, known_mode, known_ret);
4874
4875 /* Don't call nonzero_bits for the second time if it cannot change
4876 anything. */
4877 if ((nonzero & nonzero_true) != nonzero)
4878 nonzero &= nonzero_true
4879 | cached_nonzero_bits (XEXP (x, 2), mode,
4880 known_x, known_mode, known_ret);
4881 }
4882 break;
4883
4884 default:
4885 break;
4886 }
4887
4888 return nonzero;
4889 }
4890
4891 /* See the macro definition above. */
4892 #undef cached_num_sign_bit_copies
4893
4894
4895 /* Return true if num_sign_bit_copies1 might recurse into both operands
4896 of X. */
4897
4898 static inline bool
num_sign_bit_copies_binary_arith_p(const_rtx x)4899 num_sign_bit_copies_binary_arith_p (const_rtx x)
4900 {
4901 if (!ARITHMETIC_P (x))
4902 return false;
4903 switch (GET_CODE (x))
4904 {
4905 case IOR:
4906 case AND:
4907 case XOR:
4908 case SMIN:
4909 case SMAX:
4910 case UMIN:
4911 case UMAX:
4912 case PLUS:
4913 case MINUS:
4914 case MULT:
4915 return true;
4916 default:
4917 return false;
4918 }
4919 }
4920
4921 /* The function cached_num_sign_bit_copies is a wrapper around
4922 num_sign_bit_copies1. It avoids exponential behavior in
4923 num_sign_bit_copies1 when X has identical subexpressions on the
4924 first or the second level. */
4925
4926 static unsigned int
cached_num_sign_bit_copies(const_rtx x,scalar_int_mode mode,const_rtx known_x,machine_mode known_mode,unsigned int known_ret)4927 cached_num_sign_bit_copies (const_rtx x, scalar_int_mode mode,
4928 const_rtx known_x, machine_mode known_mode,
4929 unsigned int known_ret)
4930 {
4931 if (x == known_x && mode == known_mode)
4932 return known_ret;
4933
4934 /* Try to find identical subexpressions. If found call
4935 num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
4936 the precomputed value for the subexpression as KNOWN_RET. */
4937
4938 if (num_sign_bit_copies_binary_arith_p (x))
4939 {
4940 rtx x0 = XEXP (x, 0);
4941 rtx x1 = XEXP (x, 1);
4942
4943 /* Check the first level. */
4944 if (x0 == x1)
4945 return
4946 num_sign_bit_copies1 (x, mode, x0, mode,
4947 cached_num_sign_bit_copies (x0, mode, known_x,
4948 known_mode,
4949 known_ret));
4950
4951 /* Check the second level. */
4952 if (num_sign_bit_copies_binary_arith_p (x0)
4953 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4954 return
4955 num_sign_bit_copies1 (x, mode, x1, mode,
4956 cached_num_sign_bit_copies (x1, mode, known_x,
4957 known_mode,
4958 known_ret));
4959
4960 if (num_sign_bit_copies_binary_arith_p (x1)
4961 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4962 return
4963 num_sign_bit_copies1 (x, mode, x0, mode,
4964 cached_num_sign_bit_copies (x0, mode, known_x,
4965 known_mode,
4966 known_ret));
4967 }
4968
4969 return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
4970 }
4971
4972 /* Return the number of bits at the high-order end of X that are known to
4973 be equal to the sign bit. X will be used in mode MODE. The returned
4974 value will always be between 1 and the number of bits in MODE. */
4975
4976 static unsigned int
num_sign_bit_copies1(const_rtx x,scalar_int_mode mode,const_rtx known_x,machine_mode known_mode,unsigned int known_ret)4977 num_sign_bit_copies1 (const_rtx x, scalar_int_mode mode, const_rtx known_x,
4978 machine_mode known_mode,
4979 unsigned int known_ret)
4980 {
4981 enum rtx_code code = GET_CODE (x);
4982 unsigned int bitwidth = GET_MODE_PRECISION (mode);
4983 int num0, num1, result;
4984 unsigned HOST_WIDE_INT nonzero;
4985
4986 if (CONST_INT_P (x))
4987 {
4988 /* If the constant is negative, take its 1's complement and remask.
4989 Then see how many zero bits we have. */
4990 nonzero = UINTVAL (x) & GET_MODE_MASK (mode);
4991 if (bitwidth <= HOST_BITS_PER_WIDE_INT
4992 && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
4993 nonzero = (~nonzero) & GET_MODE_MASK (mode);
4994
4995 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
4996 }
4997
4998 scalar_int_mode xmode, inner_mode;
4999 if (!is_a <scalar_int_mode> (GET_MODE (x), &xmode))
5000 return 1;
5001
5002 unsigned int xmode_width = GET_MODE_PRECISION (xmode);
5003
5004 /* For a smaller mode, just ignore the high bits. */
5005 if (bitwidth < xmode_width)
5006 {
5007 num0 = cached_num_sign_bit_copies (x, xmode,
5008 known_x, known_mode, known_ret);
5009 return MAX (1, num0 - (int) (xmode_width - bitwidth));
5010 }
5011
5012 if (bitwidth > xmode_width)
5013 {
5014 /* If this machine does not do all register operations on the entire
5015 register and MODE is wider than the mode of X, we can say nothing
5016 at all about the high-order bits. We extend this reasoning to RISC
5017 machines for operations that might not operate on full registers. */
5018 if (!(WORD_REGISTER_OPERATIONS && word_register_operation_p (x)))
5019 return 1;
5020
5021 /* Likewise on machines that do, if the mode of the object is smaller
5022 than a word and loads of that size don't sign extend, we can say
5023 nothing about the high order bits. */
5024 if (xmode_width < BITS_PER_WORD
5025 && load_extend_op (xmode) != SIGN_EXTEND)
5026 return 1;
5027 }
5028
5029 /* Please keep num_sign_bit_copies_binary_arith_p above in sync with
5030 the code in the switch below. */
5031 switch (code)
5032 {
5033 case REG:
5034
5035 #if defined(POINTERS_EXTEND_UNSIGNED)
5036 /* If pointers extend signed and this is a pointer in Pmode, say that
5037 all the bits above ptr_mode are known to be sign bit copies. */
5038 /* As we do not know which address space the pointer is referring to,
5039 we can do this only if the target does not support different pointer
5040 or address modes depending on the address space. */
5041 if (target_default_pointer_address_modes_p ()
5042 && ! POINTERS_EXTEND_UNSIGNED && xmode == Pmode
5043 && mode == Pmode && REG_POINTER (x)
5044 && !targetm.have_ptr_extend ())
5045 return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1;
5046 #endif
5047
5048 {
5049 unsigned int copies_for_hook = 1, copies = 1;
5050 rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, xmode, mode,
5051 &copies_for_hook);
5052
5053 if (new_rtx)
5054 copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
5055 known_mode, known_ret);
5056
5057 if (copies > 1 || copies_for_hook > 1)
5058 return MAX (copies, copies_for_hook);
5059
5060 /* Else, use nonzero_bits to guess num_sign_bit_copies (see below). */
5061 }
5062 break;
5063
5064 case MEM:
5065 /* Some RISC machines sign-extend all loads of smaller than a word. */
5066 if (load_extend_op (xmode) == SIGN_EXTEND)
5067 return MAX (1, ((int) bitwidth - (int) xmode_width + 1));
5068 break;
5069
5070 case SUBREG:
5071 /* If this is a SUBREG for a promoted object that is sign-extended
5072 and we are looking at it in a wider mode, we know that at least the
5073 high-order bits are known to be sign bit copies. */
5074
5075 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_SIGNED_P (x))
5076 {
5077 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
5078 known_x, known_mode, known_ret);
5079 return MAX ((int) bitwidth - (int) xmode_width + 1, num0);
5080 }
5081
5082 if (is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (x)), &inner_mode))
5083 {
5084 /* For a smaller object, just ignore the high bits. */
5085 if (bitwidth <= GET_MODE_PRECISION (inner_mode))
5086 {
5087 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), inner_mode,
5088 known_x, known_mode,
5089 known_ret);
5090 return MAX (1, num0 - (int) (GET_MODE_PRECISION (inner_mode)
5091 - bitwidth));
5092 }
5093
5094 /* For paradoxical SUBREGs on machines where all register operations
5095 affect the entire register, just look inside. Note that we are
5096 passing MODE to the recursive call, so the number of sign bit
5097 copies will remain relative to that mode, not the inner mode.
5098
5099 This works only if loads sign extend. Otherwise, if we get a
5100 reload for the inner part, it may be loaded from the stack, and
5101 then we lose all sign bit copies that existed before the store
5102 to the stack. */
5103 if (WORD_REGISTER_OPERATIONS
5104 && load_extend_op (inner_mode) == SIGN_EXTEND
5105 && paradoxical_subreg_p (x)
5106 && MEM_P (SUBREG_REG (x)))
5107 return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
5108 known_x, known_mode, known_ret);
5109 }
5110 break;
5111
5112 case SIGN_EXTRACT:
5113 if (CONST_INT_P (XEXP (x, 1)))
5114 return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
5115 break;
5116
5117 case SIGN_EXTEND:
5118 if (is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
5119 return (bitwidth - GET_MODE_PRECISION (inner_mode)
5120 + cached_num_sign_bit_copies (XEXP (x, 0), inner_mode,
5121 known_x, known_mode, known_ret));
5122 break;
5123
5124 case TRUNCATE:
5125 /* For a smaller object, just ignore the high bits. */
5126 inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
5127 num0 = cached_num_sign_bit_copies (XEXP (x, 0), inner_mode,
5128 known_x, known_mode, known_ret);
5129 return MAX (1, (num0 - (int) (GET_MODE_PRECISION (inner_mode)
5130 - bitwidth)));
5131
5132 case NOT:
5133 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
5134 known_x, known_mode, known_ret);
5135
5136 case ROTATE: case ROTATERT:
5137 /* If we are rotating left by a number of bits less than the number
5138 of sign bit copies, we can just subtract that amount from the
5139 number. */
5140 if (CONST_INT_P (XEXP (x, 1))
5141 && INTVAL (XEXP (x, 1)) >= 0
5142 && INTVAL (XEXP (x, 1)) < (int) bitwidth)
5143 {
5144 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5145 known_x, known_mode, known_ret);
5146 return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
5147 : (int) bitwidth - INTVAL (XEXP (x, 1))));
5148 }
5149 break;
5150
5151 case NEG:
5152 /* In general, this subtracts one sign bit copy. But if the value
5153 is known to be positive, the number of sign bit copies is the
5154 same as that of the input. Finally, if the input has just one bit
5155 that might be nonzero, all the bits are copies of the sign bit. */
5156 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5157 known_x, known_mode, known_ret);
5158 if (bitwidth > HOST_BITS_PER_WIDE_INT)
5159 return num0 > 1 ? num0 - 1 : 1;
5160
5161 nonzero = nonzero_bits (XEXP (x, 0), mode);
5162 if (nonzero == 1)
5163 return bitwidth;
5164
5165 if (num0 > 1
5166 && ((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero))
5167 num0--;
5168
5169 return num0;
5170
5171 case IOR: case AND: case XOR:
5172 case SMIN: case SMAX: case UMIN: case UMAX:
5173 /* Logical operations will preserve the number of sign-bit copies.
5174 MIN and MAX operations always return one of the operands. */
5175 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5176 known_x, known_mode, known_ret);
5177 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5178 known_x, known_mode, known_ret);
5179
5180 /* If num1 is clearing some of the top bits then regardless of
5181 the other term, we are guaranteed to have at least that many
5182 high-order zero bits. */
5183 if (code == AND
5184 && num1 > 1
5185 && bitwidth <= HOST_BITS_PER_WIDE_INT
5186 && CONST_INT_P (XEXP (x, 1))
5187 && (UINTVAL (XEXP (x, 1))
5188 & (HOST_WIDE_INT_1U << (bitwidth - 1))) == 0)
5189 return num1;
5190
5191 /* Similarly for IOR when setting high-order bits. */
5192 if (code == IOR
5193 && num1 > 1
5194 && bitwidth <= HOST_BITS_PER_WIDE_INT
5195 && CONST_INT_P (XEXP (x, 1))
5196 && (UINTVAL (XEXP (x, 1))
5197 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5198 return num1;
5199
5200 return MIN (num0, num1);
5201
5202 case PLUS: case MINUS:
5203 /* For addition and subtraction, we can have a 1-bit carry. However,
5204 if we are subtracting 1 from a positive number, there will not
5205 be such a carry. Furthermore, if the positive number is known to
5206 be 0 or 1, we know the result is either -1 or 0. */
5207
5208 if (code == PLUS && XEXP (x, 1) == constm1_rtx
5209 && bitwidth <= HOST_BITS_PER_WIDE_INT)
5210 {
5211 nonzero = nonzero_bits (XEXP (x, 0), mode);
5212 if (((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero) == 0)
5213 return (nonzero == 1 || nonzero == 0 ? bitwidth
5214 : bitwidth - floor_log2 (nonzero) - 1);
5215 }
5216
5217 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5218 known_x, known_mode, known_ret);
5219 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5220 known_x, known_mode, known_ret);
5221 result = MAX (1, MIN (num0, num1) - 1);
5222
5223 return result;
5224
5225 case MULT:
5226 /* The number of bits of the product is the sum of the number of
5227 bits of both terms. However, unless one of the terms if known
5228 to be positive, we must allow for an additional bit since negating
5229 a negative number can remove one sign bit copy. */
5230
5231 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5232 known_x, known_mode, known_ret);
5233 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5234 known_x, known_mode, known_ret);
5235
5236 result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
5237 if (result > 0
5238 && (bitwidth > HOST_BITS_PER_WIDE_INT
5239 || (((nonzero_bits (XEXP (x, 0), mode)
5240 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5241 && ((nonzero_bits (XEXP (x, 1), mode)
5242 & (HOST_WIDE_INT_1U << (bitwidth - 1)))
5243 != 0))))
5244 result--;
5245
5246 return MAX (1, result);
5247
5248 case UDIV:
5249 /* The result must be <= the first operand. If the first operand
5250 has the high bit set, we know nothing about the number of sign
5251 bit copies. */
5252 if (bitwidth > HOST_BITS_PER_WIDE_INT)
5253 return 1;
5254 else if ((nonzero_bits (XEXP (x, 0), mode)
5255 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5256 return 1;
5257 else
5258 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
5259 known_x, known_mode, known_ret);
5260
5261 case UMOD:
5262 /* The result must be <= the second operand. If the second operand
5263 has (or just might have) the high bit set, we know nothing about
5264 the number of sign bit copies. */
5265 if (bitwidth > HOST_BITS_PER_WIDE_INT)
5266 return 1;
5267 else if ((nonzero_bits (XEXP (x, 1), mode)
5268 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5269 return 1;
5270 else
5271 return cached_num_sign_bit_copies (XEXP (x, 1), mode,
5272 known_x, known_mode, known_ret);
5273
5274 case DIV:
5275 /* Similar to unsigned division, except that we have to worry about
5276 the case where the divisor is negative, in which case we have
5277 to add 1. */
5278 result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5279 known_x, known_mode, known_ret);
5280 if (result > 1
5281 && (bitwidth > HOST_BITS_PER_WIDE_INT
5282 || (nonzero_bits (XEXP (x, 1), mode)
5283 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
5284 result--;
5285
5286 return result;
5287
5288 case MOD:
5289 result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5290 known_x, known_mode, known_ret);
5291 if (result > 1
5292 && (bitwidth > HOST_BITS_PER_WIDE_INT
5293 || (nonzero_bits (XEXP (x, 1), mode)
5294 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0))
5295 result--;
5296
5297 return result;
5298
5299 case ASHIFTRT:
5300 /* Shifts by a constant add to the number of bits equal to the
5301 sign bit. */
5302 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5303 known_x, known_mode, known_ret);
5304 if (CONST_INT_P (XEXP (x, 1))
5305 && INTVAL (XEXP (x, 1)) > 0
5306 && INTVAL (XEXP (x, 1)) < xmode_width)
5307 num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));
5308
5309 return num0;
5310
5311 case ASHIFT:
5312 /* Left shifts destroy copies. */
5313 if (!CONST_INT_P (XEXP (x, 1))
5314 || INTVAL (XEXP (x, 1)) < 0
5315 || INTVAL (XEXP (x, 1)) >= (int) bitwidth
5316 || INTVAL (XEXP (x, 1)) >= xmode_width)
5317 return 1;
5318
5319 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5320 known_x, known_mode, known_ret);
5321 return MAX (1, num0 - INTVAL (XEXP (x, 1)));
5322
5323 case IF_THEN_ELSE:
5324 num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5325 known_x, known_mode, known_ret);
5326 num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
5327 known_x, known_mode, known_ret);
5328 return MIN (num0, num1);
5329
5330 case EQ: case NE: case GE: case GT: case LE: case LT:
5331 case UNEQ: case LTGT: case UNGE: case UNGT: case UNLE: case UNLT:
5332 case GEU: case GTU: case LEU: case LTU:
5333 case UNORDERED: case ORDERED:
5334 /* If the constant is negative, take its 1's complement and remask.
5335 Then see how many zero bits we have. */
5336 nonzero = STORE_FLAG_VALUE;
5337 if (bitwidth <= HOST_BITS_PER_WIDE_INT
5338 && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5339 nonzero = (~nonzero) & GET_MODE_MASK (mode);
5340
5341 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
5342
5343 default:
5344 break;
5345 }
5346
5347 /* If we haven't been able to figure it out by one of the above rules,
5348 see if some of the high-order bits are known to be zero. If so,
5349 count those bits and return one less than that amount. If we can't
5350 safely compute the mask for this mode, always return BITWIDTH. */
5351
5352 bitwidth = GET_MODE_PRECISION (mode);
5353 if (bitwidth > HOST_BITS_PER_WIDE_INT)
5354 return 1;
5355
5356 nonzero = nonzero_bits (x, mode);
5357 return nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))
5358 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
5359 }
5360
5361 /* Calculate the rtx_cost of a single instruction pattern. A return value of
5362 zero indicates an instruction pattern without a known cost. */
5363
5364 int
pattern_cost(rtx pat,bool speed)5365 pattern_cost (rtx pat, bool speed)
5366 {
5367 int i, cost;
5368 rtx set;
5369
5370 /* Extract the single set rtx from the instruction pattern. We
5371 can't use single_set since we only have the pattern. We also
5372 consider PARALLELs of a normal set and a single comparison. In
5373 that case we use the cost of the non-comparison SET operation,
5374 which is most-likely to be the real cost of this operation. */
5375 if (GET_CODE (pat) == SET)
5376 set = pat;
5377 else if (GET_CODE (pat) == PARALLEL)
5378 {
5379 set = NULL_RTX;
5380 rtx comparison = NULL_RTX;
5381
5382 for (i = 0; i < XVECLEN (pat, 0); i++)
5383 {
5384 rtx x = XVECEXP (pat, 0, i);
5385 if (GET_CODE (x) == SET)
5386 {
5387 if (GET_CODE (SET_SRC (x)) == COMPARE)
5388 {
5389 if (comparison)
5390 return 0;
5391 comparison = x;
5392 }
5393 else
5394 {
5395 if (set)
5396 return 0;
5397 set = x;
5398 }
5399 }
5400 }
5401
5402 if (!set && comparison)
5403 set = comparison;
5404
5405 if (!set)
5406 return 0;
5407 }
5408 else
5409 return 0;
5410
5411 cost = set_src_cost (SET_SRC (set), GET_MODE (SET_DEST (set)), speed);
5412 return cost > 0 ? cost : COSTS_N_INSNS (1);
5413 }
5414
5415 /* Calculate the cost of a single instruction. A return value of zero
5416 indicates an instruction pattern without a known cost. */
5417
5418 int
insn_cost(rtx_insn * insn,bool speed)5419 insn_cost (rtx_insn *insn, bool speed)
5420 {
5421 if (targetm.insn_cost)
5422 return targetm.insn_cost (insn, speed);
5423
5424 return pattern_cost (PATTERN (insn), speed);
5425 }
5426
5427 /* Returns estimate on cost of computing SEQ. */
5428
5429 unsigned
seq_cost(const rtx_insn * seq,bool speed)5430 seq_cost (const rtx_insn *seq, bool speed)
5431 {
5432 unsigned cost = 0;
5433 rtx set;
5434
5435 for (; seq; seq = NEXT_INSN (seq))
5436 {
5437 set = single_set (seq);
5438 if (set)
5439 cost += set_rtx_cost (set, speed);
5440 else if (NONDEBUG_INSN_P (seq))
5441 {
5442 int this_cost = insn_cost (CONST_CAST_RTX_INSN (seq), speed);
5443 if (this_cost > 0)
5444 cost += this_cost;
5445 else
5446 cost++;
5447 }
5448 }
5449
5450 return cost;
5451 }
5452
5453 /* Given an insn INSN and condition COND, return the condition in a
5454 canonical form to simplify testing by callers. Specifically:
5455
5456 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
5457 (2) Both operands will be machine operands; (cc0) will have been replaced.
5458 (3) If an operand is a constant, it will be the second operand.
5459 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
5460 for GE, GEU, and LEU.
5461
5462 If the condition cannot be understood, or is an inequality floating-point
5463 comparison which needs to be reversed, 0 will be returned.
5464
5465 If REVERSE is nonzero, then reverse the condition prior to canonizing it.
5466
5467 If EARLIEST is nonzero, it is a pointer to a place where the earliest
5468 insn used in locating the condition was found. If a replacement test
5469 of the condition is desired, it should be placed in front of that
5470 insn and we will be sure that the inputs are still valid.
5471
5472 If WANT_REG is nonzero, we wish the condition to be relative to that
5473 register, if possible. Therefore, do not canonicalize the condition
5474 further. If ALLOW_CC_MODE is nonzero, allow the condition returned
5475 to be a compare to a CC mode register.
5476
5477 If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
5478 and at INSN. */
5479
5480 rtx
canonicalize_condition(rtx_insn * insn,rtx cond,int reverse,rtx_insn ** earliest,rtx want_reg,int allow_cc_mode,int valid_at_insn_p)5481 canonicalize_condition (rtx_insn *insn, rtx cond, int reverse,
5482 rtx_insn **earliest,
5483 rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
5484 {
5485 enum rtx_code code;
5486 rtx_insn *prev = insn;
5487 const_rtx set;
5488 rtx tem;
5489 rtx op0, op1;
5490 int reverse_code = 0;
5491 machine_mode mode;
5492 basic_block bb = BLOCK_FOR_INSN (insn);
5493
5494 code = GET_CODE (cond);
5495 mode = GET_MODE (cond);
5496 op0 = XEXP (cond, 0);
5497 op1 = XEXP (cond, 1);
5498
5499 if (reverse)
5500 code = reversed_comparison_code (cond, insn);
5501 if (code == UNKNOWN)
5502 return 0;
5503
5504 if (earliest)
5505 *earliest = insn;
5506
5507 /* If we are comparing a register with zero, see if the register is set
5508 in the previous insn to a COMPARE or a comparison operation. Perform
5509 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
5510 in cse.c */
5511
5512 while ((GET_RTX_CLASS (code) == RTX_COMPARE
5513 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
5514 && op1 == CONST0_RTX (GET_MODE (op0))
5515 && op0 != want_reg)
5516 {
5517 /* Set nonzero when we find something of interest. */
5518 rtx x = 0;
5519
5520 /* If comparison with cc0, import actual comparison from compare
5521 insn. */
5522 if (op0 == cc0_rtx)
5523 {
5524 if ((prev = prev_nonnote_insn (prev)) == 0
5525 || !NONJUMP_INSN_P (prev)
5526 || (set = single_set (prev)) == 0
5527 || SET_DEST (set) != cc0_rtx)
5528 return 0;
5529
5530 op0 = SET_SRC (set);
5531 op1 = CONST0_RTX (GET_MODE (op0));
5532 if (earliest)
5533 *earliest = prev;
5534 }
5535
5536 /* If this is a COMPARE, pick up the two things being compared. */
5537 if (GET_CODE (op0) == COMPARE)
5538 {
5539 op1 = XEXP (op0, 1);
5540 op0 = XEXP (op0, 0);
5541 continue;
5542 }
5543 else if (!REG_P (op0))
5544 break;
5545
5546 /* Go back to the previous insn. Stop if it is not an INSN. We also
5547 stop if it isn't a single set or if it has a REG_INC note because
5548 we don't want to bother dealing with it. */
5549
5550 prev = prev_nonnote_nondebug_insn (prev);
5551
5552 if (prev == 0
5553 || !NONJUMP_INSN_P (prev)
5554 || FIND_REG_INC_NOTE (prev, NULL_RTX)
5555 /* In cfglayout mode, there do not have to be labels at the
5556 beginning of a block, or jumps at the end, so the previous
5557 conditions would not stop us when we reach bb boundary. */
5558 || BLOCK_FOR_INSN (prev) != bb)
5559 break;
5560
5561 set = set_of (op0, prev);
5562
5563 if (set
5564 && (GET_CODE (set) != SET
5565 || !rtx_equal_p (SET_DEST (set), op0)))
5566 break;
5567
5568 /* If this is setting OP0, get what it sets it to if it looks
5569 relevant. */
5570 if (set)
5571 {
5572 machine_mode inner_mode = GET_MODE (SET_DEST (set));
5573 #ifdef FLOAT_STORE_FLAG_VALUE
5574 REAL_VALUE_TYPE fsfv;
5575 #endif
5576
5577 /* ??? We may not combine comparisons done in a CCmode with
5578 comparisons not done in a CCmode. This is to aid targets
5579 like Alpha that have an IEEE compliant EQ instruction, and
5580 a non-IEEE compliant BEQ instruction. The use of CCmode is
5581 actually artificial, simply to prevent the combination, but
5582 should not affect other platforms.
5583
5584 However, we must allow VOIDmode comparisons to match either
5585 CCmode or non-CCmode comparison, because some ports have
5586 modeless comparisons inside branch patterns.
5587
5588 ??? This mode check should perhaps look more like the mode check
5589 in simplify_comparison in combine. */
5590 if (((GET_MODE_CLASS (mode) == MODE_CC)
5591 != (GET_MODE_CLASS (inner_mode) == MODE_CC))
5592 && mode != VOIDmode
5593 && inner_mode != VOIDmode)
5594 break;
5595 if (GET_CODE (SET_SRC (set)) == COMPARE
5596 || (((code == NE
5597 || (code == LT
5598 && val_signbit_known_set_p (inner_mode,
5599 STORE_FLAG_VALUE))
5600 #ifdef FLOAT_STORE_FLAG_VALUE
5601 || (code == LT
5602 && SCALAR_FLOAT_MODE_P (inner_mode)
5603 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5604 REAL_VALUE_NEGATIVE (fsfv)))
5605 #endif
5606 ))
5607 && COMPARISON_P (SET_SRC (set))))
5608 x = SET_SRC (set);
5609 else if (((code == EQ
5610 || (code == GE
5611 && val_signbit_known_set_p (inner_mode,
5612 STORE_FLAG_VALUE))
5613 #ifdef FLOAT_STORE_FLAG_VALUE
5614 || (code == GE
5615 && SCALAR_FLOAT_MODE_P (inner_mode)
5616 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
5617 REAL_VALUE_NEGATIVE (fsfv)))
5618 #endif
5619 ))
5620 && COMPARISON_P (SET_SRC (set)))
5621 {
5622 reverse_code = 1;
5623 x = SET_SRC (set);
5624 }
5625 else if ((code == EQ || code == NE)
5626 && GET_CODE (SET_SRC (set)) == XOR)
5627 /* Handle sequences like:
5628
5629 (set op0 (xor X Y))
5630 ...(eq|ne op0 (const_int 0))...
5631
5632 in which case:
5633
5634 (eq op0 (const_int 0)) reduces to (eq X Y)
5635 (ne op0 (const_int 0)) reduces to (ne X Y)
5636
5637 This is the form used by MIPS16, for example. */
5638 x = SET_SRC (set);
5639 else
5640 break;
5641 }
5642
5643 else if (reg_set_p (op0, prev))
5644 /* If this sets OP0, but not directly, we have to give up. */
5645 break;
5646
5647 if (x)
5648 {
5649 /* If the caller is expecting the condition to be valid at INSN,
5650 make sure X doesn't change before INSN. */
5651 if (valid_at_insn_p)
5652 if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
5653 break;
5654 if (COMPARISON_P (x))
5655 code = GET_CODE (x);
5656 if (reverse_code)
5657 {
5658 code = reversed_comparison_code (x, prev);
5659 if (code == UNKNOWN)
5660 return 0;
5661 reverse_code = 0;
5662 }
5663
5664 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
5665 if (earliest)
5666 *earliest = prev;
5667 }
5668 }
5669
5670 /* If constant is first, put it last. */
5671 if (CONSTANT_P (op0))
5672 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
5673
5674 /* If OP0 is the result of a comparison, we weren't able to find what
5675 was really being compared, so fail. */
5676 if (!allow_cc_mode
5677 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
5678 return 0;
5679
5680 /* Canonicalize any ordered comparison with integers involving equality
5681 if we can do computations in the relevant mode and we do not
5682 overflow. */
5683
5684 scalar_int_mode op0_mode;
5685 if (CONST_INT_P (op1)
5686 && is_a <scalar_int_mode> (GET_MODE (op0), &op0_mode)
5687 && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT)
5688 {
5689 HOST_WIDE_INT const_val = INTVAL (op1);
5690 unsigned HOST_WIDE_INT uconst_val = const_val;
5691 unsigned HOST_WIDE_INT max_val
5692 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (op0_mode);
5693
5694 switch (code)
5695 {
5696 case LE:
5697 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
5698 code = LT, op1 = gen_int_mode (const_val + 1, op0_mode);
5699 break;
5700
5701 /* When cross-compiling, const_val might be sign-extended from
5702 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
5703 case GE:
5704 if ((const_val & max_val)
5705 != (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (op0_mode) - 1)))
5706 code = GT, op1 = gen_int_mode (const_val - 1, op0_mode);
5707 break;
5708
5709 case LEU:
5710 if (uconst_val < max_val)
5711 code = LTU, op1 = gen_int_mode (uconst_val + 1, op0_mode);
5712 break;
5713
5714 case GEU:
5715 if (uconst_val != 0)
5716 code = GTU, op1 = gen_int_mode (uconst_val - 1, op0_mode);
5717 break;
5718
5719 default:
5720 break;
5721 }
5722 }
5723
5724 /* Never return CC0; return zero instead. */
5725 if (CC0_P (op0))
5726 return 0;
5727
5728 /* We promised to return a comparison. */
5729 rtx ret = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
5730 if (COMPARISON_P (ret))
5731 return ret;
5732 return 0;
5733 }
5734
5735 /* Given a jump insn JUMP, return the condition that will cause it to branch
5736 to its JUMP_LABEL. If the condition cannot be understood, or is an
5737 inequality floating-point comparison which needs to be reversed, 0 will
5738 be returned.
5739
5740 If EARLIEST is nonzero, it is a pointer to a place where the earliest
5741 insn used in locating the condition was found. If a replacement test
5742 of the condition is desired, it should be placed in front of that
5743 insn and we will be sure that the inputs are still valid. If EARLIEST
5744 is null, the returned condition will be valid at INSN.
5745
5746 If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
5747 compare CC mode register.
5748
5749 VALID_AT_INSN_P is the same as for canonicalize_condition. */
5750
5751 rtx
get_condition(rtx_insn * jump,rtx_insn ** earliest,int allow_cc_mode,int valid_at_insn_p)5752 get_condition (rtx_insn *jump, rtx_insn **earliest, int allow_cc_mode,
5753 int valid_at_insn_p)
5754 {
5755 rtx cond;
5756 int reverse;
5757 rtx set;
5758
5759 /* If this is not a standard conditional jump, we can't parse it. */
5760 if (!JUMP_P (jump)
5761 || ! any_condjump_p (jump))
5762 return 0;
5763 set = pc_set (jump);
5764
5765 cond = XEXP (SET_SRC (set), 0);
5766
5767 /* If this branches to JUMP_LABEL when the condition is false, reverse
5768 the condition. */
5769 reverse
5770 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
5771 && label_ref_label (XEXP (SET_SRC (set), 2)) == JUMP_LABEL (jump);
5772
5773 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
5774 allow_cc_mode, valid_at_insn_p);
5775 }
5776
5777 /* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
5778 TARGET_MODE_REP_EXTENDED.
5779
5780 Note that we assume that the property of
5781 TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
5782 narrower than mode B. I.e., if A is a mode narrower than B then in
5783 order to be able to operate on it in mode B, mode A needs to
5784 satisfy the requirements set by the representation of mode B. */
5785
5786 static void
init_num_sign_bit_copies_in_rep(void)5787 init_num_sign_bit_copies_in_rep (void)
5788 {
5789 opt_scalar_int_mode in_mode_iter;
5790 scalar_int_mode mode;
5791
5792 FOR_EACH_MODE_IN_CLASS (in_mode_iter, MODE_INT)
5793 FOR_EACH_MODE_UNTIL (mode, in_mode_iter.require ())
5794 {
5795 scalar_int_mode in_mode = in_mode_iter.require ();
5796 scalar_int_mode i;
5797
5798 /* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
5799 extends to the next widest mode. */
5800 gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
5801 || GET_MODE_WIDER_MODE (mode).require () == in_mode);
5802
5803 /* We are in in_mode. Count how many bits outside of mode
5804 have to be copies of the sign-bit. */
5805 FOR_EACH_MODE (i, mode, in_mode)
5806 {
5807 /* This must always exist (for the last iteration it will be
5808 IN_MODE). */
5809 scalar_int_mode wider = GET_MODE_WIDER_MODE (i).require ();
5810
5811 if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
5812 /* We can only check sign-bit copies starting from the
5813 top-bit. In order to be able to check the bits we
5814 have already seen we pretend that subsequent bits
5815 have to be sign-bit copies too. */
5816 || num_sign_bit_copies_in_rep [in_mode][mode])
5817 num_sign_bit_copies_in_rep [in_mode][mode]
5818 += GET_MODE_PRECISION (wider) - GET_MODE_PRECISION (i);
5819 }
5820 }
5821 }
5822
5823 /* Suppose that truncation from the machine mode of X to MODE is not a
5824 no-op. See if there is anything special about X so that we can
5825 assume it already contains a truncated value of MODE. */
5826
5827 bool
truncated_to_mode(machine_mode mode,const_rtx x)5828 truncated_to_mode (machine_mode mode, const_rtx x)
5829 {
5830 /* This register has already been used in MODE without explicit
5831 truncation. */
5832 if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
5833 return true;
5834
5835 /* See if we already satisfy the requirements of MODE. If yes we
5836 can just switch to MODE. */
5837 if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
5838 && (num_sign_bit_copies (x, GET_MODE (x))
5839 >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
5840 return true;
5841
5842 return false;
5843 }
5844
5845 /* Return true if RTX code CODE has a single sequence of zero or more
5846 "e" operands and no rtvec operands. Initialize its rtx_all_subrtx_bounds
5847 entry in that case. */
5848
5849 static bool
setup_reg_subrtx_bounds(unsigned int code)5850 setup_reg_subrtx_bounds (unsigned int code)
5851 {
5852 const char *format = GET_RTX_FORMAT ((enum rtx_code) code);
5853 unsigned int i = 0;
5854 for (; format[i] != 'e'; ++i)
5855 {
5856 if (!format[i])
5857 /* No subrtxes. Leave start and count as 0. */
5858 return true;
5859 if (format[i] == 'E' || format[i] == 'V')
5860 return false;
5861 }
5862
5863 /* Record the sequence of 'e's. */
5864 rtx_all_subrtx_bounds[code].start = i;
5865 do
5866 ++i;
5867 while (format[i] == 'e');
5868 rtx_all_subrtx_bounds[code].count = i - rtx_all_subrtx_bounds[code].start;
5869 /* rtl-iter.h relies on this. */
5870 gcc_checking_assert (rtx_all_subrtx_bounds[code].count <= 3);
5871
5872 for (; format[i]; ++i)
5873 if (format[i] == 'E' || format[i] == 'V' || format[i] == 'e')
5874 return false;
5875
5876 return true;
5877 }
5878
5879 /* Initialize rtx_all_subrtx_bounds. */
5880 void
init_rtlanal(void)5881 init_rtlanal (void)
5882 {
5883 int i;
5884 for (i = 0; i < NUM_RTX_CODE; i++)
5885 {
5886 if (!setup_reg_subrtx_bounds (i))
5887 rtx_all_subrtx_bounds[i].count = UCHAR_MAX;
5888 if (GET_RTX_CLASS (i) != RTX_CONST_OBJ)
5889 rtx_nonconst_subrtx_bounds[i] = rtx_all_subrtx_bounds[i];
5890 }
5891
5892 init_num_sign_bit_copies_in_rep ();
5893 }
5894
5895 /* Check whether this is a constant pool constant. */
5896 bool
constant_pool_constant_p(rtx x)5897 constant_pool_constant_p (rtx x)
5898 {
5899 x = avoid_constant_pool_reference (x);
5900 return CONST_DOUBLE_P (x);
5901 }
5902
5903 /* If M is a bitmask that selects a field of low-order bits within an item but
5904 not the entire word, return the length of the field. Return -1 otherwise.
5905 M is used in machine mode MODE. */
5906
5907 int
low_bitmask_len(machine_mode mode,unsigned HOST_WIDE_INT m)5908 low_bitmask_len (machine_mode mode, unsigned HOST_WIDE_INT m)
5909 {
5910 if (mode != VOIDmode)
5911 {
5912 if (!HWI_COMPUTABLE_MODE_P (mode))
5913 return -1;
5914 m &= GET_MODE_MASK (mode);
5915 }
5916
5917 return exact_log2 (m + 1);
5918 }
5919
5920 /* Return the mode of MEM's address. */
5921
5922 scalar_int_mode
get_address_mode(rtx mem)5923 get_address_mode (rtx mem)
5924 {
5925 machine_mode mode;
5926
5927 gcc_assert (MEM_P (mem));
5928 mode = GET_MODE (XEXP (mem, 0));
5929 if (mode != VOIDmode)
5930 return as_a <scalar_int_mode> (mode);
5931 return targetm.addr_space.address_mode (MEM_ADDR_SPACE (mem));
5932 }
5933
5934 /* Split up a CONST_DOUBLE or integer constant rtx
5935 into two rtx's for single words,
5936 storing in *FIRST the word that comes first in memory in the target
5937 and in *SECOND the other.
5938
5939 TODO: This function needs to be rewritten to work on any size
5940 integer. */
5941
5942 void
split_double(rtx value,rtx * first,rtx * second)5943 split_double (rtx value, rtx *first, rtx *second)
5944 {
5945 if (CONST_INT_P (value))
5946 {
5947 if (HOST_BITS_PER_WIDE_INT >= (2 * BITS_PER_WORD))
5948 {
5949 /* In this case the CONST_INT holds both target words.
5950 Extract the bits from it into two word-sized pieces.
5951 Sign extend each half to HOST_WIDE_INT. */
5952 unsigned HOST_WIDE_INT low, high;
5953 unsigned HOST_WIDE_INT mask, sign_bit, sign_extend;
5954 unsigned bits_per_word = BITS_PER_WORD;
5955
5956 /* Set sign_bit to the most significant bit of a word. */
5957 sign_bit = 1;
5958 sign_bit <<= bits_per_word - 1;
5959
5960 /* Set mask so that all bits of the word are set. We could
5961 have used 1 << BITS_PER_WORD instead of basing the
5962 calculation on sign_bit. However, on machines where
5963 HOST_BITS_PER_WIDE_INT == BITS_PER_WORD, it could cause a
5964 compiler warning, even though the code would never be
5965 executed. */
5966 mask = sign_bit << 1;
5967 mask--;
5968
5969 /* Set sign_extend as any remaining bits. */
5970 sign_extend = ~mask;
5971
5972 /* Pick the lower word and sign-extend it. */
5973 low = INTVAL (value);
5974 low &= mask;
5975 if (low & sign_bit)
5976 low |= sign_extend;
5977
5978 /* Pick the higher word, shifted to the least significant
5979 bits, and sign-extend it. */
5980 high = INTVAL (value);
5981 high >>= bits_per_word - 1;
5982 high >>= 1;
5983 high &= mask;
5984 if (high & sign_bit)
5985 high |= sign_extend;
5986
5987 /* Store the words in the target machine order. */
5988 if (WORDS_BIG_ENDIAN)
5989 {
5990 *first = GEN_INT (high);
5991 *second = GEN_INT (low);
5992 }
5993 else
5994 {
5995 *first = GEN_INT (low);
5996 *second = GEN_INT (high);
5997 }
5998 }
5999 else
6000 {
6001 /* The rule for using CONST_INT for a wider mode
6002 is that we regard the value as signed.
6003 So sign-extend it. */
6004 rtx high = (INTVAL (value) < 0 ? constm1_rtx : const0_rtx);
6005 if (WORDS_BIG_ENDIAN)
6006 {
6007 *first = high;
6008 *second = value;
6009 }
6010 else
6011 {
6012 *first = value;
6013 *second = high;
6014 }
6015 }
6016 }
6017 else if (GET_CODE (value) == CONST_WIDE_INT)
6018 {
6019 /* All of this is scary code and needs to be converted to
6020 properly work with any size integer. */
6021 gcc_assert (CONST_WIDE_INT_NUNITS (value) == 2);
6022 if (WORDS_BIG_ENDIAN)
6023 {
6024 *first = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
6025 *second = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
6026 }
6027 else
6028 {
6029 *first = GEN_INT (CONST_WIDE_INT_ELT (value, 0));
6030 *second = GEN_INT (CONST_WIDE_INT_ELT (value, 1));
6031 }
6032 }
6033 else if (!CONST_DOUBLE_P (value))
6034 {
6035 if (WORDS_BIG_ENDIAN)
6036 {
6037 *first = const0_rtx;
6038 *second = value;
6039 }
6040 else
6041 {
6042 *first = value;
6043 *second = const0_rtx;
6044 }
6045 }
6046 else if (GET_MODE (value) == VOIDmode
6047 /* This is the old way we did CONST_DOUBLE integers. */
6048 || GET_MODE_CLASS (GET_MODE (value)) == MODE_INT)
6049 {
6050 /* In an integer, the words are defined as most and least significant.
6051 So order them by the target's convention. */
6052 if (WORDS_BIG_ENDIAN)
6053 {
6054 *first = GEN_INT (CONST_DOUBLE_HIGH (value));
6055 *second = GEN_INT (CONST_DOUBLE_LOW (value));
6056 }
6057 else
6058 {
6059 *first = GEN_INT (CONST_DOUBLE_LOW (value));
6060 *second = GEN_INT (CONST_DOUBLE_HIGH (value));
6061 }
6062 }
6063 else
6064 {
6065 long l[2];
6066
6067 /* Note, this converts the REAL_VALUE_TYPE to the target's
6068 format, splits up the floating point double and outputs
6069 exactly 32 bits of it into each of l[0] and l[1] --
6070 not necessarily BITS_PER_WORD bits. */
6071 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (value), l);
6072
6073 /* If 32 bits is an entire word for the target, but not for the host,
6074 then sign-extend on the host so that the number will look the same
6075 way on the host that it would on the target. See for instance
6076 simplify_unary_operation. The #if is needed to avoid compiler
6077 warnings. */
6078
6079 #if HOST_BITS_PER_LONG > 32
6080 if (BITS_PER_WORD < HOST_BITS_PER_LONG && BITS_PER_WORD == 32)
6081 {
6082 if (l[0] & ((long) 1 << 31))
6083 l[0] |= ((unsigned long) (-1) << 32);
6084 if (l[1] & ((long) 1 << 31))
6085 l[1] |= ((unsigned long) (-1) << 32);
6086 }
6087 #endif
6088
6089 *first = GEN_INT (l[0]);
6090 *second = GEN_INT (l[1]);
6091 }
6092 }
6093
6094 /* Return true if X is a sign_extract or zero_extract from the least
6095 significant bit. */
6096
6097 static bool
lsb_bitfield_op_p(rtx x)6098 lsb_bitfield_op_p (rtx x)
6099 {
6100 if (GET_RTX_CLASS (GET_CODE (x)) == RTX_BITFIELD_OPS)
6101 {
6102 machine_mode mode = GET_MODE (XEXP (x, 0));
6103 HOST_WIDE_INT len = INTVAL (XEXP (x, 1));
6104 HOST_WIDE_INT pos = INTVAL (XEXP (x, 2));
6105 poly_int64 remaining_bits = GET_MODE_PRECISION (mode) - len;
6106
6107 return known_eq (pos, BITS_BIG_ENDIAN ? remaining_bits : 0);
6108 }
6109 return false;
6110 }
6111
6112 /* Strip outer address "mutations" from LOC and return a pointer to the
6113 inner value. If OUTER_CODE is nonnull, store the code of the innermost
6114 stripped expression there.
6115
6116 "Mutations" either convert between modes or apply some kind of
6117 extension, truncation or alignment. */
6118
6119 rtx *
strip_address_mutations(rtx * loc,enum rtx_code * outer_code)6120 strip_address_mutations (rtx *loc, enum rtx_code *outer_code)
6121 {
6122 for (;;)
6123 {
6124 enum rtx_code code = GET_CODE (*loc);
6125 if (GET_RTX_CLASS (code) == RTX_UNARY)
6126 /* Things like SIGN_EXTEND, ZERO_EXTEND and TRUNCATE can be
6127 used to convert between pointer sizes. */
6128 loc = &XEXP (*loc, 0);
6129 else if (lsb_bitfield_op_p (*loc))
6130 /* A [SIGN|ZERO]_EXTRACT from the least significant bit effectively
6131 acts as a combined truncation and extension. */
6132 loc = &XEXP (*loc, 0);
6133 else if (code == AND && CONST_INT_P (XEXP (*loc, 1)))
6134 /* (and ... (const_int -X)) is used to align to X bytes. */
6135 loc = &XEXP (*loc, 0);
6136 else if (code == SUBREG
6137 && !OBJECT_P (SUBREG_REG (*loc))
6138 && subreg_lowpart_p (*loc))
6139 /* (subreg (operator ...) ...) inside and is used for mode
6140 conversion too. */
6141 loc = &SUBREG_REG (*loc);
6142 else
6143 return loc;
6144 if (outer_code)
6145 *outer_code = code;
6146 }
6147 }
6148
6149 /* Return true if CODE applies some kind of scale. The scaled value is
6150 is the first operand and the scale is the second. */
6151
6152 static bool
binary_scale_code_p(enum rtx_code code)6153 binary_scale_code_p (enum rtx_code code)
6154 {
6155 return (code == MULT
6156 || code == ASHIFT
6157 /* Needed by ARM targets. */
6158 || code == ASHIFTRT
6159 || code == LSHIFTRT
6160 || code == ROTATE
6161 || code == ROTATERT);
6162 }
6163
6164 /* If *INNER can be interpreted as a base, return a pointer to the inner term
6165 (see address_info). Return null otherwise. */
6166
6167 static rtx *
get_base_term(rtx * inner)6168 get_base_term (rtx *inner)
6169 {
6170 if (GET_CODE (*inner) == LO_SUM)
6171 inner = strip_address_mutations (&XEXP (*inner, 0));
6172 if (REG_P (*inner)
6173 || MEM_P (*inner)
6174 || GET_CODE (*inner) == SUBREG
6175 || GET_CODE (*inner) == SCRATCH)
6176 return inner;
6177 return 0;
6178 }
6179
6180 /* If *INNER can be interpreted as an index, return a pointer to the inner term
6181 (see address_info). Return null otherwise. */
6182
6183 static rtx *
get_index_term(rtx * inner)6184 get_index_term (rtx *inner)
6185 {
6186 /* At present, only constant scales are allowed. */
6187 if (binary_scale_code_p (GET_CODE (*inner)) && CONSTANT_P (XEXP (*inner, 1)))
6188 inner = strip_address_mutations (&XEXP (*inner, 0));
6189 if (REG_P (*inner)
6190 || MEM_P (*inner)
6191 || GET_CODE (*inner) == SUBREG
6192 || GET_CODE (*inner) == SCRATCH)
6193 return inner;
6194 return 0;
6195 }
6196
6197 /* Set the segment part of address INFO to LOC, given that INNER is the
6198 unmutated value. */
6199
6200 static void
set_address_segment(struct address_info * info,rtx * loc,rtx * inner)6201 set_address_segment (struct address_info *info, rtx *loc, rtx *inner)
6202 {
6203 gcc_assert (!info->segment);
6204 info->segment = loc;
6205 info->segment_term = inner;
6206 }
6207
6208 /* Set the base part of address INFO to LOC, given that INNER is the
6209 unmutated value. */
6210
6211 static void
set_address_base(struct address_info * info,rtx * loc,rtx * inner)6212 set_address_base (struct address_info *info, rtx *loc, rtx *inner)
6213 {
6214 gcc_assert (!info->base);
6215 info->base = loc;
6216 info->base_term = inner;
6217 }
6218
6219 /* Set the index part of address INFO to LOC, given that INNER is the
6220 unmutated value. */
6221
6222 static void
set_address_index(struct address_info * info,rtx * loc,rtx * inner)6223 set_address_index (struct address_info *info, rtx *loc, rtx *inner)
6224 {
6225 gcc_assert (!info->index);
6226 info->index = loc;
6227 info->index_term = inner;
6228 }
6229
6230 /* Set the displacement part of address INFO to LOC, given that INNER
6231 is the constant term. */
6232
6233 static void
set_address_disp(struct address_info * info,rtx * loc,rtx * inner)6234 set_address_disp (struct address_info *info, rtx *loc, rtx *inner)
6235 {
6236 gcc_assert (!info->disp);
6237 info->disp = loc;
6238 info->disp_term = inner;
6239 }
6240
6241 /* INFO->INNER describes a {PRE,POST}_{INC,DEC} address. Set up the
6242 rest of INFO accordingly. */
6243
6244 static void
decompose_incdec_address(struct address_info * info)6245 decompose_incdec_address (struct address_info *info)
6246 {
6247 info->autoinc_p = true;
6248
6249 rtx *base = &XEXP (*info->inner, 0);
6250 set_address_base (info, base, base);
6251 gcc_checking_assert (info->base == info->base_term);
6252
6253 /* These addresses are only valid when the size of the addressed
6254 value is known. */
6255 gcc_checking_assert (info->mode != VOIDmode);
6256 }
6257
6258 /* INFO->INNER describes a {PRE,POST}_MODIFY address. Set up the rest
6259 of INFO accordingly. */
6260
6261 static void
decompose_automod_address(struct address_info * info)6262 decompose_automod_address (struct address_info *info)
6263 {
6264 info->autoinc_p = true;
6265
6266 rtx *base = &XEXP (*info->inner, 0);
6267 set_address_base (info, base, base);
6268 gcc_checking_assert (info->base == info->base_term);
6269
6270 rtx plus = XEXP (*info->inner, 1);
6271 gcc_assert (GET_CODE (plus) == PLUS);
6272
6273 info->base_term2 = &XEXP (plus, 0);
6274 gcc_checking_assert (rtx_equal_p (*info->base_term, *info->base_term2));
6275
6276 rtx *step = &XEXP (plus, 1);
6277 rtx *inner_step = strip_address_mutations (step);
6278 if (CONSTANT_P (*inner_step))
6279 set_address_disp (info, step, inner_step);
6280 else
6281 set_address_index (info, step, inner_step);
6282 }
6283
6284 /* Treat *LOC as a tree of PLUS operands and store pointers to the summed
6285 values in [PTR, END). Return a pointer to the end of the used array. */
6286
6287 static rtx **
extract_plus_operands(rtx * loc,rtx ** ptr,rtx ** end)6288 extract_plus_operands (rtx *loc, rtx **ptr, rtx **end)
6289 {
6290 rtx x = *loc;
6291 if (GET_CODE (x) == PLUS)
6292 {
6293 ptr = extract_plus_operands (&XEXP (x, 0), ptr, end);
6294 ptr = extract_plus_operands (&XEXP (x, 1), ptr, end);
6295 }
6296 else
6297 {
6298 gcc_assert (ptr != end);
6299 *ptr++ = loc;
6300 }
6301 return ptr;
6302 }
6303
6304 /* Evaluate the likelihood of X being a base or index value, returning
6305 positive if it is likely to be a base, negative if it is likely to be
6306 an index, and 0 if we can't tell. Make the magnitude of the return
6307 value reflect the amount of confidence we have in the answer.
6308
6309 MODE, AS, OUTER_CODE and INDEX_CODE are as for ok_for_base_p_1. */
6310
6311 static int
baseness(rtx x,machine_mode mode,addr_space_t as,enum rtx_code outer_code,enum rtx_code index_code)6312 baseness (rtx x, machine_mode mode, addr_space_t as,
6313 enum rtx_code outer_code, enum rtx_code index_code)
6314 {
6315 /* Believe *_POINTER unless the address shape requires otherwise. */
6316 if (REG_P (x) && REG_POINTER (x))
6317 return 2;
6318 if (MEM_P (x) && MEM_POINTER (x))
6319 return 2;
6320
6321 if (REG_P (x) && HARD_REGISTER_P (x))
6322 {
6323 /* X is a hard register. If it only fits one of the base
6324 or index classes, choose that interpretation. */
6325 int regno = REGNO (x);
6326 bool base_p = ok_for_base_p_1 (regno, mode, as, outer_code, index_code);
6327 bool index_p = REGNO_OK_FOR_INDEX_P (regno);
6328 if (base_p != index_p)
6329 return base_p ? 1 : -1;
6330 }
6331 return 0;
6332 }
6333
6334 /* INFO->INNER describes a normal, non-automodified address.
6335 Fill in the rest of INFO accordingly. */
6336
6337 static void
decompose_normal_address(struct address_info * info)6338 decompose_normal_address (struct address_info *info)
6339 {
6340 /* Treat the address as the sum of up to four values. */
6341 rtx *ops[4];
6342 size_t n_ops = extract_plus_operands (info->inner, ops,
6343 ops + ARRAY_SIZE (ops)) - ops;
6344
6345 /* If there is more than one component, any base component is in a PLUS. */
6346 if (n_ops > 1)
6347 info->base_outer_code = PLUS;
6348
6349 /* Try to classify each sum operand now. Leave those that could be
6350 either a base or an index in OPS. */
6351 rtx *inner_ops[4];
6352 size_t out = 0;
6353 for (size_t in = 0; in < n_ops; ++in)
6354 {
6355 rtx *loc = ops[in];
6356 rtx *inner = strip_address_mutations (loc);
6357 if (CONSTANT_P (*inner))
6358 set_address_disp (info, loc, inner);
6359 else if (GET_CODE (*inner) == UNSPEC)
6360 set_address_segment (info, loc, inner);
6361 else
6362 {
6363 /* The only other possibilities are a base or an index. */
6364 rtx *base_term = get_base_term (inner);
6365 rtx *index_term = get_index_term (inner);
6366 gcc_assert (base_term || index_term);
6367 if (!base_term)
6368 set_address_index (info, loc, index_term);
6369 else if (!index_term)
6370 set_address_base (info, loc, base_term);
6371 else
6372 {
6373 gcc_assert (base_term == index_term);
6374 ops[out] = loc;
6375 inner_ops[out] = base_term;
6376 ++out;
6377 }
6378 }
6379 }
6380
6381 /* Classify the remaining OPS members as bases and indexes. */
6382 if (out == 1)
6383 {
6384 /* If we haven't seen a base or an index yet, assume that this is
6385 the base. If we were confident that another term was the base
6386 or index, treat the remaining operand as the other kind. */
6387 if (!info->base)
6388 set_address_base (info, ops[0], inner_ops[0]);
6389 else
6390 set_address_index (info, ops[0], inner_ops[0]);
6391 }
6392 else if (out == 2)
6393 {
6394 /* In the event of a tie, assume the base comes first. */
6395 if (baseness (*inner_ops[0], info->mode, info->as, PLUS,
6396 GET_CODE (*ops[1]))
6397 >= baseness (*inner_ops[1], info->mode, info->as, PLUS,
6398 GET_CODE (*ops[0])))
6399 {
6400 set_address_base (info, ops[0], inner_ops[0]);
6401 set_address_index (info, ops[1], inner_ops[1]);
6402 }
6403 else
6404 {
6405 set_address_base (info, ops[1], inner_ops[1]);
6406 set_address_index (info, ops[0], inner_ops[0]);
6407 }
6408 }
6409 else
6410 gcc_assert (out == 0);
6411 }
6412
6413 /* Describe address *LOC in *INFO. MODE is the mode of the addressed value,
6414 or VOIDmode if not known. AS is the address space associated with LOC.
6415 OUTER_CODE is MEM if *LOC is a MEM address and ADDRESS otherwise. */
6416
6417 void
decompose_address(struct address_info * info,rtx * loc,machine_mode mode,addr_space_t as,enum rtx_code outer_code)6418 decompose_address (struct address_info *info, rtx *loc, machine_mode mode,
6419 addr_space_t as, enum rtx_code outer_code)
6420 {
6421 memset (info, 0, sizeof (*info));
6422 info->mode = mode;
6423 info->as = as;
6424 info->addr_outer_code = outer_code;
6425 info->outer = loc;
6426 info->inner = strip_address_mutations (loc, &outer_code);
6427 info->base_outer_code = outer_code;
6428 switch (GET_CODE (*info->inner))
6429 {
6430 case PRE_DEC:
6431 case PRE_INC:
6432 case POST_DEC:
6433 case POST_INC:
6434 decompose_incdec_address (info);
6435 break;
6436
6437 case PRE_MODIFY:
6438 case POST_MODIFY:
6439 decompose_automod_address (info);
6440 break;
6441
6442 default:
6443 decompose_normal_address (info);
6444 break;
6445 }
6446 }
6447
6448 /* Describe address operand LOC in INFO. */
6449
6450 void
decompose_lea_address(struct address_info * info,rtx * loc)6451 decompose_lea_address (struct address_info *info, rtx *loc)
6452 {
6453 decompose_address (info, loc, VOIDmode, ADDR_SPACE_GENERIC, ADDRESS);
6454 }
6455
6456 /* Describe the address of MEM X in INFO. */
6457
6458 void
decompose_mem_address(struct address_info * info,rtx x)6459 decompose_mem_address (struct address_info *info, rtx x)
6460 {
6461 gcc_assert (MEM_P (x));
6462 decompose_address (info, &XEXP (x, 0), GET_MODE (x),
6463 MEM_ADDR_SPACE (x), MEM);
6464 }
6465
6466 /* Update INFO after a change to the address it describes. */
6467
6468 void
update_address(struct address_info * info)6469 update_address (struct address_info *info)
6470 {
6471 decompose_address (info, info->outer, info->mode, info->as,
6472 info->addr_outer_code);
6473 }
6474
6475 /* Return the scale applied to *INFO->INDEX_TERM, or 0 if the index is
6476 more complicated than that. */
6477
6478 HOST_WIDE_INT
get_index_scale(const struct address_info * info)6479 get_index_scale (const struct address_info *info)
6480 {
6481 rtx index = *info->index;
6482 if (GET_CODE (index) == MULT
6483 && CONST_INT_P (XEXP (index, 1))
6484 && info->index_term == &XEXP (index, 0))
6485 return INTVAL (XEXP (index, 1));
6486
6487 if (GET_CODE (index) == ASHIFT
6488 && CONST_INT_P (XEXP (index, 1))
6489 && info->index_term == &XEXP (index, 0))
6490 return HOST_WIDE_INT_1 << INTVAL (XEXP (index, 1));
6491
6492 if (info->index == info->index_term)
6493 return 1;
6494
6495 return 0;
6496 }
6497
6498 /* Return the "index code" of INFO, in the form required by
6499 ok_for_base_p_1. */
6500
6501 enum rtx_code
get_index_code(const struct address_info * info)6502 get_index_code (const struct address_info *info)
6503 {
6504 if (info->index)
6505 return GET_CODE (*info->index);
6506
6507 if (info->disp)
6508 return GET_CODE (*info->disp);
6509
6510 return SCRATCH;
6511 }
6512
6513 /* Return true if RTL X contains a SYMBOL_REF. */
6514
6515 bool
contains_symbol_ref_p(const_rtx x)6516 contains_symbol_ref_p (const_rtx x)
6517 {
6518 subrtx_iterator::array_type array;
6519 FOR_EACH_SUBRTX (iter, array, x, ALL)
6520 if (SYMBOL_REF_P (*iter))
6521 return true;
6522
6523 return false;
6524 }
6525
6526 /* Return true if RTL X contains a SYMBOL_REF or LABEL_REF. */
6527
6528 bool
contains_symbolic_reference_p(const_rtx x)6529 contains_symbolic_reference_p (const_rtx x)
6530 {
6531 subrtx_iterator::array_type array;
6532 FOR_EACH_SUBRTX (iter, array, x, ALL)
6533 if (SYMBOL_REF_P (*iter) || GET_CODE (*iter) == LABEL_REF)
6534 return true;
6535
6536 return false;
6537 }
6538
6539 /* Return true if X contains a thread-local symbol. */
6540
6541 bool
tls_referenced_p(const_rtx x)6542 tls_referenced_p (const_rtx x)
6543 {
6544 if (!targetm.have_tls)
6545 return false;
6546
6547 subrtx_iterator::array_type array;
6548 FOR_EACH_SUBRTX (iter, array, x, ALL)
6549 if (GET_CODE (*iter) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (*iter) != 0)
6550 return true;
6551 return false;
6552 }
6553