1 /* Analyze RTL for GNU compiler.
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software
4 Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 2, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING. If not, write to the Free
20 Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
21 02110-1301, USA. */
22
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "toplev.h"
29 #include "rtl.h"
30 #include "hard-reg-set.h"
31 #include "insn-config.h"
32 #include "recog.h"
33 #include "target.h"
34 #include "output.h"
35 #include "tm_p.h"
36 #include "flags.h"
37 #include "real.h"
38 #include "regs.h"
39 #include "function.h"
40
41 /* Forward declarations */
42 static void set_of_1 (rtx, rtx, void *);
43 static bool covers_regno_p (rtx, unsigned int);
44 static bool covers_regno_no_parallel_p (rtx, unsigned int);
45 static int rtx_referenced_p_1 (rtx *, void *);
46 static int computed_jump_p_1 (rtx);
47 static void parms_set (rtx, rtx, void *);
48
49 static unsigned HOST_WIDE_INT cached_nonzero_bits (rtx, enum machine_mode,
50 rtx, enum machine_mode,
51 unsigned HOST_WIDE_INT);
52 static unsigned HOST_WIDE_INT nonzero_bits1 (rtx, enum machine_mode, rtx,
53 enum machine_mode,
54 unsigned HOST_WIDE_INT);
55 static unsigned int cached_num_sign_bit_copies (rtx, enum machine_mode, rtx,
56 enum machine_mode,
57 unsigned int);
58 static unsigned int num_sign_bit_copies1 (rtx, enum machine_mode, rtx,
59 enum machine_mode, unsigned int);
60
61 /* Offset of the first 'e', 'E' or 'V' operand for each rtx code, or
62 -1 if a code has no such operand. */
63 static int non_rtx_starting_operands[NUM_RTX_CODE];
64
65 /* Bit flags that specify the machine subtype we are compiling for.
66 Bits are tested using macros TARGET_... defined in the tm.h file
67 and set by `-m...' switches. Must be defined in rtlanal.c. */
68
69 int target_flags;
70
71 /* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
72 If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
73 SIGN_EXTEND then while narrowing we also have to enforce the
74 representation and sign-extend the value to mode DESTINATION_REP.
75
76 If the value is already sign-extended to DESTINATION_REP mode we
77 can just switch to DESTINATION mode on it. For each pair of
78 integral modes SOURCE and DESTINATION, when truncating from SOURCE
79 to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
80 contains the number of high-order bits in SOURCE that have to be
81 copies of the sign-bit so that we can do this mode-switch to
82 DESTINATION. */
83
84 static unsigned int
85 num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
86
87 /* Return 1 if the value of X is unstable
88 (would be different at a different point in the program).
89 The frame pointer, arg pointer, etc. are considered stable
90 (within one function) and so is anything marked `unchanging'. */
91
92 int
93 rtx_unstable_p (rtx x)
94 {
95 RTX_CODE code = GET_CODE (x);
96 int i;
97 const char *fmt;
98
99 switch (code)
100 {
101 case MEM:
102 return !MEM_READONLY_P (x) || rtx_unstable_p (XEXP (x, 0));
103
104 case CONST:
105 case CONST_INT:
106 case CONST_DOUBLE:
107 case CONST_VECTOR:
108 case SYMBOL_REF:
109 case LABEL_REF:
110 return 0;
111
112 case REG:
113 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
114 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
115 /* The arg pointer varies if it is not a fixed register. */
116 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
117 return 0;
118 #ifndef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
119 /* ??? When call-clobbered, the value is stable modulo the restore
120 that must happen after a call. This currently screws up local-alloc
121 into believing that the restore is not needed. */
122 if (x == pic_offset_table_rtx)
123 return 0;
124 #endif
125 return 1;
126
127 case ASM_OPERANDS:
128 if (MEM_VOLATILE_P (x))
129 return 1;
130
131 /* Fall through. */
132
133 default:
134 break;
135 }
136
137 fmt = GET_RTX_FORMAT (code);
138 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
139 if (fmt[i] == 'e')
140 {
141 if (rtx_unstable_p (XEXP (x, i)))
142 return 1;
143 }
144 else if (fmt[i] == 'E')
145 {
146 int j;
147 for (j = 0; j < XVECLEN (x, i); j++)
148 if (rtx_unstable_p (XVECEXP (x, i, j)))
149 return 1;
150 }
151
152 return 0;
153 }
154
155 /* Return 1 if X has a value that can vary even between two
156 executions of the program. 0 means X can be compared reliably
157 against certain constants or near-constants.
158 FOR_ALIAS is nonzero if we are called from alias analysis; if it is
159 zero, we are slightly more conservative.
160 The frame pointer and the arg pointer are considered constant. */
161
162 int
163 rtx_varies_p (rtx x, int for_alias)
164 {
165 RTX_CODE code;
166 int i;
167 const char *fmt;
168
169 if (!x)
170 return 0;
171
172 code = GET_CODE (x);
173 switch (code)
174 {
175 case MEM:
176 return !MEM_READONLY_P (x) || rtx_varies_p (XEXP (x, 0), for_alias);
177
178 case CONST:
179 case CONST_INT:
180 case CONST_DOUBLE:
181 case CONST_VECTOR:
182 case SYMBOL_REF:
183 case LABEL_REF:
184 return 0;
185
186 case REG:
187 /* Note that we have to test for the actual rtx used for the frame
188 and arg pointers and not just the register number in case we have
189 eliminated the frame and/or arg pointer and are using it
190 for pseudos. */
191 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
192 /* The arg pointer varies if it is not a fixed register. */
193 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
194 return 0;
195 if (x == pic_offset_table_rtx
196 #ifdef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED
197 /* ??? When call-clobbered, the value is stable modulo the restore
198 that must happen after a call. This currently screws up
199 local-alloc into believing that the restore is not needed, so we
200 must return 0 only if we are called from alias analysis. */
201 && for_alias
202 #endif
203 )
204 return 0;
205 return 1;
206
207 case LO_SUM:
208 /* The operand 0 of a LO_SUM is considered constant
209 (in fact it is related specifically to operand 1)
210 during alias analysis. */
211 return (! for_alias && rtx_varies_p (XEXP (x, 0), for_alias))
212 || rtx_varies_p (XEXP (x, 1), for_alias);
213
214 case ASM_OPERANDS:
215 if (MEM_VOLATILE_P (x))
216 return 1;
217
218 /* Fall through. */
219
220 default:
221 break;
222 }
223
224 fmt = GET_RTX_FORMAT (code);
225 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
226 if (fmt[i] == 'e')
227 {
228 if (rtx_varies_p (XEXP (x, i), for_alias))
229 return 1;
230 }
231 else if (fmt[i] == 'E')
232 {
233 int j;
234 for (j = 0; j < XVECLEN (x, i); j++)
235 if (rtx_varies_p (XVECEXP (x, i, j), for_alias))
236 return 1;
237 }
238
239 return 0;
240 }
241
242 /* Return nonzero if the use of X as an address in a MEM can cause a trap.
243 MODE is the mode of the MEM (not that of X) and UNALIGNED_MEMS controls
244 whether nonzero is returned for unaligned memory accesses on strict
245 alignment machines. */
246
247 static int
248 rtx_addr_can_trap_p_1 (rtx x, enum machine_mode mode, bool unaligned_mems)
249 {
250 enum rtx_code code = GET_CODE (x);
251
252 switch (code)
253 {
254 case SYMBOL_REF:
255 return SYMBOL_REF_WEAK (x);
256
257 case LABEL_REF:
258 return 0;
259
260 case REG:
261 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
262 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
263 || x == stack_pointer_rtx
264 /* The arg pointer varies if it is not a fixed register. */
265 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
266 return 0;
267 /* All of the virtual frame registers are stack references. */
268 if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
269 && REGNO (x) <= LAST_VIRTUAL_REGISTER)
270 return 0;
271 return 1;
272
273 case CONST:
274 return rtx_addr_can_trap_p_1 (XEXP (x, 0), mode, unaligned_mems);
275
276 case PLUS:
277 /* An address is assumed not to trap if:
278 - it is an address that can't trap plus a constant integer,
279 with the proper remainder modulo the mode size if we are
280 considering unaligned memory references. */
281 if (!rtx_addr_can_trap_p_1 (XEXP (x, 0), mode, unaligned_mems)
282 && GET_CODE (XEXP (x, 1)) == CONST_INT)
283 {
284 HOST_WIDE_INT offset;
285
286 if (!STRICT_ALIGNMENT
287 || !unaligned_mems
288 || GET_MODE_SIZE (mode) == 0)
289 return 0;
290
291 offset = INTVAL (XEXP (x, 1));
292
293 #ifdef SPARC_STACK_BOUNDARY_HACK
294 /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
295 the real alignment of %sp. However, when it does this, the
296 alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY. */
297 if (SPARC_STACK_BOUNDARY_HACK
298 && (XEXP (x, 0) == stack_pointer_rtx
299 || XEXP (x, 0) == hard_frame_pointer_rtx))
300 offset -= STACK_POINTER_OFFSET;
301 #endif
302
303 return offset % GET_MODE_SIZE (mode) != 0;
304 }
305
306 /* - or it is the pic register plus a constant. */
307 if (XEXP (x, 0) == pic_offset_table_rtx && CONSTANT_P (XEXP (x, 1)))
308 return 0;
309
310 return 1;
311
312 case LO_SUM:
313 case PRE_MODIFY:
314 return rtx_addr_can_trap_p_1 (XEXP (x, 1), mode, unaligned_mems);
315
316 case PRE_DEC:
317 case PRE_INC:
318 case POST_DEC:
319 case POST_INC:
320 case POST_MODIFY:
321 return rtx_addr_can_trap_p_1 (XEXP (x, 0), mode, unaligned_mems);
322
323 default:
324 break;
325 }
326
327 /* If it isn't one of the cases above, it can cause a trap. */
328 return 1;
329 }
330
331 /* Return nonzero if the use of X as an address in a MEM can cause a trap. */
332
333 int
334 rtx_addr_can_trap_p (rtx x)
335 {
336 return rtx_addr_can_trap_p_1 (x, VOIDmode, false);
337 }
338
339 /* Return true if X is an address that is known to not be zero. */
340
341 bool
342 nonzero_address_p (rtx x)
343 {
344 enum rtx_code code = GET_CODE (x);
345
346 switch (code)
347 {
348 case SYMBOL_REF:
349 return !SYMBOL_REF_WEAK (x);
350
351 case LABEL_REF:
352 return true;
353
354 case REG:
355 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
356 if (x == frame_pointer_rtx || x == hard_frame_pointer_rtx
357 || x == stack_pointer_rtx
358 || (x == arg_pointer_rtx && fixed_regs[ARG_POINTER_REGNUM]))
359 return true;
360 /* All of the virtual frame registers are stack references. */
361 if (REGNO (x) >= FIRST_VIRTUAL_REGISTER
362 && REGNO (x) <= LAST_VIRTUAL_REGISTER)
363 return true;
364 return false;
365
366 case CONST:
367 return nonzero_address_p (XEXP (x, 0));
368
369 case PLUS:
370 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
371 return nonzero_address_p (XEXP (x, 0));
372 /* Handle PIC references. */
373 else if (XEXP (x, 0) == pic_offset_table_rtx
374 && CONSTANT_P (XEXP (x, 1)))
375 return true;
376 return false;
377
378 case PRE_MODIFY:
379 /* Similar to the above; allow positive offsets. Further, since
380 auto-inc is only allowed in memories, the register must be a
381 pointer. */
382 if (GET_CODE (XEXP (x, 1)) == CONST_INT
383 && INTVAL (XEXP (x, 1)) > 0)
384 return true;
385 return nonzero_address_p (XEXP (x, 0));
386
387 case PRE_INC:
388 /* Similarly. Further, the offset is always positive. */
389 return true;
390
391 case PRE_DEC:
392 case POST_DEC:
393 case POST_INC:
394 case POST_MODIFY:
395 return nonzero_address_p (XEXP (x, 0));
396
397 case LO_SUM:
398 return nonzero_address_p (XEXP (x, 1));
399
400 default:
401 break;
402 }
403
404 /* If it isn't one of the cases above, it might be zero. */
405 return false;
406 }
407
408 /* Return 1 if X refers to a memory location whose address
409 cannot be compared reliably with constant addresses,
410 or if X refers to a BLKmode memory object.
411 FOR_ALIAS is nonzero if we are called from alias analysis; if it is
412 zero, we are slightly more conservative. */
413
414 int
415 rtx_addr_varies_p (rtx x, int for_alias)
416 {
417 enum rtx_code code;
418 int i;
419 const char *fmt;
420
421 if (x == 0)
422 return 0;
423
424 code = GET_CODE (x);
425 if (code == MEM)
426 return GET_MODE (x) == BLKmode || rtx_varies_p (XEXP (x, 0), for_alias);
427
428 fmt = GET_RTX_FORMAT (code);
429 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
430 if (fmt[i] == 'e')
431 {
432 if (rtx_addr_varies_p (XEXP (x, i), for_alias))
433 return 1;
434 }
435 else if (fmt[i] == 'E')
436 {
437 int j;
438 for (j = 0; j < XVECLEN (x, i); j++)
439 if (rtx_addr_varies_p (XVECEXP (x, i, j), for_alias))
440 return 1;
441 }
442 return 0;
443 }
444
445 /* Return the value of the integer term in X, if one is apparent;
446 otherwise return 0.
447 Only obvious integer terms are detected.
448 This is used in cse.c with the `related_value' field. */
449
450 HOST_WIDE_INT
451 get_integer_term (rtx x)
452 {
453 if (GET_CODE (x) == CONST)
454 x = XEXP (x, 0);
455
456 if (GET_CODE (x) == MINUS
457 && GET_CODE (XEXP (x, 1)) == CONST_INT)
458 return - INTVAL (XEXP (x, 1));
459 if (GET_CODE (x) == PLUS
460 && GET_CODE (XEXP (x, 1)) == CONST_INT)
461 return INTVAL (XEXP (x, 1));
462 return 0;
463 }
464
465 /* If X is a constant, return the value sans apparent integer term;
466 otherwise return 0.
467 Only obvious integer terms are detected. */
468
469 rtx
470 get_related_value (rtx x)
471 {
472 if (GET_CODE (x) != CONST)
473 return 0;
474 x = XEXP (x, 0);
475 if (GET_CODE (x) == PLUS
476 && GET_CODE (XEXP (x, 1)) == CONST_INT)
477 return XEXP (x, 0);
478 else if (GET_CODE (x) == MINUS
479 && GET_CODE (XEXP (x, 1)) == CONST_INT)
480 return XEXP (x, 0);
481 return 0;
482 }
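
/* Illustration (not in the original sources): for the constant
   (const (plus (symbol_ref "x") (const_int 8))), get_integer_term
   returns 8 and get_related_value returns (symbol_ref "x"); for a
   MINUS of a CONST_INT, get_integer_term returns the value negated.  */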
483
484 /* Return the number of places FIND appears within X. If COUNT_DEST is
485 zero, we do not count occurrences inside the destination of a SET. */
486
487 int
488 count_occurrences (rtx x, rtx find, int count_dest)
489 {
490 int i, j;
491 enum rtx_code code;
492 const char *format_ptr;
493 int count;
494
495 if (x == find)
496 return 1;
497
498 code = GET_CODE (x);
499
500 switch (code)
501 {
502 case REG:
503 case CONST_INT:
504 case CONST_DOUBLE:
505 case CONST_VECTOR:
506 case SYMBOL_REF:
507 case CODE_LABEL:
508 case PC:
509 case CC0:
510 return 0;
511
512 case MEM:
513 if (MEM_P (find) && rtx_equal_p (x, find))
514 return 1;
515 break;
516
517 case SET:
518 if (SET_DEST (x) == find && ! count_dest)
519 return count_occurrences (SET_SRC (x), find, count_dest);
520 break;
521
522 default:
523 break;
524 }
525
526 format_ptr = GET_RTX_FORMAT (code);
527 count = 0;
528
529 for (i = 0; i < GET_RTX_LENGTH (code); i++)
530 {
531 switch (*format_ptr++)
532 {
533 case 'e':
534 count += count_occurrences (XEXP (x, i), find, count_dest);
535 break;
536
537 case 'E':
538 for (j = 0; j < XVECLEN (x, i); j++)
539 count += count_occurrences (XVECEXP (x, i, j), find, count_dest);
540 break;
541 }
542 }
543 return count;
544 }
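
/* Example (illustrative only): with COUNT_DEST zero, occurrences of FIND
   that appear only as the SET_DEST of a SET are not counted, so for
   (set (reg A) (plus (reg A) (const_int 1))) and FIND == (reg A) the
   result is 1, while with COUNT_DEST nonzero it is 2.  This assumes the
   two (reg A) references are the same shared rtx, since the SET_DEST
   check above uses pointer equality.  */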
545
546 /* Nonzero if register REG appears somewhere within IN.
547 Also works if REG is not a register; in this case it checks
548 for a subexpression of IN that is Lisp "equal" to REG. */
549
550 int
551 reg_mentioned_p (rtx reg, rtx in)
552 {
553 const char *fmt;
554 int i;
555 enum rtx_code code;
556
557 if (in == 0)
558 return 0;
559
560 if (reg == in)
561 return 1;
562
563 if (GET_CODE (in) == LABEL_REF)
564 return reg == XEXP (in, 0);
565
566 code = GET_CODE (in);
567
568 switch (code)
569 {
570 /* Compare registers by number. */
571 case REG:
572 return REG_P (reg) && REGNO (in) == REGNO (reg);
573
574 /* These codes have no constituent expressions
575 and are unique. */
576 case SCRATCH:
577 case CC0:
578 case PC:
579 return 0;
580
581 case CONST_INT:
582 case CONST_VECTOR:
583 case CONST_DOUBLE:
584 /* These are kept unique for a given value. */
585 return 0;
586
587 default:
588 break;
589 }
590
591 if (GET_CODE (reg) == code && rtx_equal_p (reg, in))
592 return 1;
593
594 fmt = GET_RTX_FORMAT (code);
595
596 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
597 {
598 if (fmt[i] == 'E')
599 {
600 int j;
601 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
602 if (reg_mentioned_p (reg, XVECEXP (in, i, j)))
603 return 1;
604 }
605 else if (fmt[i] == 'e'
606 && reg_mentioned_p (reg, XEXP (in, i)))
607 return 1;
608 }
609 return 0;
610 }
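
/* Note that REGs are compared only by register number here, so a hard
   register mentioned in a different mode still counts; e.g. (reg:DI 0)
   is considered mentioned in an expression that uses (reg:SI 0).  */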
611
612 /* Return 1 if in between BEG and END, exclusive of BEG and END, there is
613 no CODE_LABEL insn. */
614
615 int
616 no_labels_between_p (rtx beg, rtx end)
617 {
618 rtx p;
619 if (beg == end)
620 return 0;
621 for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
622 if (LABEL_P (p))
623 return 0;
624 return 1;
625 }
626
627 /* Nonzero if register REG is used in an insn between
628 FROM_INSN and TO_INSN (exclusive of those two). */
629
630 int
631 reg_used_between_p (rtx reg, rtx from_insn, rtx to_insn)
632 {
633 rtx insn;
634
635 if (from_insn == to_insn)
636 return 0;
637
638 for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
639 if (INSN_P (insn)
640 && (reg_overlap_mentioned_p (reg, PATTERN (insn))
641 || (CALL_P (insn) && find_reg_fusage (insn, USE, reg))))
642 return 1;
643 return 0;
644 }
645
646 /* Nonzero if the old value of X, a register, is referenced in BODY. If X
647 is entirely replaced by a new value and the only use is as a SET_DEST,
648 we do not consider it a reference. */
649
650 int
651 reg_referenced_p (rtx x, rtx body)
652 {
653 int i;
654
655 switch (GET_CODE (body))
656 {
657 case SET:
658 if (reg_overlap_mentioned_p (x, SET_SRC (body)))
659 return 1;
660
661 /* If the destination is anything other than CC0, PC, a REG or a SUBREG
662 of a REG that occupies all of the REG, the insn references X if
663 it is mentioned in the destination. */
664 if (GET_CODE (SET_DEST (body)) != CC0
665 && GET_CODE (SET_DEST (body)) != PC
666 && !REG_P (SET_DEST (body))
667 && ! (GET_CODE (SET_DEST (body)) == SUBREG
668 && REG_P (SUBREG_REG (SET_DEST (body)))
669 && (((GET_MODE_SIZE (GET_MODE (SUBREG_REG (SET_DEST (body))))
670 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
671 == ((GET_MODE_SIZE (GET_MODE (SET_DEST (body)))
672 + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)))
673 && reg_overlap_mentioned_p (x, SET_DEST (body)))
674 return 1;
675 return 0;
676
677 case ASM_OPERANDS:
678 for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
679 if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)))
680 return 1;
681 return 0;
682
683 case CALL:
684 case USE:
685 case IF_THEN_ELSE:
686 return reg_overlap_mentioned_p (x, body);
687
688 case TRAP_IF:
689 return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));
690
691 case PREFETCH:
692 return reg_overlap_mentioned_p (x, XEXP (body, 0));
693
694 case UNSPEC:
695 case UNSPEC_VOLATILE:
696 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
697 if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)))
698 return 1;
699 return 0;
700
701 case PARALLEL:
702 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
703 if (reg_referenced_p (x, XVECEXP (body, 0, i)))
704 return 1;
705 return 0;
706
707 case CLOBBER:
708 if (MEM_P (XEXP (body, 0)))
709 if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)))
710 return 1;
711 return 0;
712
713 case COND_EXEC:
714 if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)))
715 return 1;
716 return reg_referenced_p (x, COND_EXEC_CODE (body));
717
718 default:
719 return 0;
720 }
721 }
722
723 /* Nonzero if register REG is set or clobbered in an insn between
724 FROM_INSN and TO_INSN (exclusive of those two). */
725
726 int
727 reg_set_between_p (rtx reg, rtx from_insn, rtx to_insn)
728 {
729 rtx insn;
730
731 if (from_insn == to_insn)
732 return 0;
733
734 for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
735 if (INSN_P (insn) && reg_set_p (reg, insn))
736 return 1;
737 return 0;
738 }
739
740 /* Internals of reg_set_between_p. */
741 int
742 reg_set_p (rtx reg, rtx insn)
743 {
744 /* We can be passed an insn or part of one. If we are passed an insn,
745 check if a side-effect of the insn clobbers REG. */
746 if (INSN_P (insn)
747 && (FIND_REG_INC_NOTE (insn, reg)
748 || (CALL_P (insn)
749 && ((REG_P (reg)
750 && REGNO (reg) < FIRST_PSEUDO_REGISTER
751 && TEST_HARD_REG_BIT (regs_invalidated_by_call,
752 REGNO (reg)))
753 || MEM_P (reg)
754 || find_reg_fusage (insn, CLOBBER, reg)))))
755 return 1;
756
757 return set_of (reg, insn) != NULL_RTX;
758 }
759
760 /* Similar to reg_set_between_p, but check all registers in X. Return 0
761 only if none of them are modified between START and END. Return 1 if
762 X contains a MEM; this routine does use memory aliasing. */
763
764 int
765 modified_between_p (rtx x, rtx start, rtx end)
766 {
767 enum rtx_code code = GET_CODE (x);
768 const char *fmt;
769 int i, j;
770 rtx insn;
771
772 if (start == end)
773 return 0;
774
775 switch (code)
776 {
777 case CONST_INT:
778 case CONST_DOUBLE:
779 case CONST_VECTOR:
780 case CONST:
781 case SYMBOL_REF:
782 case LABEL_REF:
783 return 0;
784
785 case PC:
786 case CC0:
787 return 1;
788
789 case MEM:
790 if (modified_between_p (XEXP (x, 0), start, end))
791 return 1;
792 if (MEM_READONLY_P (x))
793 return 0;
794 for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
795 if (memory_modified_in_insn_p (x, insn))
796 return 1;
797 return 0;
798 break;
799
800 case REG:
801 return reg_set_between_p (x, start, end);
802
803 default:
804 break;
805 }
806
807 fmt = GET_RTX_FORMAT (code);
808 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
809 {
810 if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
811 return 1;
812
813 else if (fmt[i] == 'E')
814 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
815 if (modified_between_p (XVECEXP (x, i, j), start, end))
816 return 1;
817 }
818
819 return 0;
820 }
821
822 /* Similar to reg_set_p, but check all registers in X. Return 0 only if none
823 of them are modified in INSN. Return 1 if X contains a MEM; this routine
824 does use memory aliasing. */
825
826 int
827 modified_in_p (rtx x, rtx insn)
828 {
829 enum rtx_code code = GET_CODE (x);
830 const char *fmt;
831 int i, j;
832
833 switch (code)
834 {
835 case CONST_INT:
836 case CONST_DOUBLE:
837 case CONST_VECTOR:
838 case CONST:
839 case SYMBOL_REF:
840 case LABEL_REF:
841 return 0;
842
843 case PC:
844 case CC0:
845 return 1;
846
847 case MEM:
848 if (modified_in_p (XEXP (x, 0), insn))
849 return 1;
850 if (MEM_READONLY_P (x))
851 return 0;
852 if (memory_modified_in_insn_p (x, insn))
853 return 1;
854 return 0;
855 break;
856
857 case REG:
858 return reg_set_p (x, insn);
859
860 default:
861 break;
862 }
863
864 fmt = GET_RTX_FORMAT (code);
865 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
866 {
867 if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
868 return 1;
869
870 else if (fmt[i] == 'E')
871 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
872 if (modified_in_p (XVECEXP (x, i, j), insn))
873 return 1;
874 }
875
876 return 0;
877 }
878
879 /* Helper function for set_of. */
880 struct set_of_data
881 {
882 rtx found;
883 rtx pat;
884 };
885
886 static void
887 set_of_1 (rtx x, rtx pat, void *data1)
888 {
889 struct set_of_data *data = (struct set_of_data *) (data1);
890 if (rtx_equal_p (x, data->pat)
891 || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
892 data->found = pat;
893 }
894
895 /* Given an INSN, return a SET or CLOBBER expression that does modify PAT
896 (either directly or via STRICT_LOW_PART and similar modifiers). */
897 rtx
898 set_of (rtx pat, rtx insn)
899 {
900 struct set_of_data data;
901 data.found = NULL_RTX;
902 data.pat = pat;
903 note_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
904 return data.found;
905 }
906
907 /* Given an INSN, return a SET expression if this insn has only a single SET.
908 It may also have CLOBBERs, USEs, or SETs whose output
909 will not be used, which we ignore. */
910
911 rtx
912 single_set_2 (rtx insn, rtx pat)
913 {
914 rtx set = NULL;
915 int set_verified = 1;
916 int i;
917
918 if (GET_CODE (pat) == PARALLEL)
919 {
920 for (i = 0; i < XVECLEN (pat, 0); i++)
921 {
922 rtx sub = XVECEXP (pat, 0, i);
923 switch (GET_CODE (sub))
924 {
925 case USE:
926 case CLOBBER:
927 break;
928
929 case SET:
930 /* We can consider an insn with multiple sets, where all but
931 one are dead, as a single-set insn.  In the common case only
932 a single set is present in the pattern, so we want to avoid
933 checking for REG_UNUSED notes unless necessary.
934 
935 When we reach a set the first time, we just assume it is the
936 single set we are looking for, and only when more sets are
937 found in the insn do we check them. */
938 if (!set_verified)
939 {
940 if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
941 && !side_effects_p (set))
942 set = NULL;
943 else
944 set_verified = 1;
945 }
946 if (!set)
947 set = sub, set_verified = 0;
948 else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
949 || side_effects_p (sub))
950 return NULL_RTX;
951 break;
952
953 default:
954 return NULL_RTX;
955 }
956 }
957 }
958 return set;
959 }
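
/* For example (illustrative), a PARALLEL of one SET plus a CLOBBER of a
   scratch register yields that SET, while a PARALLEL containing two SETs
   whose results are both live yields NULL_RTX.  This function is normally
   reached through the single_set macro, which handles the plain SET case
   inline.  */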
960
961 /* Given an INSN, return nonzero if it has more than one SET, else return
962 zero. */
963
964 int
965 multiple_sets (rtx insn)
966 {
967 int found;
968 int i;
969
970 /* INSN must be an insn. */
971 if (! INSN_P (insn))
972 return 0;
973
974 /* Only a PARALLEL can have multiple SETs. */
975 if (GET_CODE (PATTERN (insn)) == PARALLEL)
976 {
977 for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
978 if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
979 {
980 /* If we have already found a SET, then return now. */
981 if (found)
982 return 1;
983 else
984 found = 1;
985 }
986 }
987
988 /* Either zero or one SET. */
989 return 0;
990 }
991
992 /* Return nonzero if the destination of SET equals the source
993 and there are no side effects. */
994
995 int
996 set_noop_p (rtx set)
997 {
998 rtx src = SET_SRC (set);
999 rtx dst = SET_DEST (set);
1000
1001 if (dst == pc_rtx && src == pc_rtx)
1002 return 1;
1003
1004 if (MEM_P (dst) && MEM_P (src))
1005 return rtx_equal_p (dst, src) && !side_effects_p (dst);
1006
1007 if (GET_CODE (dst) == ZERO_EXTRACT)
1008 return rtx_equal_p (XEXP (dst, 0), src)
1009 && ! BYTES_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
1010 && !side_effects_p (src);
1011
1012 if (GET_CODE (dst) == STRICT_LOW_PART)
1013 dst = XEXP (dst, 0);
1014
1015 if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
1016 {
1017 if (SUBREG_BYTE (src) != SUBREG_BYTE (dst))
1018 return 0;
1019 src = SUBREG_REG (src);
1020 dst = SUBREG_REG (dst);
1021 }
1022
1023 return (REG_P (src) && REG_P (dst)
1024 && REGNO (src) == REGNO (dst));
1025 }
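
/* For instance, (set (reg:SI 65) (reg:SI 65)) is a no-op, and so is a
   copy between two rtx_equal_p MEMs whose destination has no side
   effects, but (set (reg:SI 65) (subreg:SI (reg:DI 66) 0)) is not,
   since the final test requires a REG on both sides.  */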
1026
1027 /* Return nonzero if an insn consists only of SETs, each of which only sets a
1028 value to itself. */
1029
1030 int
1031 noop_move_p (rtx insn)
1032 {
1033 rtx pat = PATTERN (insn);
1034
1035 if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
1036 return 1;
1037
1038 /* Insns carrying these notes are useful later on. */
1039 if (find_reg_note (insn, REG_EQUAL, NULL_RTX))
1040 return 0;
1041
1042 /* For now treat an insn with a REG_RETVAL note as a
1043 special insn which should not be considered a no-op. */
1044 if (find_reg_note (insn, REG_RETVAL, NULL_RTX))
1045 return 0;
1046
1047 if (GET_CODE (pat) == SET && set_noop_p (pat))
1048 return 1;
1049
1050 if (GET_CODE (pat) == PARALLEL)
1051 {
1052 int i;
1053 /* If nothing but SETs of registers to themselves,
1054 this insn can also be deleted. */
1055 for (i = 0; i < XVECLEN (pat, 0); i++)
1056 {
1057 rtx tem = XVECEXP (pat, 0, i);
1058
1059 if (GET_CODE (tem) == USE
1060 || GET_CODE (tem) == CLOBBER)
1061 continue;
1062
1063 if (GET_CODE (tem) != SET || ! set_noop_p (tem))
1064 return 0;
1065 }
1066
1067 return 1;
1068 }
1069 return 0;
1070 }
1071
1072
1073 /* Return the last thing that X was assigned from before *PINSN. If VALID_TO
1074 is not NULL_RTX then verify that the object is not modified up to VALID_TO.
1075 If the object was modified, if we hit a partial assignment to X, or hit a
1076 CODE_LABEL first, return X. If we found an assignment, update *PINSN to
1077 point to it. ALLOW_HWREG is set to 1 if hardware registers are allowed to
1078 be the src. */
1079
1080 rtx
1081 find_last_value (rtx x, rtx *pinsn, rtx valid_to, int allow_hwreg)
1082 {
1083 rtx p;
1084
1085 for (p = PREV_INSN (*pinsn); p && !LABEL_P (p);
1086 p = PREV_INSN (p))
1087 if (INSN_P (p))
1088 {
1089 rtx set = single_set (p);
1090 rtx note = find_reg_note (p, REG_EQUAL, NULL_RTX);
1091
1092 if (set && rtx_equal_p (x, SET_DEST (set)))
1093 {
1094 rtx src = SET_SRC (set);
1095
1096 if (note && GET_CODE (XEXP (note, 0)) != EXPR_LIST)
1097 src = XEXP (note, 0);
1098
1099 if ((valid_to == NULL_RTX
1100 || ! modified_between_p (src, PREV_INSN (p), valid_to))
1101 /* Reject hard registers because we don't usually want
1102 to use them; we'd rather use a pseudo. */
1103 && (! (REG_P (src)
1104 && REGNO (src) < FIRST_PSEUDO_REGISTER) || allow_hwreg))
1105 {
1106 *pinsn = p;
1107 return src;
1108 }
1109 }
1110
1111 /* If set in non-simple way, we don't have a value. */
1112 if (reg_set_p (x, p))
1113 break;
1114 }
1115
1116 return x;
1117 }
1118
1119 /* Return nonzero if register in range [REGNO, ENDREGNO)
1120 appears either explicitly or implicitly in X
1121 other than being stored into.
1122
1123 References contained within the substructure at LOC do not count.
1124 LOC may be zero, meaning don't ignore anything. */
1125
1126 int
1127 refers_to_regno_p (unsigned int regno, unsigned int endregno, rtx x,
1128 rtx *loc)
1129 {
1130 int i;
1131 unsigned int x_regno;
1132 RTX_CODE code;
1133 const char *fmt;
1134
1135 repeat:
1136 /* The contents of a REG_NONNEG note are always zero, so we must come here
1137 upon repeat in case the last REG_NOTE is a REG_NONNEG note. */
1138 if (x == 0)
1139 return 0;
1140
1141 code = GET_CODE (x);
1142
1143 switch (code)
1144 {
1145 case REG:
1146 x_regno = REGNO (x);
1147
1148 /* If we are modifying the stack, frame, or argument pointer, it will
1149 clobber a virtual register. In fact, we could be more precise,
1150 but it isn't worth it. */
1151 if ((x_regno == STACK_POINTER_REGNUM
1152 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1153 || x_regno == ARG_POINTER_REGNUM
1154 #endif
1155 || x_regno == FRAME_POINTER_REGNUM)
1156 && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
1157 return 1;
1158
1159 return (endregno > x_regno
1160 && regno < x_regno + (x_regno < FIRST_PSEUDO_REGISTER
1161 ? hard_regno_nregs[x_regno][GET_MODE (x)]
1162 : 1));
1163
1164 case SUBREG:
1165 /* If this is a SUBREG of a hard reg, we can see exactly which
1166 registers are being modified. Otherwise, handle normally. */
1167 if (REG_P (SUBREG_REG (x))
1168 && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
1169 {
1170 unsigned int inner_regno = subreg_regno (x);
1171 unsigned int inner_endregno
1172 = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
1173 ? hard_regno_nregs[inner_regno][GET_MODE (x)] : 1);
1174
1175 return endregno > inner_regno && regno < inner_endregno;
1176 }
1177 break;
1178
1179 case CLOBBER:
1180 case SET:
1181 if (&SET_DEST (x) != loc
1182 /* Note setting a SUBREG counts as referring to the REG it is in for
1183 a pseudo but not for hard registers since we can
1184 treat each word individually. */
1185 && ((GET_CODE (SET_DEST (x)) == SUBREG
1186 && loc != &SUBREG_REG (SET_DEST (x))
1187 && REG_P (SUBREG_REG (SET_DEST (x)))
1188 && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
1189 && refers_to_regno_p (regno, endregno,
1190 SUBREG_REG (SET_DEST (x)), loc))
1191 || (!REG_P (SET_DEST (x))
1192 && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
1193 return 1;
1194
1195 if (code == CLOBBER || loc == &SET_SRC (x))
1196 return 0;
1197 x = SET_SRC (x);
1198 goto repeat;
1199
1200 default:
1201 break;
1202 }
1203
1204 /* X does not match, so try its subexpressions. */
1205
1206 fmt = GET_RTX_FORMAT (code);
1207 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1208 {
1209 if (fmt[i] == 'e' && loc != &XEXP (x, i))
1210 {
1211 if (i == 0)
1212 {
1213 x = XEXP (x, 0);
1214 goto repeat;
1215 }
1216 else
1217 if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
1218 return 1;
1219 }
1220 else if (fmt[i] == 'E')
1221 {
1222 int j;
1223 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1224 if (loc != &XVECEXP (x, i, j)
1225 && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
1226 return 1;
1227 }
1228 }
1229 return 0;
1230 }
1231
1232 /* Nonzero if modifying X will affect IN. If X is a register or a SUBREG,
1233 we check if any register number in X conflicts with the relevant register
1234 numbers. If X is a constant, return 0. If X is a MEM, return 1 iff IN
1235 contains a MEM (we don't bother checking for memory addresses that can't
1236 conflict because we expect this to be a rare case). */
1237
1238 int
1239 reg_overlap_mentioned_p (rtx x, rtx in)
1240 {
1241 unsigned int regno, endregno;
1242
1243 /* If either argument is a constant, then modifying X can not
1244 affect IN.  Here we look only at IN; the case where X is a constant
1245 is folded into the switch statement below. */
1246 if (CONSTANT_P (in))
1247 return 0;
1248
1249 recurse:
1250 switch (GET_CODE (x))
1251 {
1252 case STRICT_LOW_PART:
1253 case ZERO_EXTRACT:
1254 case SIGN_EXTRACT:
1255 /* Overly conservative. */
1256 x = XEXP (x, 0);
1257 goto recurse;
1258
1259 case SUBREG:
1260 regno = REGNO (SUBREG_REG (x));
1261 if (regno < FIRST_PSEUDO_REGISTER)
1262 regno = subreg_regno (x);
1263 goto do_reg;
1264
1265 case REG:
1266 regno = REGNO (x);
1267 do_reg:
1268 endregno = regno + (regno < FIRST_PSEUDO_REGISTER
1269 ? hard_regno_nregs[regno][GET_MODE (x)] : 1);
1270 return refers_to_regno_p (regno, endregno, in, (rtx*) 0);
1271
1272 case MEM:
1273 {
1274 const char *fmt;
1275 int i;
1276
1277 if (MEM_P (in))
1278 return 1;
1279
1280 fmt = GET_RTX_FORMAT (GET_CODE (in));
1281 for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
1282 if (fmt[i] == 'e')
1283 {
1284 if (reg_overlap_mentioned_p (x, XEXP (in, i)))
1285 return 1;
1286 }
1287 else if (fmt[i] == 'E')
1288 {
1289 int j;
1290 for (j = XVECLEN (in, i) - 1; j >= 0; --j)
1291 if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
1292 return 1;
1293 }
1294
1295 return 0;
1296 }
1297
1298 case SCRATCH:
1299 case PC:
1300 case CC0:
1301 return reg_mentioned_p (x, in);
1302
1303 case PARALLEL:
1304 {
1305 int i;
1306
1307 /* If any register in here refers to it we return true. */
1308 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1309 if (XEXP (XVECEXP (x, 0, i), 0) != 0
1310 && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
1311 return 1;
1312 return 0;
1313 }
1314
1315 default:
1316 gcc_assert (CONSTANT_P (x));
1317 return 0;
1318 }
1319 }
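
/* An illustrative case: on a target where DImode needs two hard registers,
   reg_overlap_mentioned_p ((reg:SI 1), (reg:DI 0)) is nonzero because
   hard register 1 falls inside the register range [0, 2) occupied by the
   DImode value.  */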
1320
1321 /* Call FUN on each register or MEM that is stored into or clobbered by X.
1322 (X would be the pattern of an insn).
1323 FUN receives two arguments:
1324 the REG, MEM, CC0 or PC being stored in or clobbered,
1325 the SET or CLOBBER rtx that does the store.
1326
1327 If the item being stored in or clobbered is a SUBREG of a hard register,
1328 the SUBREG will be passed. */
1329
1330 void
1331 note_stores (rtx x, void (*fun) (rtx, rtx, void *), void *data)
1332 {
1333 int i;
1334
1335 if (GET_CODE (x) == COND_EXEC)
1336 x = COND_EXEC_CODE (x);
1337
1338 if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
1339 {
1340 rtx dest = SET_DEST (x);
1341
1342 while ((GET_CODE (dest) == SUBREG
1343 && (!REG_P (SUBREG_REG (dest))
1344 || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
1345 || GET_CODE (dest) == ZERO_EXTRACT
1346 || GET_CODE (dest) == STRICT_LOW_PART)
1347 dest = XEXP (dest, 0);
1348
1349 /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
1350 each of whose first operand is a register. */
1351 if (GET_CODE (dest) == PARALLEL)
1352 {
1353 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
1354 if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
1355 (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
1356 }
1357 else
1358 (*fun) (dest, x, data);
1359 }
1360
1361 else if (GET_CODE (x) == PARALLEL)
1362 for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1363 note_stores (XVECEXP (x, 0, i), fun, data);
1364 }
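
/* set_of above is a typical client of note_stores: it passes a small
   structure through DATA and examines each destination handed to its
   callback.  Only the outermost SET/CLOBBER destinations are visited;
   uses inside SET_SRC are never passed to FUN.  */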
1365
1366 /* Like note_stores, but call FUN for each expression that is being
1367 referenced in PBODY, a pointer to the PATTERN of an insn. We only call
1368 FUN for each expression, not any interior subexpressions. FUN receives a
1369 pointer to the expression and the DATA passed to this function.
1370
1371 Note that this is not quite the same test as that done in reg_referenced_p
1372 since that considers something as being referenced if it is being
1373 partially set, while we do not. */
1374
1375 void
1376 note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
1377 {
1378 rtx body = *pbody;
1379 int i;
1380
1381 switch (GET_CODE (body))
1382 {
1383 case COND_EXEC:
1384 (*fun) (&COND_EXEC_TEST (body), data);
1385 note_uses (&COND_EXEC_CODE (body), fun, data);
1386 return;
1387
1388 case PARALLEL:
1389 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1390 note_uses (&XVECEXP (body, 0, i), fun, data);
1391 return;
1392
1393 case USE:
1394 (*fun) (&XEXP (body, 0), data);
1395 return;
1396
1397 case ASM_OPERANDS:
1398 for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
1399 (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
1400 return;
1401
1402 case TRAP_IF:
1403 (*fun) (&TRAP_CONDITION (body), data);
1404 return;
1405
1406 case PREFETCH:
1407 (*fun) (&XEXP (body, 0), data);
1408 return;
1409
1410 case UNSPEC:
1411 case UNSPEC_VOLATILE:
1412 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
1413 (*fun) (&XVECEXP (body, 0, i), data);
1414 return;
1415
1416 case CLOBBER:
1417 if (MEM_P (XEXP (body, 0)))
1418 (*fun) (&XEXP (XEXP (body, 0), 0), data);
1419 return;
1420
1421 case SET:
1422 {
1423 rtx dest = SET_DEST (body);
1424
1425 /* For a SET we call FUN on the source, on the position operands of a
1426 ZERO_EXTRACT destination, and on the address of a MEM destination. */
1427 (*fun) (&SET_SRC (body), data);
1428
1429 if (GET_CODE (dest) == ZERO_EXTRACT)
1430 {
1431 (*fun) (&XEXP (dest, 1), data);
1432 (*fun) (&XEXP (dest, 2), data);
1433 }
1434
1435 while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
1436 dest = XEXP (dest, 0);
1437
1438 if (MEM_P (dest))
1439 (*fun) (&XEXP (dest, 0), data);
1440 }
1441 return;
1442
1443 default:
1444 /* All the other possibilities never store. */
1445 (*fun) (pbody, data);
1446 return;
1447 }
1448 }
1449
1450 /* Return nonzero if X's old contents don't survive after INSN.
1451 This will be true if X is (cc0) or if X is a register and
1452 X dies in INSN or because INSN entirely sets X.
1453
1454 "Entirely set" means set directly and not through a SUBREG, or
1455 ZERO_EXTRACT, so no trace of the old contents remains.
1456 Likewise, REG_INC does not count.
1457
1458 REG may be a hard or pseudo reg. Renumbering is not taken into account,
1459 but for this use that makes no difference, since regs don't overlap
1460 during their lifetimes. Therefore, this function may be used
1461 at any time after deaths have been computed (in flow.c).
1462
1463 If REG is a hard reg that occupies multiple machine registers, this
1464 function will only return 1 if each of those registers will be replaced
1465 by INSN. */
1466
1467 int
1468 dead_or_set_p (rtx insn, rtx x)
1469 {
1470 unsigned int regno, last_regno;
1471 unsigned int i;
1472
1473 /* Can't use cc0_rtx below since this file is used by genattrtab.c. */
1474 if (GET_CODE (x) == CC0)
1475 return 1;
1476
1477 gcc_assert (REG_P (x));
1478
1479 regno = REGNO (x);
1480 last_regno = (regno >= FIRST_PSEUDO_REGISTER ? regno
1481 : regno + hard_regno_nregs[regno][GET_MODE (x)] - 1);
1482
1483 for (i = regno; i <= last_regno; i++)
1484 if (! dead_or_set_regno_p (insn, i))
1485 return 0;
1486
1487 return 1;
1488 }
1489
1490 /* Return TRUE iff DEST is a register or subreg of a register and
1491 doesn't change the number of words of the inner register, and any
1492 part of the register is TEST_REGNO. */
1493
1494 static bool
1495 covers_regno_no_parallel_p (rtx dest, unsigned int test_regno)
1496 {
1497 unsigned int regno, endregno;
1498
1499 if (GET_CODE (dest) == SUBREG
1500 && (((GET_MODE_SIZE (GET_MODE (dest))
1501 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
1502 == ((GET_MODE_SIZE (GET_MODE (SUBREG_REG (dest)))
1503 + UNITS_PER_WORD - 1) / UNITS_PER_WORD)))
1504 dest = SUBREG_REG (dest);
1505
1506 if (!REG_P (dest))
1507 return false;
1508
1509 regno = REGNO (dest);
1510 endregno = (regno >= FIRST_PSEUDO_REGISTER ? regno + 1
1511 : regno + hard_regno_nregs[regno][GET_MODE (dest)]);
1512 return (test_regno >= regno && test_regno < endregno);
1513 }
1514
1515 /* Like covers_regno_no_parallel_p, but also handles PARALLELs where
1516 any member matches the covers_regno_no_parallel_p criteria. */
1517
1518 static bool
1519 covers_regno_p (rtx dest, unsigned int test_regno)
1520 {
1521 if (GET_CODE (dest) == PARALLEL)
1522 {
1523 /* Some targets place small structures in registers for return
1524 values of functions, and those registers are wrapped in
1525 PARALLELs that we may see as the destination of a SET. */
1526 int i;
1527
1528 for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
1529 {
1530 rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
1531 if (inner != NULL_RTX
1532 && covers_regno_no_parallel_p (inner, test_regno))
1533 return true;
1534 }
1535
1536 return false;
1537 }
1538 else
1539 return covers_regno_no_parallel_p (dest, test_regno);
1540 }
1541
1542 /* Utility function for dead_or_set_p to check an individual register. Also
1543 called from flow.c. */
1544
1545 int
1546 dead_or_set_regno_p (rtx insn, unsigned int test_regno)
1547 {
1548 rtx pattern;
1549
1550 /* See if there is a death note for something that includes TEST_REGNO. */
1551 if (find_regno_note (insn, REG_DEAD, test_regno))
1552 return 1;
1553
1554 if (CALL_P (insn)
1555 && find_regno_fusage (insn, CLOBBER, test_regno))
1556 return 1;
1557
1558 pattern = PATTERN (insn);
1559
1560 if (GET_CODE (pattern) == COND_EXEC)
1561 pattern = COND_EXEC_CODE (pattern);
1562
1563 if (GET_CODE (pattern) == SET)
1564 return covers_regno_p (SET_DEST (pattern), test_regno);
1565 else if (GET_CODE (pattern) == PARALLEL)
1566 {
1567 int i;
1568
1569 for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
1570 {
1571 rtx body = XVECEXP (pattern, 0, i);
1572
1573 if (GET_CODE (body) == COND_EXEC)
1574 body = COND_EXEC_CODE (body);
1575
1576 if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
1577 && covers_regno_p (SET_DEST (body), test_regno))
1578 return 1;
1579 }
1580 }
1581
1582 return 0;
1583 }
1584
1585 /* Return the reg-note of kind KIND in insn INSN, if there is one.
1586 If DATUM is nonzero, look for one whose datum is DATUM. */
1587
1588 rtx
1589 find_reg_note (rtx insn, enum reg_note kind, rtx datum)
1590 {
1591 rtx link;
1592
1593 gcc_assert (insn);
1594
1595 /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
1596 if (! INSN_P (insn))
1597 return 0;
1598 if (datum == 0)
1599 {
1600 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1601 if (REG_NOTE_KIND (link) == kind)
1602 return link;
1603 return 0;
1604 }
1605
1606 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1607 if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
1608 return link;
1609 return 0;
1610 }
1611
1612 /* Return the reg-note of kind KIND in insn INSN which applies to register
1613 number REGNO, if any. Return 0 if there is no such reg-note. Note that
1614 the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
1615 it might be the case that the note overlaps REGNO. */
1616
1617 rtx
1618 find_regno_note (rtx insn, enum reg_note kind, unsigned int regno)
1619 {
1620 rtx link;
1621
1622 /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN. */
1623 if (! INSN_P (insn))
1624 return 0;
1625
1626 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1627 if (REG_NOTE_KIND (link) == kind
1628 /* Verify that it is a register, so that scratch and MEM won't cause a
1629 problem here. */
1630 && REG_P (XEXP (link, 0))
1631 && REGNO (XEXP (link, 0)) <= regno
1632 && ((REGNO (XEXP (link, 0))
1633 + (REGNO (XEXP (link, 0)) >= FIRST_PSEUDO_REGISTER ? 1
1634 : hard_regno_nregs[REGNO (XEXP (link, 0))]
1635 [GET_MODE (XEXP (link, 0))]))
1636 > regno))
1637 return link;
1638 return 0;
1639 }
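
/* As an example of the overlap handling above, a REG_DEAD note whose
   datum is (reg:DI 0) is returned for REGNO == 1 on a target where
   DImode occupies hard registers 0 and 1.  */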
1640
1641 /* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
1642 has such a note. */
1643
1644 rtx
1645 find_reg_equal_equiv_note (rtx insn)
1646 {
1647 rtx link;
1648
1649 if (!INSN_P (insn))
1650 return 0;
1651 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1652 if (REG_NOTE_KIND (link) == REG_EQUAL
1653 || REG_NOTE_KIND (link) == REG_EQUIV)
1654 {
1655 if (single_set (insn) == 0)
1656 return 0;
1657 return link;
1658 }
1659 return NULL;
1660 }
1661
1662 /* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
1663 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
1664
1665 int
1666 find_reg_fusage (rtx insn, enum rtx_code code, rtx datum)
1667 {
1668 /* If it's not a CALL_INSN, it can't possibly have a
1669 CALL_INSN_FUNCTION_USAGE field, so don't bother checking. */
1670 if (!CALL_P (insn))
1671 return 0;
1672
1673 gcc_assert (datum);
1674
1675 if (!REG_P (datum))
1676 {
1677 rtx link;
1678
1679 for (link = CALL_INSN_FUNCTION_USAGE (insn);
1680 link;
1681 link = XEXP (link, 1))
1682 if (GET_CODE (XEXP (link, 0)) == code
1683 && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
1684 return 1;
1685 }
1686 else
1687 {
1688 unsigned int regno = REGNO (datum);
1689
1690 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
1691 to pseudo registers, so don't bother checking. */
1692
1693 if (regno < FIRST_PSEUDO_REGISTER)
1694 {
1695 unsigned int end_regno
1696 = regno + hard_regno_nregs[regno][GET_MODE (datum)];
1697 unsigned int i;
1698
1699 for (i = regno; i < end_regno; i++)
1700 if (find_regno_fusage (insn, code, i))
1701 return 1;
1702 }
1703 }
1704
1705 return 0;
1706 }
1707
1708 /* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
1709 in the CALL_INSN_FUNCTION_USAGE information of INSN. */
1710
1711 int
1712 find_regno_fusage (rtx insn, enum rtx_code code, unsigned int regno)
1713 {
1714 rtx link;
1715
1716 /* CALL_INSN_FUNCTION_USAGE information cannot contain references
1717 to pseudo registers, so don't bother checking. */
1718
1719 if (regno >= FIRST_PSEUDO_REGISTER
1720 || !CALL_P (insn) )
1721 return 0;
1722
1723 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
1724 {
1725 unsigned int regnote;
1726 rtx op, reg;
1727
1728 if (GET_CODE (op = XEXP (link, 0)) == code
1729 && REG_P (reg = XEXP (op, 0))
1730 && (regnote = REGNO (reg)) <= regno
1731 && regnote + hard_regno_nregs[regnote][GET_MODE (reg)] > regno)
1732 return 1;
1733 }
1734
1735 return 0;
1736 }
1737
1738 /* Return true if INSN is a call to a pure function. */
1739
1740 int
1741 pure_call_p (rtx insn)
1742 {
1743 rtx link;
1744
1745 if (!CALL_P (insn) || ! CONST_OR_PURE_CALL_P (insn))
1746 return 0;
1747
1748 /* Look for the note that differentiates const and pure functions. */
1749 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
1750 {
1751 rtx u, m;
1752
1753 if (GET_CODE (u = XEXP (link, 0)) == USE
1754 && MEM_P (m = XEXP (u, 0)) && GET_MODE (m) == BLKmode
1755 && GET_CODE (XEXP (m, 0)) == SCRATCH)
1756 return 1;
1757 }
1758
1759 return 0;
1760 }
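
/* The pattern looked for above, (use (mem:BLK (scratch))), is the marker
   that distinguishes a call to a pure function from a call to a const one
   in CALL_INSN_FUNCTION_USAGE.  */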
1761
1762 /* Remove register note NOTE from the REG_NOTES of INSN. */
1763
1764 void
1765 remove_note (rtx insn, rtx note)
1766 {
1767 rtx link;
1768
1769 if (note == NULL_RTX)
1770 return;
1771
1772 if (REG_NOTES (insn) == note)
1773 {
1774 REG_NOTES (insn) = XEXP (note, 1);
1775 return;
1776 }
1777
1778 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1779 if (XEXP (link, 1) == note)
1780 {
1781 XEXP (link, 1) = XEXP (note, 1);
1782 return;
1783 }
1784
1785 gcc_unreachable ();
1786 }
1787
1788 /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
1789 return 1 if it is found. A simple equality test is used to determine if
1790 NODE matches. */
1791
1792 int
1793 in_expr_list_p (rtx listp, rtx node)
1794 {
1795 rtx x;
1796
1797 for (x = listp; x; x = XEXP (x, 1))
1798 if (node == XEXP (x, 0))
1799 return 1;
1800
1801 return 0;
1802 }
1803
1804 /* Search LISTP (an EXPR_LIST) for an entry whose first operand is NODE and
1805 remove that entry from the list if it is found.
1806
1807 A simple equality test is used to determine if NODE matches. */
1808
1809 void
1810 remove_node_from_expr_list (rtx node, rtx *listp)
1811 {
1812 rtx temp = *listp;
1813 rtx prev = NULL_RTX;
1814
1815 while (temp)
1816 {
1817 if (node == XEXP (temp, 0))
1818 {
1819 /* Splice the node out of the list. */
1820 if (prev)
1821 XEXP (prev, 1) = XEXP (temp, 1);
1822 else
1823 *listp = XEXP (temp, 1);
1824
1825 return;
1826 }
1827
1828 prev = temp;
1829 temp = XEXP (temp, 1);
1830 }
1831 }
1832
1833 /* Nonzero if X contains any volatile instructions. These are instructions
1834 which may cause unpredictable machine state, and thus no
1835 instructions should be moved or combined across them. This includes
1836 only volatile asms and UNSPEC_VOLATILE instructions. */
1837
1838 int
1839 volatile_insn_p (rtx x)
1840 {
1841 RTX_CODE code;
1842
1843 code = GET_CODE (x);
1844 switch (code)
1845 {
1846 case LABEL_REF:
1847 case SYMBOL_REF:
1848 case CONST_INT:
1849 case CONST:
1850 case CONST_DOUBLE:
1851 case CONST_VECTOR:
1852 case CC0:
1853 case PC:
1854 case REG:
1855 case SCRATCH:
1856 case CLOBBER:
1857 case ADDR_VEC:
1858 case ADDR_DIFF_VEC:
1859 case CALL:
1860 case MEM:
1861 return 0;
1862
1863 case UNSPEC_VOLATILE:
1864 /* case TRAP_IF: This isn't clear yet. */
1865 return 1;
1866
1867 case ASM_INPUT:
1868 case ASM_OPERANDS:
1869 if (MEM_VOLATILE_P (x))
1870 return 1;
1871
1872 default:
1873 break;
1874 }
1875
1876 /* Recursively scan the operands of this expression. */
1877
1878 {
1879 const char *fmt = GET_RTX_FORMAT (code);
1880 int i;
1881
1882 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1883 {
1884 if (fmt[i] == 'e')
1885 {
1886 if (volatile_insn_p (XEXP (x, i)))
1887 return 1;
1888 }
1889 else if (fmt[i] == 'E')
1890 {
1891 int j;
1892 for (j = 0; j < XVECLEN (x, i); j++)
1893 if (volatile_insn_p (XVECEXP (x, i, j)))
1894 return 1;
1895 }
1896 }
1897 }
1898 return 0;
1899 }
1900
1901 /* Nonzero if X contains any volatile memory references
1902 UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions. */
1903
1904 int
1905 volatile_refs_p (rtx x)
1906 {
1907 RTX_CODE code;
1908
1909 code = GET_CODE (x);
1910 switch (code)
1911 {
1912 case LABEL_REF:
1913 case SYMBOL_REF:
1914 case CONST_INT:
1915 case CONST:
1916 case CONST_DOUBLE:
1917 case CONST_VECTOR:
1918 case CC0:
1919 case PC:
1920 case REG:
1921 case SCRATCH:
1922 case CLOBBER:
1923 case ADDR_VEC:
1924 case ADDR_DIFF_VEC:
1925 return 0;
1926
1927 case UNSPEC_VOLATILE:
1928 return 1;
1929
1930 case MEM:
1931 case ASM_INPUT:
1932 case ASM_OPERANDS:
1933 if (MEM_VOLATILE_P (x))
1934 return 1;
1935
1936 default:
1937 break;
1938 }
1939
1940 /* Recursively scan the operands of this expression. */
1941
1942 {
1943 const char *fmt = GET_RTX_FORMAT (code);
1944 int i;
1945
1946 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1947 {
1948 if (fmt[i] == 'e')
1949 {
1950 if (volatile_refs_p (XEXP (x, i)))
1951 return 1;
1952 }
1953 else if (fmt[i] == 'E')
1954 {
1955 int j;
1956 for (j = 0; j < XVECLEN (x, i); j++)
1957 if (volatile_refs_p (XVECEXP (x, i, j)))
1958 return 1;
1959 }
1960 }
1961 }
1962 return 0;
1963 }
1964
1965 /* Similar to above, except that it also rejects register pre- and post-
1966 incrementing. */
1967
1968 int
1969 side_effects_p (rtx x)
1970 {
1971 RTX_CODE code;
1972
1973 code = GET_CODE (x);
1974 switch (code)
1975 {
1976 case LABEL_REF:
1977 case SYMBOL_REF:
1978 case CONST_INT:
1979 case CONST:
1980 case CONST_DOUBLE:
1981 case CONST_VECTOR:
1982 case CC0:
1983 case PC:
1984 case REG:
1985 case SCRATCH:
1986 case ADDR_VEC:
1987 case ADDR_DIFF_VEC:
1988 return 0;
1989
1990 case CLOBBER:
1991 /* Reject CLOBBER with a non-VOID mode. These are made by combine.c
1992 when some combination can't be done. If we see one, don't think
1993 that we can simplify the expression. */
1994 return (GET_MODE (x) != VOIDmode);
1995
1996 case PRE_INC:
1997 case PRE_DEC:
1998 case POST_INC:
1999 case POST_DEC:
2000 case PRE_MODIFY:
2001 case POST_MODIFY:
2002 case CALL:
2003 case UNSPEC_VOLATILE:
2004 /* case TRAP_IF: This isn't clear yet. */
2005 return 1;
2006
2007 case MEM:
2008 case ASM_INPUT:
2009 case ASM_OPERANDS:
2010 if (MEM_VOLATILE_P (x))
2011 return 1;
2012
2013 default:
2014 break;
2015 }
2016
2017 /* Recursively scan the operands of this expression. */
2018
2019 {
2020 const char *fmt = GET_RTX_FORMAT (code);
2021 int i;
2022
2023 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2024 {
2025 if (fmt[i] == 'e')
2026 {
2027 if (side_effects_p (XEXP (x, i)))
2028 return 1;
2029 }
2030 else if (fmt[i] == 'E')
2031 {
2032 int j;
2033 for (j = 0; j < XVECLEN (x, i); j++)
2034 if (side_effects_p (XVECEXP (x, i, j)))
2035 return 1;
2036 }
2037 }
2038 }
2039 return 0;
2040 }
2041
2042 enum may_trap_p_flags
2043 {
2044 MTP_UNALIGNED_MEMS = 1,
2045 MTP_AFTER_MOVE = 2
2046 };
2047 /* Return nonzero if evaluating rtx X might cause a trap.
2048 (FLAGS & MTP_UNALIGNED_MEMS) controls whether nonzero is returned for
2049 unaligned memory accesses on strict alignment machines. If
2050 (FLAGS & MTP_AFTER_MOVE) is true, returns nonzero even if the expression
2051 cannot trap at its current location, but it might become trapping if moved
2052 elsewhere. */
2053
2054 static int
2055 may_trap_p_1 (rtx x, unsigned flags)
2056 {
2057 int i;
2058 enum rtx_code code;
2059 const char *fmt;
2060 bool unaligned_mems = (flags & MTP_UNALIGNED_MEMS) != 0;
2061
2062 if (x == 0)
2063 return 0;
2064 code = GET_CODE (x);
2065 switch (code)
2066 {
2067 /* Handle these cases quickly. */
2068 case CONST_INT:
2069 case CONST_DOUBLE:
2070 case CONST_VECTOR:
2071 case SYMBOL_REF:
2072 case LABEL_REF:
2073 case CONST:
2074 case PC:
2075 case CC0:
2076 case REG:
2077 case SCRATCH:
2078 return 0;
2079
2080 case ASM_INPUT:
2081 case UNSPEC_VOLATILE:
2082 case TRAP_IF:
2083 return 1;
2084
2085 case ASM_OPERANDS:
2086 return MEM_VOLATILE_P (x);
2087
2088 /* Memory ref can trap unless it's a static var or a stack slot. */
2089 case MEM:
2090 if (/* MEM_NOTRAP_P only relates to the actual position of the memory
2091 reference; moving it out of a condition might cause its address
2092 to become invalid. */
2093 !(flags & MTP_AFTER_MOVE)
2094 && MEM_NOTRAP_P (x)
2095 && (!STRICT_ALIGNMENT || !unaligned_mems))
2096 return 0;
2097 return
2098 rtx_addr_can_trap_p_1 (XEXP (x, 0), GET_MODE (x), unaligned_mems);
2099
2100 /* Division by a non-constant might trap. */
2101 case DIV:
2102 case MOD:
2103 case UDIV:
2104 case UMOD:
2105 if (HONOR_SNANS (GET_MODE (x)))
2106 return 1;
2107 if (SCALAR_FLOAT_MODE_P (GET_MODE (x)))
2108 return flag_trapping_math;
2109 if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
2110 return 1;
2111 break;
2112
2113 case EXPR_LIST:
2114 /* An EXPR_LIST is used to represent a function call. This
2115 certainly may trap. */
2116 return 1;
2117
2118 case GE:
2119 case GT:
2120 case LE:
2121 case LT:
2122 case LTGT:
2123 case COMPARE:
2124 /* Some floating point comparisons may trap. */
2125 if (!flag_trapping_math)
2126 break;
2127 /* ??? There is no machine independent way to check for tests that trap
2128 when COMPARE is used, though many targets do make this distinction.
2129 For instance, sparc uses CCFPE for compares which generate exceptions
2130 and CCFP for compares which do not generate exceptions. */
2131 if (HONOR_NANS (GET_MODE (x)))
2132 return 1;
2133 /* But often the compare has some CC mode, so check operand
2134 modes as well. */
2135 if (HONOR_NANS (GET_MODE (XEXP (x, 0)))
2136 || HONOR_NANS (GET_MODE (XEXP (x, 1))))
2137 return 1;
2138 break;
2139
2140 case EQ:
2141 case NE:
2142 if (HONOR_SNANS (GET_MODE (x)))
2143 return 1;
2144 /* Often comparison is CC mode, so check operand modes. */
2145 if (HONOR_SNANS (GET_MODE (XEXP (x, 0)))
2146 || HONOR_SNANS (GET_MODE (XEXP (x, 1))))
2147 return 1;
2148 break;
2149
2150 case FIX:
2151 /* Conversion of floating point might trap. */
2152 if (flag_trapping_math && HONOR_NANS (GET_MODE (XEXP (x, 0))))
2153 return 1;
2154 break;
2155
2156 case NEG:
2157 case ABS:
2158 case SUBREG:
2159 /* These operations don't trap even with floating point. */
2160 break;
2161
2162 default:
2163 /* Any floating arithmetic may trap. */
2164 if (SCALAR_FLOAT_MODE_P (GET_MODE (x))
2165 && flag_trapping_math)
2166 return 1;
2167 }
2168
2169 fmt = GET_RTX_FORMAT (code);
2170 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2171 {
2172 if (fmt[i] == 'e')
2173 {
2174 if (may_trap_p_1 (XEXP (x, i), flags))
2175 return 1;
2176 }
2177 else if (fmt[i] == 'E')
2178 {
2179 int j;
2180 for (j = 0; j < XVECLEN (x, i); j++)
2181 if (may_trap_p_1 (XVECEXP (x, i, j), flags))
2182 return 1;
2183 }
2184 }
2185 return 0;
2186 }
2187
2188 /* Return nonzero if evaluating rtx X might cause a trap. */
2189
2190 int
2191 may_trap_p (rtx x)
2192 {
2193 return may_trap_p_1 (x, 0);
2194 }
2195
2196 /* Return nonzero if evaluating rtx X might cause a trap, when the expression
2197 is moved from its current location by some optimization. */
2198
2199 int
2200 may_trap_after_code_motion_p (rtx x)
2201 {
2202 return may_trap_p_1 (x, MTP_AFTER_MOVE);
2203 }
2204
2205 /* Same as above, but additionally return nonzero if evaluating rtx X might
2206 cause a fault. We define a fault for the purpose of this function as an
2207 erroneous execution condition that cannot be encountered during the normal
2208 execution of a valid program; the typical example is an unaligned memory
2209 access on a strict alignment machine. The compiler guarantees that it
2210 doesn't generate code that will fault from a valid program, but this
2211 guarantee doesn't mean anything for individual instructions. Consider
2212 the following example:
2213
2214 struct S { int d; union { char *cp; int *ip; }; };
2215
2216 int foo(struct S *s)
2217 {
2218 if (s->d == 1)
2219 return *s->ip;
2220 else
2221 return *s->cp;
2222 }
2223
2224 on a strict alignment machine. In a valid program, foo will never be
2225 invoked on a structure for which d is equal to 1 and the underlying
2226 unique field of the union not aligned on a 4-byte boundary, but the
2227 expression *s->ip might cause a fault if considered individually.
2228
2229 At the RTL level, potentially problematic expressions will almost always
2230 verify may_trap_p; for example, the above dereference can be emitted as
2231 (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
2232 However, suppose that foo is inlined in a caller that causes s->cp to
2233 point to a local character variable and guarantees that s->d is not set
2234 to 1; foo may have been effectively translated into pseudo-RTL as:
2235
2236 if ((reg:SI) == 1)
2237 (set (reg:SI) (mem:SI (%fp - 7)))
2238 else
2239 (set (reg:QI) (mem:QI (%fp - 7)))
2240
2241 Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
2242 memory reference to a stack slot, but it will certainly cause a fault
2243 on a strict alignment machine. */
2244
2245 int
2246 may_trap_or_fault_p (rtx x)
2247 {
2248 return may_trap_p_1 (x, MTP_UNALIGNED_MEMS);
2249 }
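
/* A minimal usage sketch, not part of this file's interface: a pass
   deciding whether it may hoist an expression X out of a conditional
   could combine the predicates above roughly as

     static bool
     hoistable_p (rtx x)
     {
       return !may_trap_after_code_motion_p (x)
              && !may_trap_or_fault_p (x);
     }

   where hoistable_p is a hypothetical helper; real callers add further
   pass-specific conditions.  */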
2250
2251 /* Return nonzero if X contains a comparison that is not either EQ or NE,
2252 i.e., an inequality. */
2253
2254 int
2255 inequality_comparisons_p (rtx x)
2256 {
2257 const char *fmt;
2258 int len, i;
2259 enum rtx_code code = GET_CODE (x);
2260
2261 switch (code)
2262 {
2263 case REG:
2264 case SCRATCH:
2265 case PC:
2266 case CC0:
2267 case CONST_INT:
2268 case CONST_DOUBLE:
2269 case CONST_VECTOR:
2270 case CONST:
2271 case LABEL_REF:
2272 case SYMBOL_REF:
2273 return 0;
2274
2275 case LT:
2276 case LTU:
2277 case GT:
2278 case GTU:
2279 case LE:
2280 case LEU:
2281 case GE:
2282 case GEU:
2283 return 1;
2284
2285 default:
2286 break;
2287 }
2288
2289 len = GET_RTX_LENGTH (code);
2290 fmt = GET_RTX_FORMAT (code);
2291
2292 for (i = 0; i < len; i++)
2293 {
2294 if (fmt[i] == 'e')
2295 {
2296 if (inequality_comparisons_p (XEXP (x, i)))
2297 return 1;
2298 }
2299 else if (fmt[i] == 'E')
2300 {
2301 int j;
2302 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2303 if (inequality_comparisons_p (XVECEXP (x, i, j)))
2304 return 1;
2305 }
2306 }
2307
2308 return 0;
2309 }
2310
2311 /* Replace any occurrence of FROM in X with TO. The function does
2312 not descend into CONST_DOUBLEs when doing the replacement.
2313
2314 Note that copying is not done so X must not be shared unless all copies
2315 are to be modified. */
2316
2317 rtx
2318 replace_rtx (rtx x, rtx from, rtx to)
2319 {
2320 int i, j;
2321 const char *fmt;
2322
2323 /* The following prevents infinite loops when we change a MEM inside
2324 a CONST_DOUBLE into the same CONST_DOUBLE. */
2325 if (x != 0 && GET_CODE (x) == CONST_DOUBLE)
2326 return x;
2327
2328 if (x == from)
2329 return to;
2330
2331 /* Allow this function to make replacements in EXPR_LISTs. */
2332 if (x == 0)
2333 return 0;
2334
2335 if (GET_CODE (x) == SUBREG)
2336 {
2337 rtx new = replace_rtx (SUBREG_REG (x), from, to);
2338
2339 if (GET_CODE (new) == CONST_INT)
2340 {
2341 x = simplify_subreg (GET_MODE (x), new,
2342 GET_MODE (SUBREG_REG (x)),
2343 SUBREG_BYTE (x));
2344 gcc_assert (x);
2345 }
2346 else
2347 SUBREG_REG (x) = new;
2348
2349 return x;
2350 }
2351 else if (GET_CODE (x) == ZERO_EXTEND)
2352 {
2353 rtx new = replace_rtx (XEXP (x, 0), from, to);
2354
2355 if (GET_CODE (new) == CONST_INT)
2356 {
2357 x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
2358 new, GET_MODE (XEXP (x, 0)));
2359 gcc_assert (x);
2360 }
2361 else
2362 XEXP (x, 0) = new;
2363
2364 return x;
2365 }
2366
2367 fmt = GET_RTX_FORMAT (GET_CODE (x));
2368 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
2369 {
2370 if (fmt[i] == 'e')
2371 XEXP (x, i) = replace_rtx (XEXP (x, i), from, to);
2372 else if (fmt[i] == 'E')
2373 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2374 XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j), from, to);
2375 }
2376
2377 return x;
2378 }
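
/* An illustrative (hypothetical) use of replace_rtx: substituting a known
   constant for a pseudo register in a copied pattern might be written as

     rtx pat = copy_rtx (PATTERN (insn));
     pat = replace_rtx (pat, pseudo_reg, GEN_INT (42));

   The copy_rtx call matters because replace_rtx modifies X in place
   wherever it cannot fold the result.  */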
2379
2380 /* Replace occurrences of the old label in *X with the new one.
2381 DATA is a REPLACE_LABEL_DATA containing the old and new labels. */
2382
2383 int
2384 replace_label (rtx *x, void *data)
2385 {
2386 rtx l = *x;
2387 rtx old_label = ((replace_label_data *) data)->r1;
2388 rtx new_label = ((replace_label_data *) data)->r2;
2389 bool update_label_nuses = ((replace_label_data *) data)->update_label_nuses;
2390
2391 if (l == NULL_RTX)
2392 return 0;
2393
2394 if (GET_CODE (l) == SYMBOL_REF
2395 && CONSTANT_POOL_ADDRESS_P (l))
2396 {
2397 rtx c = get_pool_constant (l);
2398 if (rtx_referenced_p (old_label, c))
2399 {
2400 rtx new_c, new_l;
2401 replace_label_data *d = (replace_label_data *) data;
2402
2403 /* Create a copy of constant C; replace the label inside
2404 but do not update LABEL_NUSES because uses in constant pool
2405 are not counted. */
2406 new_c = copy_rtx (c);
2407 d->update_label_nuses = false;
2408 for_each_rtx (&new_c, replace_label, data);
2409 d->update_label_nuses = update_label_nuses;
2410
2411 /* Add the new constant NEW_C to constant pool and replace
2412 the old reference to constant by new reference. */
2413 new_l = XEXP (force_const_mem (get_pool_mode (l), new_c), 0);
2414 *x = replace_rtx (l, l, new_l);
2415 }
2416 return 0;
2417 }
2418
2419 /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
2420 field. This is not handled by for_each_rtx because it doesn't
2421 handle unprinted ('0') fields. */
2422 if (JUMP_P (l) && JUMP_LABEL (l) == old_label)
2423 JUMP_LABEL (l) = new_label;
2424
2425 if ((GET_CODE (l) == LABEL_REF
2426 || GET_CODE (l) == INSN_LIST)
2427 && XEXP (l, 0) == old_label)
2428 {
2429 XEXP (l, 0) = new_label;
2430 if (update_label_nuses)
2431 {
2432 ++LABEL_NUSES (new_label);
2433 --LABEL_NUSES (old_label);
2434 }
2435 return 0;
2436 }
2437
2438 return 0;
2439 }
2440
2441 /* If *BODY is equal to X, or X is directly referenced by *BODY,
2442 return nonzero so that FOR_EACH_RTX stops traversing and returns
2443 nonzero too; otherwise FOR_EACH_RTX continues traversing *BODY. */
2444
2445 static int
2446 rtx_referenced_p_1 (rtx *body, void *x)
2447 {
2448 rtx y = (rtx) x;
2449
2450 if (*body == NULL_RTX)
2451 return y == NULL_RTX;
2452
2453 /* Return true if a label_ref *BODY refers to label Y. */
2454 if (GET_CODE (*body) == LABEL_REF && LABEL_P (y))
2455 return XEXP (*body, 0) == y;
2456
2457 /* If *BODY is a reference to pool constant traverse the constant. */
2458 if (GET_CODE (*body) == SYMBOL_REF
2459 && CONSTANT_POOL_ADDRESS_P (*body))
2460 return rtx_referenced_p (y, get_pool_constant (*body));
2461
2462 /* By default, compare the RTL expressions. */
2463 return rtx_equal_p (*body, y);
2464 }
2465
2466 /* Return true if X is referenced in BODY. */
2467
2468 int
2469 rtx_referenced_p (rtx x, rtx body)
2470 {
2471 return for_each_rtx (&body, rtx_referenced_p_1, x);
2472 }
2473
2474 /* If INSN is a tablejump, return true and store the label (which precedes
2475 the jump table) in *LABELP and the table in *TABLEP; both may be NULL. */
2476
2477 bool
2478 tablejump_p (rtx insn, rtx *labelp, rtx *tablep)
2479 {
2480 rtx label, table;
2481
2482 if (JUMP_P (insn)
2483 && (label = JUMP_LABEL (insn)) != NULL_RTX
2484 && (table = next_active_insn (label)) != NULL_RTX
2485 && JUMP_P (table)
2486 && (GET_CODE (PATTERN (table)) == ADDR_VEC
2487 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
2488 {
2489 if (labelp)
2490 *labelp = label;
2491 if (tablep)
2492 *tablep = table;
2493 return true;
2494 }
2495 return false;
2496 }
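
/* A short usage sketch with hypothetical locals: callers that need the
   dispatch table typically write

     rtx label, table;
     if (tablejump_p (insn, &label, &table))
       {
         rtvec vec = XVEC (PATTERN (table),
                           GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC);
         int n = GET_NUM_ELEM (vec);
       }

   since an ADDR_VEC keeps its labels in operand 0 while an ADDR_DIFF_VEC
   keeps them in operand 1.  */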
2497
2498 /* A subroutine of computed_jump_p, return 1 if X contains a REG or MEM or
2499 constant that is not in the constant pool and not in the condition
2500 of an IF_THEN_ELSE. */
2501
2502 static int
2503 computed_jump_p_1 (rtx x)
2504 {
2505 enum rtx_code code = GET_CODE (x);
2506 int i, j;
2507 const char *fmt;
2508
2509 switch (code)
2510 {
2511 case LABEL_REF:
2512 case PC:
2513 return 0;
2514
2515 case CONST:
2516 case CONST_INT:
2517 case CONST_DOUBLE:
2518 case CONST_VECTOR:
2519 case SYMBOL_REF:
2520 case REG:
2521 return 1;
2522
2523 case MEM:
2524 return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
2525 && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));
2526
2527 case IF_THEN_ELSE:
2528 return (computed_jump_p_1 (XEXP (x, 1))
2529 || computed_jump_p_1 (XEXP (x, 2)));
2530
2531 default:
2532 break;
2533 }
2534
2535 fmt = GET_RTX_FORMAT (code);
2536 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2537 {
2538 if (fmt[i] == 'e'
2539 && computed_jump_p_1 (XEXP (x, i)))
2540 return 1;
2541
2542 else if (fmt[i] == 'E')
2543 for (j = 0; j < XVECLEN (x, i); j++)
2544 if (computed_jump_p_1 (XVECEXP (x, i, j)))
2545 return 1;
2546 }
2547
2548 return 0;
2549 }
2550
2551 /* Return nonzero if INSN is an indirect jump (aka computed jump).
2552
2553 Tablejumps and casesi insns are not considered indirect jumps;
2554 we can recognize them by a (use (label_ref)). */
2555
2556 int
2557 computed_jump_p (rtx insn)
2558 {
2559 int i;
2560 if (JUMP_P (insn))
2561 {
2562 rtx pat = PATTERN (insn);
2563
2564 if (find_reg_note (insn, REG_LABEL, NULL_RTX))
2565 return 0;
2566 else if (GET_CODE (pat) == PARALLEL)
2567 {
2568 int len = XVECLEN (pat, 0);
2569 int has_use_labelref = 0;
2570
2571 for (i = len - 1; i >= 0; i--)
2572 if (GET_CODE (XVECEXP (pat, 0, i)) == USE
2573 && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
2574 == LABEL_REF))
2575 has_use_labelref = 1;
2576
2577 if (! has_use_labelref)
2578 for (i = len - 1; i >= 0; i--)
2579 if (GET_CODE (XVECEXP (pat, 0, i)) == SET
2580 && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
2581 && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
2582 return 1;
2583 }
2584 else if (GET_CODE (pat) == SET
2585 && SET_DEST (pat) == pc_rtx
2586 && computed_jump_p_1 (SET_SRC (pat)))
2587 return 1;
2588 }
2589 return 0;
2590 }
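
/* For instance, (set (pc) (reg:SI 3)) and (set (pc) (mem:SI (reg:SI 3)))
   are computed jumps, while a casesi-style
   (parallel [(set (pc) ...) (use (label_ref ...))]) is not, because of
   the (use (label_ref)) marker tested above.  */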
2591
2592 /* Optimized inner loop of for_each_rtx, trying to avoid useless recursive
2593 calls. Processes the sub-expressions of EXP and passes them to F. */
2594 static int
2595 for_each_rtx_1 (rtx exp, int n, rtx_function f, void *data)
2596 {
2597 int result, i, j;
2598 const char *format = GET_RTX_FORMAT (GET_CODE (exp));
2599 rtx *x;
2600
2601 for (; format[n] != '\0'; n++)
2602 {
2603 switch (format[n])
2604 {
2605 case 'e':
2606 /* Call F on X. */
2607 x = &XEXP (exp, n);
2608 result = (*f) (x, data);
2609 if (result == -1)
2610 /* Do not traverse sub-expressions. */
2611 continue;
2612 else if (result != 0)
2613 /* Stop the traversal. */
2614 return result;
2615
2616 if (*x == NULL_RTX)
2617 /* There are no sub-expressions. */
2618 continue;
2619
2620 i = non_rtx_starting_operands[GET_CODE (*x)];
2621 if (i >= 0)
2622 {
2623 result = for_each_rtx_1 (*x, i, f, data);
2624 if (result != 0)
2625 return result;
2626 }
2627 break;
2628
2629 case 'V':
2630 case 'E':
2631 if (XVEC (exp, n) == 0)
2632 continue;
2633 for (j = 0; j < XVECLEN (exp, n); ++j)
2634 {
2635 /* Call F on X. */
2636 x = &XVECEXP (exp, n, j);
2637 result = (*f) (x, data);
2638 if (result == -1)
2639 /* Do not traverse sub-expressions. */
2640 continue;
2641 else if (result != 0)
2642 /* Stop the traversal. */
2643 return result;
2644
2645 if (*x == NULL_RTX)
2646 /* There are no sub-expressions. */
2647 continue;
2648
2649 i = non_rtx_starting_operands[GET_CODE (*x)];
2650 if (i >= 0)
2651 {
2652 result = for_each_rtx_1 (*x, i, f, data);
2653 if (result != 0)
2654 return result;
2655 }
2656 }
2657 break;
2658
2659 default:
2660 /* Nothing to do. */
2661 break;
2662 }
2663 }
2664
2665 return 0;
2666 }
2667
2668 /* Traverse X via depth-first search, calling F for each
2669 sub-expression (including X itself). F is also passed the DATA.
2670 If F returns -1, do not traverse sub-expressions, but continue
2671 traversing the rest of the tree. If F ever returns any other
2672 nonzero value, stop the traversal, and return the value returned
2673 by F. Otherwise, return 0. This function does not traverse inside
2674 tree structures that contain RTX_EXPRs, or into sub-expressions
2675 whose format code is `0', since it is not known whether or not those
2676 codes are actually RTL.
2677
2678 This routine is very general, and could (should?) be used to
2679 implement many of the other routines in this file. */
2680
2681 int
2682 for_each_rtx (rtx *x, rtx_function f, void *data)
2683 {
2684 int result;
2685 int i;
2686
2687 /* Call F on X. */
2688 result = (*f) (x, data);
2689 if (result == -1)
2690 /* Do not traverse sub-expressions. */
2691 return 0;
2692 else if (result != 0)
2693 /* Stop the traversal. */
2694 return result;
2695
2696 if (*x == NULL_RTX)
2697 /* There are no sub-expressions. */
2698 return 0;
2699
2700 i = non_rtx_starting_operands[GET_CODE (*x)];
2701 if (i < 0)
2702 return 0;
2703
2704 return for_each_rtx_1 (*x, i, f, data);
2705 }
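
/* A minimal sketch of a for_each_rtx callback, for illustration only
   (the names are hypothetical):

     static int
     count_mems_1 (rtx *x, void *data)
     {
       if (*x && MEM_P (*x))
         (*(int *) data)++;
       return 0;
     }

     int nmems = 0;
     for_each_rtx (&PATTERN (insn), count_mems_1, &nmems);

   Returning 0 continues the walk, returning -1 skips the sub-expressions
   of *X, and any other nonzero value stops the traversal and is returned
   by for_each_rtx.  */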
2706
2707
2708 /* Searches X for any reference to REGNO, returning the rtx of the
2709 reference found if any. Otherwise, returns NULL_RTX. */
2710
2711 rtx
2712 regno_use_in (unsigned int regno, rtx x)
2713 {
2714 const char *fmt;
2715 int i, j;
2716 rtx tem;
2717
2718 if (REG_P (x) && REGNO (x) == regno)
2719 return x;
2720
2721 fmt = GET_RTX_FORMAT (GET_CODE (x));
2722 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
2723 {
2724 if (fmt[i] == 'e')
2725 {
2726 if ((tem = regno_use_in (regno, XEXP (x, i))))
2727 return tem;
2728 }
2729 else if (fmt[i] == 'E')
2730 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
2731 if ((tem = regno_use_in (regno , XVECEXP (x, i, j))))
2732 return tem;
2733 }
2734
2735 return NULL_RTX;
2736 }
2737
2738 /* Return a value indicating whether OP, an operand of a commutative
2739 operation, is preferred as the first or second operand. The higher
2740 the value, the stronger the preference for being the first operand.
2741 We use negative values to indicate a preference for the first operand
2742 and positive values for the second operand. */
2743
2744 int
2745 commutative_operand_precedence (rtx op)
2746 {
2747 enum rtx_code code = GET_CODE (op);
2748
2749 /* Constants are always the second operand. Prefer "nice" constants. */
2750 if (code == CONST_INT)
2751 return -7;
2752 if (code == CONST_DOUBLE)
2753 return -6;
2754 op = avoid_constant_pool_reference (op);
2755 code = GET_CODE (op);
2756
2757 switch (GET_RTX_CLASS (code))
2758 {
2759 case RTX_CONST_OBJ:
2760 if (code == CONST_INT)
2761 return -5;
2762 if (code == CONST_DOUBLE)
2763 return -4;
2764 return -3;
2765
2766 case RTX_EXTRA:
2767 /* SUBREGs of objects should come second. */
2768 if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
2769 return -2;
2770
2771 if (!CONSTANT_P (op))
2772 return 0;
2773 else
2774 /* As for RTX_CONST_OBJ. */
2775 return -3;
2776
2777 case RTX_OBJ:
2778 /* Complex expressions should come first, so decrease the priority
2779 of objects. */
2780 return -1;
2781
2782 case RTX_COMM_ARITH:
2783 /* Prefer operands that are themselves commutative to be first.
2784 This helps to make things linear. In particular,
2785 (and (and (reg) (reg)) (not (reg))) is canonical. */
2786 return 4;
2787
2788 case RTX_BIN_ARITH:
2789 /* If only one operand is a binary expression, it will be the first
2790 operand. In particular, (plus (minus (reg) (reg)) (neg (reg)))
2791 is canonical, although it will usually be further simplified. */
2792 return 2;
2793
2794 case RTX_UNARY:
2795 /* Then prefer NEG and NOT. */
2796 if (code == NEG || code == NOT)
2797 return 1;
2798
2799 default:
2800 return 0;
2801 }
2802 }
2803
2804 /* Return 1 iff it is necessary to swap the operands of a commutative
2805 operation in order to canonicalize the expression. */
2806
2807 int
2808 swap_commutative_operands_p (rtx x, rtx y)
2809 {
2810 return (commutative_operand_precedence (x)
2811 < commutative_operand_precedence (y));
2812 }
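
/* Worked example of the precedences above: in (plus (reg) (const_int 1))
   the REG scores -1 (RTX_OBJ) and the CONST_INT -7, so
   swap_commutative_operands_p returns 0 and the expression is already
   canonical. In (plus (reg) (mult (reg) (reg))) the MULT scores 4
   (RTX_COMM_ARITH) against -1 for the REG, so the operands are swapped
   to put the MULT first.  */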
2813
2814 /* Return 1 if X is an autoincrement side effect and the register is
2815 not the stack pointer. */
2816 int
2817 auto_inc_p (rtx x)
2818 {
2819 switch (GET_CODE (x))
2820 {
2821 case PRE_INC:
2822 case POST_INC:
2823 case PRE_DEC:
2824 case POST_DEC:
2825 case PRE_MODIFY:
2826 case POST_MODIFY:
2827 /* There are no REG_INC notes for SP. */
2828 if (XEXP (x, 0) != stack_pointer_rtx)
2829 return 1;
2830 default:
2831 break;
2832 }
2833 return 0;
2834 }
2835
2836 /* Return nonzero if IN contains a piece of RTL whose address is LOC. */
2837 int
2838 loc_mentioned_in_p (rtx *loc, rtx in)
2839 {
2840 enum rtx_code code = GET_CODE (in);
2841 const char *fmt = GET_RTX_FORMAT (code);
2842 int i, j;
2843
2844 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2845 {
2846 if (loc == &in->u.fld[i].rt_rtx)
2847 return 1;
2848 if (fmt[i] == 'e')
2849 {
2850 if (loc_mentioned_in_p (loc, XEXP (in, i)))
2851 return 1;
2852 }
2853 else if (fmt[i] == 'E')
2854 for (j = XVECLEN (in, i) - 1; j >= 0; j--)
2855 if (loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
2856 return 1;
2857 }
2858 return 0;
2859 }
2860
2861 /* Helper function for subreg_lsb. Given a subreg's OUTER_MODE, INNER_MODE,
2862 and SUBREG_BYTE, return the bit offset where the subreg begins
2863 (counting from the least significant bit of the operand). */
2864
2865 unsigned int
2866 subreg_lsb_1 (enum machine_mode outer_mode,
2867 enum machine_mode inner_mode,
2868 unsigned int subreg_byte)
2869 {
2870 unsigned int bitpos;
2871 unsigned int byte;
2872 unsigned int word;
2873
2874 /* A paradoxical subreg begins at bit position 0. */
2875 if (GET_MODE_BITSIZE (outer_mode) > GET_MODE_BITSIZE (inner_mode))
2876 return 0;
2877
2878 if (WORDS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
2879 /* If the subreg crosses a word boundary ensure that
2880 it also begins and ends on a word boundary. */
2881 gcc_assert (!((subreg_byte % UNITS_PER_WORD
2882 + GET_MODE_SIZE (outer_mode)) > UNITS_PER_WORD
2883 && (subreg_byte % UNITS_PER_WORD
2884 || GET_MODE_SIZE (outer_mode) % UNITS_PER_WORD)));
2885
2886 if (WORDS_BIG_ENDIAN)
2887 word = (GET_MODE_SIZE (inner_mode)
2888 - (subreg_byte + GET_MODE_SIZE (outer_mode))) / UNITS_PER_WORD;
2889 else
2890 word = subreg_byte / UNITS_PER_WORD;
2891 bitpos = word * BITS_PER_WORD;
2892
2893 if (BYTES_BIG_ENDIAN)
2894 byte = (GET_MODE_SIZE (inner_mode)
2895 - (subreg_byte + GET_MODE_SIZE (outer_mode))) % UNITS_PER_WORD;
2896 else
2897 byte = subreg_byte % UNITS_PER_WORD;
2898 bitpos += byte * BITS_PER_UNIT;
2899
2900 return bitpos;
2901 }
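
/* A worked example, assuming a little-endian target with 4-byte words:
   for (subreg:QI (reg:DI) 5), SUBREG_BYTE is 5, so word = 5 / 4 = 1 and
   byte = 5 % 4 = 1, giving a bit offset of 1 * 32 + 1 * 8 = 40.  On a
   big-endian target the same SUBREG_BYTE selects a different slice of
   the register, which is why the offsets are recomputed from the mode
   sizes above rather than taken literally.  */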
2902
2903 /* Given a subreg X, return the bit offset where the subreg begins
2904 (counting from the least significant bit of the reg). */
2905
2906 unsigned int
2907 subreg_lsb (rtx x)
2908 {
2909 return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
2910 SUBREG_BYTE (x));
2911 }
2912
2913 /* This function returns the regno offset of a subreg expression.
2914 xregno - A regno of an inner hard subreg_reg (or what will become one).
2915 xmode - The mode of xregno.
2916 offset - The byte offset.
2917 ymode - The mode of a top level SUBREG (or what may become one).
2918 RETURN - The regno offset which would be used. */
2919 unsigned int
2920 subreg_regno_offset (unsigned int xregno, enum machine_mode xmode,
2921 unsigned int offset, enum machine_mode ymode)
2922 {
2923 int nregs_xmode, nregs_ymode;
2924 int mode_multiple, nregs_multiple;
2925 int y_offset;
2926
2927 gcc_assert (xregno < FIRST_PSEUDO_REGISTER);
2928
2929 /* Adjust nregs_xmode to allow for 'holes'. */
2930 if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
2931 nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
2932 else
2933 nregs_xmode = hard_regno_nregs[xregno][xmode];
2934
2935 nregs_ymode = hard_regno_nregs[xregno][ymode];
2936
2937 /* If this is a big endian paradoxical subreg, which uses more actual
2938 hard registers than the original register, we must return a negative
2939 offset so that we find the proper highpart of the register. */
2940 if (offset == 0
2941 && nregs_ymode > nregs_xmode
2942 && (GET_MODE_SIZE (ymode) > UNITS_PER_WORD
2943 ? WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN))
2944 return nregs_xmode - nregs_ymode;
2945
2946 if (offset == 0 || nregs_xmode == nregs_ymode)
2947 return 0;
2948
2949 /* Size of ymode must not be greater than the size of xmode. */
2950 mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode);
2951 gcc_assert (mode_multiple != 0);
2952
2953 y_offset = offset / GET_MODE_SIZE (ymode);
2954 nregs_multiple = nregs_xmode / nregs_ymode;
2955 return (y_offset / (mode_multiple / nregs_multiple)) * nregs_ymode;
2956 }
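
/* A worked example with assumed register sizes: if XMODE is TImode
   occupying 4 hard registers of 4 bytes each, YMODE is SImode occupying
   1 register, and OFFSET is 8, then mode_multiple = 16 / 4 = 4,
   nregs_multiple = 4 / 1 = 4, y_offset = 8 / 4 = 2, and the function
   returns (2 / (4 / 4)) * 1 = 2, i.e. the SUBREG refers to the third
   hard register of the group.  */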
2957
2958 /* This function returns true when the byte offset OFFSET is representable
2959 via subreg_regno_offset for the given hard register.
2960 xregno - A regno of an inner hard subreg_reg (or what will become one).
2961 xmode - The mode of xregno.
2962 offset - The byte offset.
2963 ymode - The mode of a top level SUBREG (or what may become one).
2964 RETURN - Whether the offset is representable. */
2965 bool
2966 subreg_offset_representable_p (unsigned int xregno, enum machine_mode xmode,
2967 unsigned int offset, enum machine_mode ymode)
2968 {
2969 int nregs_xmode, nregs_ymode;
2970 int mode_multiple, nregs_multiple;
2971 int y_offset;
2972 int regsize_xmode, regsize_ymode;
2973
2974 gcc_assert (xregno < FIRST_PSEUDO_REGISTER);
2975
2976 /* If there are holes in a non-scalar mode in registers, we expect
2977 that it is made up of its units concatenated together. */
2978 if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode))
2979 {
2980 enum machine_mode xmode_unit;
2981
2982 nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode);
2983 if (GET_MODE_INNER (xmode) == VOIDmode)
2984 xmode_unit = xmode;
2985 else
2986 xmode_unit = GET_MODE_INNER (xmode);
2987 gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit));
2988 gcc_assert (nregs_xmode
2989 == (GET_MODE_NUNITS (xmode)
2990 * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)));
2991 gcc_assert (hard_regno_nregs[xregno][xmode]
2992 == (hard_regno_nregs[xregno][xmode_unit]
2993 * GET_MODE_NUNITS (xmode)));
2994
2995 /* You can only ask for a SUBREG of a value with holes in the middle
2996 if you don't cross the holes. (Such a SUBREG should be done by
2997 picking a different register class, or doing it in memory if
2998 necessary.) An example of a value with holes is XCmode on 32-bit
2999 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
3000 3 for each part, but in memory it's two 128-bit parts.
3001 Padding is assumed to be at the end (not necessarily the 'high part')
3002 of each unit. */
3003 if ((offset / GET_MODE_SIZE (xmode_unit) + 1
3004 < GET_MODE_NUNITS (xmode))
3005 && (offset / GET_MODE_SIZE (xmode_unit)
3006 != ((offset + GET_MODE_SIZE (ymode) - 1)
3007 / GET_MODE_SIZE (xmode_unit))))
3008 return false;
3009 }
3010 else
3011 nregs_xmode = hard_regno_nregs[xregno][xmode];
3012
3013 nregs_ymode = hard_regno_nregs[xregno][ymode];
3014
3015 /* Paradoxical subregs are otherwise valid. */
3016 if (offset == 0
3017 && nregs_ymode > nregs_xmode
3018 && (GET_MODE_SIZE (ymode) > UNITS_PER_WORD
3019 ? WORDS_BIG_ENDIAN : BYTES_BIG_ENDIAN))
3020 return true;
3021
3022 /* If registers store different numbers of bits in the different
3023 modes, we cannot generally form this subreg. */
3024 regsize_xmode = GET_MODE_SIZE (xmode) / nregs_xmode;
3025 regsize_ymode = GET_MODE_SIZE (ymode) / nregs_ymode;
3026 if (regsize_xmode > regsize_ymode && nregs_ymode > 1)
3027 return false;
3028 if (regsize_ymode > regsize_xmode && nregs_xmode > 1)
3029 return false;
3030
3031 /* Lowpart subregs are otherwise valid. */
3032 if (offset == subreg_lowpart_offset (ymode, xmode))
3033 return true;
3034
3035 /* This should always pass, otherwise we don't know how to verify
3036 the constraint. These conditions may be relaxed but
3037 subreg_regno_offset would need to be redesigned. */
3038 gcc_assert ((GET_MODE_SIZE (xmode) % GET_MODE_SIZE (ymode)) == 0);
3039 gcc_assert ((nregs_xmode % nregs_ymode) == 0);
3040
3041 /* The XMODE value can be seen as a vector of NREGS_XMODE
3042 values. The subreg must represent a lowpart of a given field.
3043 Compute which field it is. */
3044 offset -= subreg_lowpart_offset (ymode,
3045 mode_for_size (GET_MODE_BITSIZE (xmode)
3046 / nregs_xmode,
3047 MODE_INT, 0));
3048
3049 /* Size of ymode must not be greater than the size of xmode. */
3050 mode_multiple = GET_MODE_SIZE (xmode) / GET_MODE_SIZE (ymode);
3051 gcc_assert (mode_multiple != 0);
3052
3053 y_offset = offset / GET_MODE_SIZE (ymode);
3054 nregs_multiple = nregs_xmode / nregs_ymode;
3055
3056 gcc_assert ((offset % GET_MODE_SIZE (ymode)) == 0);
3057 gcc_assert ((mode_multiple % nregs_multiple) == 0);
3058
3059 return (!(y_offset % (mode_multiple / nregs_multiple)));
3060 }
3061
3062 /* Return the final regno that a subreg expression refers to. */
3063 unsigned int
3064 subreg_regno (rtx x)
3065 {
3066 unsigned int ret;
3067 rtx subreg = SUBREG_REG (x);
3068 int regno = REGNO (subreg);
3069
3070 ret = regno + subreg_regno_offset (regno,
3071 GET_MODE (subreg),
3072 SUBREG_BYTE (x),
3073 GET_MODE (x));
3074 return ret;
3075
3076 }
3077 struct parms_set_data
3078 {
3079 int nregs;
3080 HARD_REG_SET regs;
3081 };
3082
3083 /* Helper function for noticing stores to parameter registers. */
3084 static void
3085 parms_set (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
3086 {
3087 struct parms_set_data *d = data;
3088 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
3089 && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
3090 {
3091 CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
3092 d->nregs--;
3093 }
3094 }
3095
3096 /* Look backward for first parameter to be loaded.
3097 Note that loads of all parameters will not necessarily be
3098 found if CSE has eliminated some of them (e.g., an argument
3099 to the outer function is passed down as a parameter).
3100 Do not skip BOUNDARY. */
3101 rtx
3102 find_first_parameter_load (rtx call_insn, rtx boundary)
3103 {
3104 struct parms_set_data parm;
3105 rtx p, before, first_set;
3106
3107 /* Since different machines initialize their parameter registers
3108 in different orders, assume nothing. Collect the set of all
3109 parameter registers. */
3110 CLEAR_HARD_REG_SET (parm.regs);
3111 parm.nregs = 0;
3112 for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
3113 if (GET_CODE (XEXP (p, 0)) == USE
3114 && REG_P (XEXP (XEXP (p, 0), 0)))
3115 {
3116 gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);
3117
3118 /* We only care about registers which can hold function
3119 arguments. */
3120 if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
3121 continue;
3122
3123 SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
3124 parm.nregs++;
3125 }
3126 before = call_insn;
3127 first_set = call_insn;
3128
3129 /* Search backward for the first set of a register in this set. */
3130 while (parm.nregs && before != boundary)
3131 {
3132 before = PREV_INSN (before);
3133
3134 /* It is possible that some loads got CSEed from one call to
3135 another. Stop in that case. */
3136 if (CALL_P (before))
3137 break;
3138
3139 /* Our caller must either ensure that we will find all sets
3140 (in case the code has not been optimized yet), or guard
3141 against possible labels by setting BOUNDARY to the preceding
3142 CODE_LABEL. */
3143 if (LABEL_P (before))
3144 {
3145 gcc_assert (before == boundary);
3146 break;
3147 }
3148
3149 if (INSN_P (before))
3150 {
3151 int nregs_old = parm.nregs;
3152 note_stores (PATTERN (before), parms_set, &parm);
3153 /* If we found something that did not set a parameter reg,
3154 we're done. Do not keep going, as that might result
3155 in hoisting an insn before the setting of a pseudo
3156 that is used by the hoisted insn. */
3157 if (nregs_old != parm.nregs)
3158 first_set = before;
3159 else
3160 break;
3161 }
3162 }
3163 return first_set;
3164 }
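
/* A small usage sketch with hypothetical variables: a pass that must not
   separate argument loads from their CALL_INSN can bound code motion with

     rtx limit = find_first_parameter_load (call_insn, bb_head);

   and then refuse to move instructions across LIMIT, where bb_head is the
   label or first insn of the block containing CALL_INSN.  */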
3165
3166 /* Return true if we should avoid inserting code between INSN and preceding
3167 call instruction. */
3168
3169 bool
3170 keep_with_call_p (rtx insn)
3171 {
3172 rtx set;
3173
3174 if (INSN_P (insn) && (set = single_set (insn)) != NULL)
3175 {
3176 if (REG_P (SET_DEST (set))
3177 && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
3178 && fixed_regs[REGNO (SET_DEST (set))]
3179 && general_operand (SET_SRC (set), VOIDmode))
3180 return true;
3181 if (REG_P (SET_SRC (set))
3182 && FUNCTION_VALUE_REGNO_P (REGNO (SET_SRC (set)))
3183 && REG_P (SET_DEST (set))
3184 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
3185 return true;
3186 /* There may be a stack pop just after the call and before the store
3187 of the return register. Search for the actual store when deciding
3188 if we can break or not. */
3189 if (SET_DEST (set) == stack_pointer_rtx)
3190 {
3191 rtx i2 = next_nonnote_insn (insn);
3192 if (i2 && keep_with_call_p (i2))
3193 return true;
3194 }
3195 }
3196 return false;
3197 }
3198
3199 /* Return true if LABEL is a target of JUMP_INSN. This applies only
3200 to non-complex jumps. That is, direct unconditional, conditional,
3201 and tablejumps, but not computed jumps or returns. It also does
3202 not apply to the fallthru case of a conditional jump. */
3203
3204 bool
3205 label_is_jump_target_p (rtx label, rtx jump_insn)
3206 {
3207 rtx tmp = JUMP_LABEL (jump_insn);
3208
3209 if (label == tmp)
3210 return true;
3211
3212 if (tablejump_p (jump_insn, NULL, &tmp))
3213 {
3214 rtvec vec = XVEC (PATTERN (tmp),
3215 GET_CODE (PATTERN (tmp)) == ADDR_DIFF_VEC);
3216 int i, veclen = GET_NUM_ELEM (vec);
3217
3218 for (i = 0; i < veclen; ++i)
3219 if (XEXP (RTVEC_ELT (vec, i), 0) == label)
3220 return true;
3221 }
3222
3223 return false;
3224 }
3225
3226
3227 /* Return an estimate of the cost of computing rtx X.
3228 One use is in cse, to decide which expression to keep in the hash table.
3229 Another is in rtl generation, to pick the cheapest way to multiply.
3230 Other uses like the latter are expected in the future. */
3231
3232 int
3233 rtx_cost (rtx x, enum rtx_code outer_code ATTRIBUTE_UNUSED)
3234 {
3235 int i, j;
3236 enum rtx_code code;
3237 const char *fmt;
3238 int total;
3239
3240 if (x == 0)
3241 return 0;
3242
3243 /* Compute the default costs of certain things.
3244 Note that targetm.rtx_costs can override the defaults. */
3245
3246 code = GET_CODE (x);
3247 switch (code)
3248 {
3249 case MULT:
3250 total = COSTS_N_INSNS (5);
3251 break;
3252 case DIV:
3253 case UDIV:
3254 case MOD:
3255 case UMOD:
3256 total = COSTS_N_INSNS (7);
3257 break;
3258 case USE:
3259 /* Used in combine.c as a marker. */
3260 total = 0;
3261 break;
3262 default:
3263 total = COSTS_N_INSNS (1);
3264 }
3265
3266 switch (code)
3267 {
3268 case REG:
3269 return 0;
3270
3271 case SUBREG:
3272 total = 0;
3273 /* If we can't tie these modes, make this expensive. The larger
3274 the mode, the more expensive it is. */
3275 if (! MODES_TIEABLE_P (GET_MODE (x), GET_MODE (SUBREG_REG (x))))
3276 return COSTS_N_INSNS (2
3277 + GET_MODE_SIZE (GET_MODE (x)) / UNITS_PER_WORD);
3278 break;
3279
3280 default:
3281 if (targetm.rtx_costs (x, code, outer_code, &total))
3282 return total;
3283 break;
3284 }
3285
3286 /* Sum the costs of the sub-rtx's, plus cost of this operation,
3287 which is already in total. */
3288
3289 fmt = GET_RTX_FORMAT (code);
3290 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3291 if (fmt[i] == 'e')
3292 total += rtx_cost (XEXP (x, i), code);
3293 else if (fmt[i] == 'E')
3294 for (j = 0; j < XVECLEN (x, i); j++)
3295 total += rtx_cost (XVECEXP (x, i, j), code);
3296
3297 return total;
3298 }
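
/* For illustration (the surrounding names are hypothetical): comparing two
   candidate expressions, as cse does, might be written as

     if (rtx_cost (new_expr, SET) < rtx_cost (old_expr, SET))
       prefer_new_expr ();

   with SET as the outer code because the expression will appear on the
   right-hand side of a SET.  */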
3299
3300 /* Return the cost of address expression X.
3301 Expect that X is a properly formed address reference. */
3302
3303 int
3304 address_cost (rtx x, enum machine_mode mode)
3305 {
3306 /* We may be asked for the cost of various unusual addresses, such as the
3307 operands of a push instruction. It is not worthwhile to complicate the
3308 target hook with such cases. */
3309
3310 if (!memory_address_p (mode, x))
3311 return 1000;
3312
3313 return targetm.address_cost (x);
3314 }
3315
3316 /* If the target doesn't override, compute the cost as with arithmetic. */
3317
3318 int
3319 default_address_cost (rtx x)
3320 {
3321 return rtx_cost (x, MEM);
3322 }
3323
3324
3325 unsigned HOST_WIDE_INT
3326 nonzero_bits (rtx x, enum machine_mode mode)
3327 {
3328 return cached_nonzero_bits (x, mode, NULL_RTX, VOIDmode, 0);
3329 }
3330
3331 unsigned int
3332 num_sign_bit_copies (rtx x, enum machine_mode mode)
3333 {
3334 return cached_num_sign_bit_copies (x, mode, NULL_RTX, VOIDmode, 0);
3335 }
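
/* For example (illustrative values): if X is (and:SI (reg:SI) (const_int 255))
   and nothing more is known about the REG, nonzero_bits (x, SImode) returns
   0xff, and num_sign_bit_copies (x, SImode) returns at least 24, since the
   24 high-order bits must be zero and therefore equal to the sign bit.  */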
3336
3337 /* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
3338 It avoids exponential behavior in nonzero_bits1 when X has
3339 identical subexpressions on the first or the second level. */
3340
3341 static unsigned HOST_WIDE_INT
3342 cached_nonzero_bits (rtx x, enum machine_mode mode, rtx known_x,
3343 enum machine_mode known_mode,
3344 unsigned HOST_WIDE_INT known_ret)
3345 {
3346 if (x == known_x && mode == known_mode)
3347 return known_ret;
3348
3349 /* Try to find identical subexpressions. If found call
3350 nonzero_bits1 on X with the subexpressions as KNOWN_X and the
3351 precomputed value for the subexpression as KNOWN_RET. */
3352
3353 if (ARITHMETIC_P (x))
3354 {
3355 rtx x0 = XEXP (x, 0);
3356 rtx x1 = XEXP (x, 1);
3357
3358 /* Check the first level. */
3359 if (x0 == x1)
3360 return nonzero_bits1 (x, mode, x0, mode,
3361 cached_nonzero_bits (x0, mode, known_x,
3362 known_mode, known_ret));
3363
3364 /* Check the second level. */
3365 if (ARITHMETIC_P (x0)
3366 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
3367 return nonzero_bits1 (x, mode, x1, mode,
3368 cached_nonzero_bits (x1, mode, known_x,
3369 known_mode, known_ret));
3370
3371 if (ARITHMETIC_P (x1)
3372 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
3373 return nonzero_bits1 (x, mode, x0, mode,
3374 cached_nonzero_bits (x0, mode, known_x,
3375 known_mode, known_ret));
3376 }
3377
3378 return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
3379 }
3380
3381 /* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
3382 We don't let nonzero_bits recur into num_sign_bit_copies, because that
3383 is less useful. We can't allow both, because that results in exponential
3384 run time recursion. There is a nullstone testcase that triggered
3385 this. This macro avoids accidental uses of num_sign_bit_copies. */
3386 #define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
3387
3388 /* Given an expression, X, compute which bits in X can be nonzero.
3389 We don't care about bits outside of those defined in MODE.
3390
3391 For most X this is simply GET_MODE_MASK (MODE), but if X is
3392 an arithmetic operation, we can do better. */
3393
3394 static unsigned HOST_WIDE_INT
3395 nonzero_bits1 (rtx x, enum machine_mode mode, rtx known_x,
3396 enum machine_mode known_mode,
3397 unsigned HOST_WIDE_INT known_ret)
3398 {
3399 unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
3400 unsigned HOST_WIDE_INT inner_nz;
3401 enum rtx_code code;
3402 unsigned int mode_width = GET_MODE_BITSIZE (mode);
3403
3404 /* For floating-point values, assume all bits are needed. */
3405 if (FLOAT_MODE_P (GET_MODE (x)) || FLOAT_MODE_P (mode))
3406 return nonzero;
3407
3408 /* If X is wider than MODE, use its mode instead. */
3409 if (GET_MODE_BITSIZE (GET_MODE (x)) > mode_width)
3410 {
3411 mode = GET_MODE (x);
3412 nonzero = GET_MODE_MASK (mode);
3413 mode_width = GET_MODE_BITSIZE (mode);
3414 }
3415
3416 if (mode_width > HOST_BITS_PER_WIDE_INT)
3417 /* Our only callers in this case look for single bit values. So
3418 just return the mode mask. Those tests will then be false. */
3419 return nonzero;
3420
3421 #ifndef WORD_REGISTER_OPERATIONS
3422 /* If MODE is wider than X, but both are a single word for both the host
3423 and target machines, we can compute this from which bits of the
3424 object might be nonzero in its own mode, taking into account the fact
3425 that on many CISC machines, accessing an object in a wider mode
3426 causes the high-order bits to become undefined. So they are
3427 not known to be zero. */
3428
3429 if (GET_MODE (x) != VOIDmode && GET_MODE (x) != mode
3430 && GET_MODE_BITSIZE (GET_MODE (x)) <= BITS_PER_WORD
3431 && GET_MODE_BITSIZE (GET_MODE (x)) <= HOST_BITS_PER_WIDE_INT
3432 && GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (GET_MODE (x)))
3433 {
3434 nonzero &= cached_nonzero_bits (x, GET_MODE (x),
3435 known_x, known_mode, known_ret);
3436 nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x));
3437 return nonzero;
3438 }
3439 #endif
3440
3441 code = GET_CODE (x);
3442 switch (code)
3443 {
3444 case REG:
3445 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
3446 /* If pointers extend unsigned and this is a pointer in Pmode, say that
3447 all the bits above ptr_mode are known to be zero. */
3448 if (POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
3449 && REG_POINTER (x))
3450 nonzero &= GET_MODE_MASK (ptr_mode);
3451 #endif
3452
3453 /* Include declared information about alignment of pointers. */
3454 /* ??? We don't properly preserve REG_POINTER changes across
3455 pointer-to-integer casts, so we can't trust it except for
3456 things that we know must be pointers. See execute/960116-1.c. */
3457 if ((x == stack_pointer_rtx
3458 || x == frame_pointer_rtx
3459 || x == arg_pointer_rtx)
3460 && REGNO_POINTER_ALIGN (REGNO (x)))
3461 {
3462 unsigned HOST_WIDE_INT alignment
3463 = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;
3464
3465 #ifdef PUSH_ROUNDING
3466 /* If PUSH_ROUNDING is defined, it is possible for the
3467 stack to be momentarily aligned only to that amount,
3468 so we pick the least alignment. */
3469 if (x == stack_pointer_rtx && PUSH_ARGS)
3470 alignment = MIN ((unsigned HOST_WIDE_INT) PUSH_ROUNDING (1),
3471 alignment);
3472 #endif
3473
3474 nonzero &= ~(alignment - 1);
3475 }
3476
3477 {
3478 unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
3479 rtx new = rtl_hooks.reg_nonzero_bits (x, mode, known_x,
3480 known_mode, known_ret,
3481 &nonzero_for_hook);
3482
3483 if (new)
3484 nonzero_for_hook &= cached_nonzero_bits (new, mode, known_x,
3485 known_mode, known_ret);
3486
3487 return nonzero_for_hook;
3488 }
3489
3490 case CONST_INT:
3491 #ifdef SHORT_IMMEDIATES_SIGN_EXTEND
3492 /* If X is negative in MODE, sign-extend the value. */
3493 if (INTVAL (x) > 0 && mode_width < BITS_PER_WORD
3494 && 0 != (INTVAL (x) & ((HOST_WIDE_INT) 1 << (mode_width - 1))))
3495 return (INTVAL (x) | ((HOST_WIDE_INT) (-1) << mode_width));
3496 #endif
3497
3498 return INTVAL (x);
3499
3500 case MEM:
3501 #ifdef LOAD_EXTEND_OP
3502 /* In many, if not most, RISC machines, reading a byte from memory
3503 zeros the rest of the register. Noticing that fact saves a lot
3504 of extra zero-extends. */
3505 if (LOAD_EXTEND_OP (GET_MODE (x)) == ZERO_EXTEND)
3506 nonzero &= GET_MODE_MASK (GET_MODE (x));
3507 #endif
3508 break;
3509
3510 case EQ: case NE:
3511 case UNEQ: case LTGT:
3512 case GT: case GTU: case UNGT:
3513 case LT: case LTU: case UNLT:
3514 case GE: case GEU: case UNGE:
3515 case LE: case LEU: case UNLE:
3516 case UNORDERED: case ORDERED:
3517 /* If this produces an integer result, we know which bits are set.
3518 Code here used to clear bits outside the mode of X, but that is
3519 now done above. */
3520 /* Mind that MODE is the mode the caller wants to look at this
3521 operation in, and not the actual operation mode. We can wind
3522 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
3523 that describes the results of a vector compare. */
3524 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_INT
3525 && mode_width <= HOST_BITS_PER_WIDE_INT)
3526 nonzero = STORE_FLAG_VALUE;
3527 break;
3528
3529 case NEG:
3530 #if 0
3531 /* Disabled to avoid exponential mutual recursion between nonzero_bits
3532 and num_sign_bit_copies. */
3533 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
3534 == GET_MODE_BITSIZE (GET_MODE (x)))
3535 nonzero = 1;
3536 #endif
3537
3538 if (GET_MODE_SIZE (GET_MODE (x)) < mode_width)
3539 nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (GET_MODE (x)));
3540 break;
3541
3542 case ABS:
3543 #if 0
3544 /* Disabled to avoid exponential mutual recursion between nonzero_bits
3545 and num_sign_bit_copies. */
3546 if (num_sign_bit_copies (XEXP (x, 0), GET_MODE (x))
3547 == GET_MODE_BITSIZE (GET_MODE (x)))
3548 nonzero = 1;
3549 #endif
3550 break;
3551
3552 case TRUNCATE:
3553 nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
3554 known_x, known_mode, known_ret)
3555 & GET_MODE_MASK (mode));
3556 break;
3557
3558 case ZERO_EXTEND:
3559 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
3560 known_x, known_mode, known_ret);
3561 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
3562 nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
3563 break;
3564
3565 case SIGN_EXTEND:
3566 /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
3567 Otherwise, show that all the bits in the outer mode but not in the
3568 inner mode may be nonzero. */
3569 inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
3570 known_x, known_mode, known_ret);
3571 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
3572 {
3573 inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
3574 if (inner_nz
3575 & (((HOST_WIDE_INT) 1
3576 << (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0))) - 1))))
3577 inner_nz |= (GET_MODE_MASK (mode)
3578 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
3579 }
3580
3581 nonzero &= inner_nz;
3582 break;
3583
3584 case AND:
3585 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
3586 known_x, known_mode, known_ret)
3587 & cached_nonzero_bits (XEXP (x, 1), mode,
3588 known_x, known_mode, known_ret);
3589 break;
3590
3591 case XOR: case IOR:
3592 case UMIN: case UMAX: case SMIN: case SMAX:
3593 {
3594 unsigned HOST_WIDE_INT nonzero0 =
3595 cached_nonzero_bits (XEXP (x, 0), mode,
3596 known_x, known_mode, known_ret);
3597
3598 /* Don't call nonzero_bits a second time if it cannot change
3599 anything. */
3600 if ((nonzero & nonzero0) != nonzero)
3601 nonzero &= nonzero0
3602 | cached_nonzero_bits (XEXP (x, 1), mode,
3603 known_x, known_mode, known_ret);
3604 }
3605 break;
3606
3607 case PLUS: case MINUS:
3608 case MULT:
3609 case DIV: case UDIV:
3610 case MOD: case UMOD:
3611 /* We can apply the rules of arithmetic to compute the number of
3612 high- and low-order zero bits of these operations. We start by
3613 computing the width (position of the highest-order nonzero bit)
3614 and the number of low-order zero bits for each value. */
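      /* For example (illustrative values only): multiplying a value known
         to fit in 8 bits with 2 low-order zero bits by one that fits in
         4 bits with 1 low-order zero bit yields width0 + width1 = 12
         significant bits and low0 + low1 = 3 guaranteed zero low-order
         bits, so only bits 3..11 of the product can be nonzero.  */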
3615 {
3616 unsigned HOST_WIDE_INT nz0 =
3617 cached_nonzero_bits (XEXP (x, 0), mode,
3618 known_x, known_mode, known_ret);
3619 unsigned HOST_WIDE_INT nz1 =
3620 cached_nonzero_bits (XEXP (x, 1), mode,
3621 known_x, known_mode, known_ret);
3622 int sign_index = GET_MODE_BITSIZE (GET_MODE (x)) - 1;
3623 int width0 = floor_log2 (nz0) + 1;
3624 int width1 = floor_log2 (nz1) + 1;
3625 int low0 = floor_log2 (nz0 & -nz0);
3626 int low1 = floor_log2 (nz1 & -nz1);
3627 HOST_WIDE_INT op0_maybe_minusp
3628 = (nz0 & ((HOST_WIDE_INT) 1 << sign_index));
3629 HOST_WIDE_INT op1_maybe_minusp
3630 = (nz1 & ((HOST_WIDE_INT) 1 << sign_index));
3631 unsigned int result_width = mode_width;
3632 int result_low = 0;
3633
3634 switch (code)
3635 {
3636 case PLUS:
3637 result_width = MAX (width0, width1) + 1;
3638 result_low = MIN (low0, low1);
3639 break;
3640 case MINUS:
3641 result_low = MIN (low0, low1);
3642 break;
3643 case MULT:
3644 result_width = width0 + width1;
3645 result_low = low0 + low1;
3646 break;
3647 case DIV:
3648 if (width1 == 0)
3649 break;
3650 if (! op0_maybe_minusp && ! op1_maybe_minusp)
3651 result_width = width0;
3652 break;
3653 case UDIV:
3654 if (width1 == 0)
3655 break;
3656 result_width = width0;
3657 break;
3658 case MOD:
3659 if (width1 == 0)
3660 break;
3661 if (! op0_maybe_minusp && ! op1_maybe_minusp)
3662 result_width = MIN (width0, width1);
3663 result_low = MIN (low0, low1);
3664 break;
3665 case UMOD:
3666 if (width1 == 0)
3667 break;
3668 result_width = MIN (width0, width1);
3669 result_low = MIN (low0, low1);
3670 break;
3671 default:
3672 gcc_unreachable ();
3673 }
3674
3675 if (result_width < mode_width)
3676 nonzero &= ((HOST_WIDE_INT) 1 << result_width) - 1;
3677
3678 if (result_low > 0)
3679 nonzero &= ~(((HOST_WIDE_INT) 1 << result_low) - 1);
3680
3681 #ifdef POINTERS_EXTEND_UNSIGNED
3682 /* If pointers extend unsigned and this is an addition or subtraction
3683 to a pointer in Pmode, all the bits above ptr_mode are known to be
3684 zero. */
3685 if (POINTERS_EXTEND_UNSIGNED > 0 && GET_MODE (x) == Pmode
3686 && (code == PLUS || code == MINUS)
3687 && REG_P (XEXP (x, 0)) && REG_POINTER (XEXP (x, 0)))
3688 nonzero &= GET_MODE_MASK (ptr_mode);
3689 #endif
3690 }
3691 break;
3692
3693 case ZERO_EXTRACT:
3694 if (GET_CODE (XEXP (x, 1)) == CONST_INT
3695 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
3696 nonzero &= ((HOST_WIDE_INT) 1 << INTVAL (XEXP (x, 1))) - 1;
3697 break;
3698
3699 case SUBREG:
3700 /* If this is a SUBREG formed for a promoted variable that has
3701 been zero-extended, we know that at least the high-order bits
3702 are zero, though others might be too. */
3703
3704 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x) > 0)
3705 nonzero = GET_MODE_MASK (GET_MODE (x))
3706 & cached_nonzero_bits (SUBREG_REG (x), GET_MODE (x),
3707 known_x, known_mode, known_ret);
3708
3709 /* If the inner mode is a single word for both the host and target
3710 machines, we can compute this from which bits of the inner
3711 object might be nonzero. */
3712 if (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) <= BITS_PER_WORD
3713 && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))
3714 <= HOST_BITS_PER_WIDE_INT))
3715 {
3716 nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
3717 known_x, known_mode, known_ret);
3718
3719 #if defined (WORD_REGISTER_OPERATIONS) && defined (LOAD_EXTEND_OP)
3720 /* If this is a typical RISC machine, we only have to worry
3721 about the way loads are extended. */
3722 if ((LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
3723 ? (((nonzero
3724 & (((unsigned HOST_WIDE_INT) 1
3725 << (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))) - 1))))
3726 != 0))
3727 : LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) != ZERO_EXTEND)
3728 || !MEM_P (SUBREG_REG (x)))
3729 #endif
3730 {
3731 /* On many CISC machines, accessing an object in a wider mode
3732 causes the high-order bits to become undefined. So they are
3733 not known to be zero. */
3734 if (GET_MODE_SIZE (GET_MODE (x))
3735 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
3736 nonzero |= (GET_MODE_MASK (GET_MODE (x))
3737 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x))));
3738 }
3739 }
3740 break;
3741
3742 case ASHIFTRT:
3743 case LSHIFTRT:
3744 case ASHIFT:
3745 case ROTATE:
3746 /* The nonzero bits are in two classes: any bits within MODE
3747 that aren't in GET_MODE (x) are always significant. The rest of the
3748 nonzero bits are those that are significant in the operand of
3749 the shift when shifted the appropriate number of bits. This
3750 shows that high-order bits are cleared by the right shift and
3751 low-order bits by left shifts. */
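      /* A small illustration, assuming SImode and a shift count of 8: for
         (lshiftrt:SI X (const_int 8)) the 8 high-order bits of the result
         are known to be zero, while for (ashift:SI X (const_int 8)) the
         8 low-order bits are; the code below derives both facts by shifting
         the nonzero-bit mask of the operand.  */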
3752 if (GET_CODE (XEXP (x, 1)) == CONST_INT
3753 && INTVAL (XEXP (x, 1)) >= 0
3754 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
3755 {
3756 enum machine_mode inner_mode = GET_MODE (x);
3757 unsigned int width = GET_MODE_BITSIZE (inner_mode);
3758 int count = INTVAL (XEXP (x, 1));
3759 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (inner_mode);
3760 unsigned HOST_WIDE_INT op_nonzero =
3761 cached_nonzero_bits (XEXP (x, 0), mode,
3762 known_x, known_mode, known_ret);
3763 unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
3764 unsigned HOST_WIDE_INT outer = 0;
3765
3766 if (mode_width > width)
3767 outer = (op_nonzero & nonzero & ~mode_mask);
3768
3769 if (code == LSHIFTRT)
3770 inner >>= count;
3771 else if (code == ASHIFTRT)
3772 {
3773 inner >>= count;
3774
3775 /* If the sign bit may have been nonzero before the shift, we
3776 need to mark all the places it could have been copied to
3777 by the shift as possibly nonzero. */
3778 if (inner & ((HOST_WIDE_INT) 1 << (width - 1 - count)))
3779 inner |= (((HOST_WIDE_INT) 1 << count) - 1) << (width - count);
3780 }
3781 else if (code == ASHIFT)
3782 inner <<= count;
3783 else
3784 inner = ((inner << (count % width)
3785 | (inner >> (width - (count % width)))) & mode_mask);
3786
3787 nonzero &= (outer | inner);
3788 }
3789 break;
3790
3791 case FFS:
3792 case POPCOUNT:
3793 /* This is at most the number of bits in the mode. */
3794 nonzero = ((HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
3795 break;
3796
3797 case CLZ:
3798 /* If CLZ has a known value at zero, then the nonzero bits are
3799 that value, plus the number of bits in the mode minus one. */
3800 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
3801 nonzero |= ((HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
3802 else
3803 nonzero = -1;
3804 break;
3805
3806 case CTZ:
3807 /* If CTZ has a known value at zero, then the nonzero bits are
3808 that value, plus the number of bits in the mode minus one. */
3809 if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
3810 nonzero |= ((HOST_WIDE_INT) 1 << (floor_log2 (mode_width))) - 1;
3811 else
3812 nonzero = -1;
3813 break;
3814
3815 case PARITY:
3816 nonzero = 1;
3817 break;
3818
3819 case IF_THEN_ELSE:
3820 {
3821 unsigned HOST_WIDE_INT nonzero_true =
3822 cached_nonzero_bits (XEXP (x, 1), mode,
3823 known_x, known_mode, known_ret);
3824
3825 /* Don't call nonzero_bits a second time if it cannot change
3826 anything. */
3827 if ((nonzero & nonzero_true) != nonzero)
3828 nonzero &= nonzero_true
3829 | cached_nonzero_bits (XEXP (x, 2), mode,
3830 known_x, known_mode, known_ret);
3831 }
3832 break;
3833
3834 default:
3835 break;
3836 }
3837
3838 return nonzero;
3839 }
3840
3841 /* See the macro definition above. */
3842 #undef cached_num_sign_bit_copies
3843
3844
3845 /* The function cached_num_sign_bit_copies is a wrapper around
3846 num_sign_bit_copies1. It avoids exponential behavior in
3847 num_sign_bit_copies1 when X has identical subexpressions on the
3848 first or the second level. */
3849
3850 static unsigned int
3851 cached_num_sign_bit_copies (rtx x, enum machine_mode mode, rtx known_x,
3852 enum machine_mode known_mode,
3853 unsigned int known_ret)
3854 {
3855 if (x == known_x && mode == known_mode)
3856 return known_ret;
3857
3858 /* Try to find identical subexpressions. If found call
3859 num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
3860 the precomputed value for the subexpression as KNOWN_RET. */
3861
3862 if (ARITHMETIC_P (x))
3863 {
3864 rtx x0 = XEXP (x, 0);
3865 rtx x1 = XEXP (x, 1);
3866
3867 /* Check the first level. */
3868 if (x0 == x1)
3869 return
3870 num_sign_bit_copies1 (x, mode, x0, mode,
3871 cached_num_sign_bit_copies (x0, mode, known_x,
3872 known_mode,
3873 known_ret));
3874
3875 /* Check the second level. */
3876 if (ARITHMETIC_P (x0)
3877 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
3878 return
3879 num_sign_bit_copies1 (x, mode, x1, mode,
3880 cached_num_sign_bit_copies (x1, mode, known_x,
3881 known_mode,
3882 known_ret));
3883
3884 if (ARITHMETIC_P (x1)
3885 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
3886 return
3887 num_sign_bit_copies1 (x, mode, x0, mode,
3888 cached_num_sign_bit_copies (x0, mode, known_x,
3889 known_mode,
3890 known_ret));
3891 }
3892
3893 return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
3894 }
3895
3896 /* Return the number of bits at the high-order end of X that are known to
3897 be equal to the sign bit. X will be used in mode MODE; if MODE is
3898 VOIDmode, X will be used in its own mode. The returned value will always
3899 be between 1 and the number of bits in MODE. */
3900
3901 static unsigned int
3902 num_sign_bit_copies1 (rtx x, enum machine_mode mode, rtx known_x,
3903 enum machine_mode known_mode,
3904 unsigned int known_ret)
3905 {
3906 enum rtx_code code = GET_CODE (x);
3907 unsigned int bitwidth = GET_MODE_BITSIZE (mode);
3908 int num0, num1, result;
3909 unsigned HOST_WIDE_INT nonzero;
3910
3911 /* If we weren't given a mode, use the mode of X. If the mode is still
3912 VOIDmode, we don't know anything. Likewise if one of the modes is
3913 floating-point. */
3914
3915 if (mode == VOIDmode)
3916 mode = GET_MODE (x);
3917
3918 if (mode == VOIDmode || FLOAT_MODE_P (mode) || FLOAT_MODE_P (GET_MODE (x)))
3919 return 1;
3920
3921 /* For a smaller object, just ignore the high bits. */
3922 if (bitwidth < GET_MODE_BITSIZE (GET_MODE (x)))
3923 {
3924 num0 = cached_num_sign_bit_copies (x, GET_MODE (x),
3925 known_x, known_mode, known_ret);
3926 return MAX (1,
3927 num0 - (int) (GET_MODE_BITSIZE (GET_MODE (x)) - bitwidth));
3928 }
3929
3930 if (GET_MODE (x) != VOIDmode && bitwidth > GET_MODE_BITSIZE (GET_MODE (x)))
3931 {
3932 #ifndef WORD_REGISTER_OPERATIONS
3933 /* If this machine does not do all register operations on the entire
3934 register and MODE is wider than the mode of X, we can say nothing
3935 at all about the high-order bits. */
3936 return 1;
3937 #else
3938 /* Likewise on machines that do, if the mode of the object is smaller
3939 than a word and loads of that size don't sign extend, we can say
3940 nothing about the high order bits. */
3941 if (GET_MODE_BITSIZE (GET_MODE (x)) < BITS_PER_WORD
3942 #ifdef LOAD_EXTEND_OP
3943 && LOAD_EXTEND_OP (GET_MODE (x)) != SIGN_EXTEND
3944 #endif
3945 )
3946 return 1;
3947 #endif
3948 }
3949
3950 switch (code)
3951 {
3952 case REG:
3953
3954 #if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
3955 /* If pointers extend signed and this is a pointer in Pmode, say that
3956 all the bits above ptr_mode are known to be sign bit copies. */
3957 if (! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode && mode == Pmode
3958 && REG_POINTER (x))
3959 return GET_MODE_BITSIZE (Pmode) - GET_MODE_BITSIZE (ptr_mode) + 1;
3960 #endif
3961
3962 {
3963 unsigned int copies_for_hook = 1, copies = 1;
3964 rtx new = rtl_hooks.reg_num_sign_bit_copies (x, mode, known_x,
3965 known_mode, known_ret,
3966 &copies_for_hook);
3967
3968 if (new)
3969 copies = cached_num_sign_bit_copies (new, mode, known_x,
3970 known_mode, known_ret);
3971
3972 if (copies > 1 || copies_for_hook > 1)
3973 return MAX (copies, copies_for_hook);
3974
3975 /* Else, use nonzero_bits to guess num_sign_bit_copies (see below). */
3976 }
3977 break;
3978
3979 case MEM:
3980 #ifdef LOAD_EXTEND_OP
3981 /* Some RISC machines sign-extend all loads of smaller than a word. */
3982 if (LOAD_EXTEND_OP (GET_MODE (x)) == SIGN_EXTEND)
3983 return MAX (1, ((int) bitwidth
3984 - (int) GET_MODE_BITSIZE (GET_MODE (x)) + 1));
3985 #endif
3986 break;
3987
3988 case CONST_INT:
3989 /* If the constant is negative, take its 1's complement and remask.
3990 Then see how many zero bits we have. */
3991 nonzero = INTVAL (x) & GET_MODE_MASK (mode);
3992 if (bitwidth <= HOST_BITS_PER_WIDE_INT
3993 && (nonzero & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
3994 nonzero = (~nonzero) & GET_MODE_MASK (mode);
3995
3996 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
3997
3998 case SUBREG:
3999 /* If this is a SUBREG for a promoted object that is sign-extended
4000 and we are looking at it in a wider mode, we know that at least the
4001 high-order bits are known to be sign bit copies. */
4002
4003 if (SUBREG_PROMOTED_VAR_P (x) && ! SUBREG_PROMOTED_UNSIGNED_P (x))
4004 {
4005 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4006 known_x, known_mode, known_ret);
4007 return MAX ((int) bitwidth
4008 - (int) GET_MODE_BITSIZE (GET_MODE (x)) + 1,
4009 num0);
4010 }
4011
4012 /* For a smaller object, just ignore the high bits. */
4013 if (bitwidth <= GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x))))
4014 {
4015 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), VOIDmode,
4016 known_x, known_mode, known_ret);
4017 return MAX (1, (num0
4018 - (int) (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (x)))
4019 - bitwidth)));
4020 }
4021
4022 #ifdef WORD_REGISTER_OPERATIONS
4023 #ifdef LOAD_EXTEND_OP
4024 /* For paradoxical SUBREGs on machines where all register operations
4025 affect the entire register, just look inside. Note that we are
4026 passing MODE to the recursive call, so the number of sign bit copies
4027 will remain relative to that mode, not the inner mode. */
4028
4029 /* This works only if loads sign extend. Otherwise, if we get a
4030 reload for the inner part, it may be loaded from the stack, and
4031 then we lose all sign bit copies that existed before the store
4032 to the stack. */
4033
4034 if ((GET_MODE_SIZE (GET_MODE (x))
4035 > GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))
4036 && LOAD_EXTEND_OP (GET_MODE (SUBREG_REG (x))) == SIGN_EXTEND
4037 && MEM_P (SUBREG_REG (x)))
4038 return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
4039 known_x, known_mode, known_ret);
4040 #endif
4041 #endif
4042 break;
4043
4044 case SIGN_EXTRACT:
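/* The extracted field is sign-extended to fill the mode, so every bit above the field is a copy of its sign bit. */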
4045 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4046 return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
4047 break;
4048
4049 case SIGN_EXTEND:
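/* Sign extension contributes (bitwidth - inner width) extra copies of the sign bit on top of those already present in the operand. */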
4050 return (bitwidth - GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
4051 + cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4052 known_x, known_mode, known_ret));
4053
4054 case TRUNCATE:
4055 /* For a smaller object, just ignore the high bits. */
4056 num0 = cached_num_sign_bit_copies (XEXP (x, 0), VOIDmode,
4057 known_x, known_mode, known_ret);
4058 return MAX (1, (num0 - (int) (GET_MODE_BITSIZE (GET_MODE (XEXP (x, 0)))
4059 - bitwidth)));
4060
4061 case NOT:
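/* Complementing a value does not change how many high-order bits equal the sign bit. */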
4062 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
4063 known_x, known_mode, known_ret);
4064
4065 case ROTATE: case ROTATERT:
4066 /* If we are rotating left by a number of bits less than the number
4067 of sign bit copies, we can just subtract that amount from the
4068 number. */
4069 if (GET_CODE (XEXP (x, 1)) == CONST_INT
4070 && INTVAL (XEXP (x, 1)) >= 0
4071 && INTVAL (XEXP (x, 1)) < (int) bitwidth)
4072 {
4073 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4074 known_x, known_mode, known_ret);
4075 return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
4076 : (int) bitwidth - INTVAL (XEXP (x, 1))));
4077 }
4078 break;
4079
4080 case NEG:
4081 /* In general, this subtracts one sign bit copy. But if the value
4082 is known to be positive, the number of sign bit copies is the
4083 same as that of the input. Finally, if the input has just one bit
4084 that might be nonzero, all the bits are copies of the sign bit. */
4085 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4086 known_x, known_mode, known_ret);
4087 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4088 return num0 > 1 ? num0 - 1 : 1;
4089
4090 nonzero = nonzero_bits (XEXP (x, 0), mode);
4091 if (nonzero == 1)
4092 return bitwidth;
4093
4094 if (num0 > 1
4095 && (((HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero))
4096 num0--;
4097
4098 return num0;
4099
4100 case IOR: case AND: case XOR:
4101 case SMIN: case SMAX: case UMIN: case UMAX:
4102 /* Logical operations will preserve the number of sign-bit copies.
4103 MIN and MAX operations always return one of the operands. */
4104 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4105 known_x, known_mode, known_ret);
4106 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4107 known_x, known_mode, known_ret);
4108 return MIN (num0, num1);
4109
4110 case PLUS: case MINUS:
4111 /* For addition and subtraction, we can have a 1-bit carry. However,
4112 if we are subtracting 1 from a positive number, there will not
4113 be such a carry. Furthermore, if the positive number is known to
4114 be 0 or 1, we know the result is either -1 or 0. */
4115
4116 if (code == PLUS && XEXP (x, 1) == constm1_rtx
4117 && bitwidth <= HOST_BITS_PER_WIDE_INT)
4118 {
4119 nonzero = nonzero_bits (XEXP (x, 0), mode);
4120 if ((((HOST_WIDE_INT) 1 << (bitwidth - 1)) & nonzero) == 0)
4121 return (nonzero == 1 || nonzero == 0 ? bitwidth
4122 : bitwidth - floor_log2 (nonzero) - 1);
4123 }
4124
4125 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4126 known_x, known_mode, known_ret);
4127 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4128 known_x, known_mode, known_ret);
4129 result = MAX (1, MIN (num0, num1) - 1);
4130
4131 #ifdef POINTERS_EXTEND_UNSIGNED
4132 /* If pointers extend signed and this is an addition or subtraction
4133 to a pointer in Pmode, all the bits above ptr_mode are known to be
4134 sign bit copies. */
4135 if (! POINTERS_EXTEND_UNSIGNED && GET_MODE (x) == Pmode
4136 && (code == PLUS || code == MINUS)
4137 && REG_P (XEXP (x, 0)) && REG_POINTER (XEXP (x, 0)))
4138 result = MAX ((int) (GET_MODE_BITSIZE (Pmode)
4139 - GET_MODE_BITSIZE (ptr_mode) + 1),
4140 result);
4141 #endif
4142 return result;
4143
4144 case MULT:
4145 /* The number of bits of the product is the sum of the number of
4146 bits of both terms. However, unless one of the terms is known
4147 to be positive, we must allow for an additional bit since negating
4148 a negative number can remove one sign bit copy. */
4149
4150 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4151 known_x, known_mode, known_ret);
4152 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4153 known_x, known_mode, known_ret);
4154
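/* Equivalent to num0 + num1 - bitwidth: each operand leaves (bitwidth - numN) bits that are not sign bit copies, and the product needs roughly the sum of those. */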
4155 result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
4156 if (result > 0
4157 && (bitwidth > HOST_BITS_PER_WIDE_INT
4158 || (((nonzero_bits (XEXP (x, 0), mode)
4159 & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4160 && ((nonzero_bits (XEXP (x, 1), mode)
4161 & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))))
4162 result--;
4163
4164 return MAX (1, result);
4165
4166 case UDIV:
4167 /* The result must be <= the first operand. If the first operand
4168 has the high bit set, we know nothing about the number of sign
4169 bit copies. */
4170 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4171 return 1;
4172 else if ((nonzero_bits (XEXP (x, 0), mode)
4173 & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4174 return 1;
4175 else
4176 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
4177 known_x, known_mode, known_ret);
4178
4179 case UMOD:
4180 /* The result must be <= the second operand. */
4181 return cached_num_sign_bit_copies (XEXP (x, 1), mode,
4182 known_x, known_mode, known_ret);
4183
4184 case DIV:
4185 /* Similar to unsigned division, except that we have to worry about
4186 the case where the divisor is negative, in which case we have
4187 to add 1. */
4188 result = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4189 known_x, known_mode, known_ret);
4190 if (result > 1
4191 && (bitwidth > HOST_BITS_PER_WIDE_INT
4192 || (nonzero_bits (XEXP (x, 1), mode)
4193 & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
4194 result--;
4195
4196 return result;
4197
4198 case MOD:
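/* For signed remainder the magnitude of the result is bounded by the second operand, so start from its count of sign bit copies; as with DIV, allow for one lost copy when that operand might be negative. */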
4199 result = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4200 known_x, known_mode, known_ret);
4201 if (result > 1
4202 && (bitwidth > HOST_BITS_PER_WIDE_INT
4203 || (nonzero_bits (XEXP (x, 1), mode)
4204 & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0))
4205 result--;
4206
4207 return result;
4208
4209 case ASHIFTRT:
4210 /* Shifts by a constant add to the number of bits equal to the
4211 sign bit. */
4212 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4213 known_x, known_mode, known_ret);
4214 if (GET_CODE (XEXP (x, 1)) == CONST_INT
4215 && INTVAL (XEXP (x, 1)) > 0)
4216 num0 = MIN ((int) bitwidth, num0 + INTVAL (XEXP (x, 1)));
4217
4218 return num0;
4219
4220 case ASHIFT:
4221 /* Left shifts destroy copies. */
4222 if (GET_CODE (XEXP (x, 1)) != CONST_INT
4223 || INTVAL (XEXP (x, 1)) < 0
4224 || INTVAL (XEXP (x, 1)) >= (int) bitwidth)
4225 return 1;
4226
4227 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
4228 known_x, known_mode, known_ret);
4229 return MAX (1, num0 - INTVAL (XEXP (x, 1)));
4230
4231 case IF_THEN_ELSE:
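/* The result is one of the two arms, so take the smaller of their sign bit copy counts. */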
4232 num0 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
4233 known_x, known_mode, known_ret);
4234 num1 = cached_num_sign_bit_copies (XEXP (x, 2), mode,
4235 known_x, known_mode, known_ret);
4236 return MIN (num0, num1);
4237
4238 case EQ: case NE: case GE: case GT: case LE: case LT:
4239 case UNEQ: case LTGT: case UNGE: case UNGT: case UNLE: case UNLT:
4240 case GEU: case GTU: case LEU: case LTU:
4241 case UNORDERED: case ORDERED:
4242 /* If STORE_FLAG_VALUE is negative, take its 1's complement and remask.
4243 Then see how many zero bits we have. */
4244 nonzero = STORE_FLAG_VALUE;
4245 if (bitwidth <= HOST_BITS_PER_WIDE_INT
4246 && (nonzero & ((HOST_WIDE_INT) 1 << (bitwidth - 1))) != 0)
4247 nonzero = (~nonzero) & GET_MODE_MASK (mode);
4248
4249 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
4250
4251 default:
4252 break;
4253 }
4254
4255 /* If we haven't been able to figure it out by one of the above rules,
4256 see if some of the high-order bits are known to be zero. If so,
4257 count those bits and return one less than that amount. If we can't
4258 safely compute the mask for this mode, always return BITWIDTH. */
4259
4260 bitwidth = GET_MODE_BITSIZE (mode);
4261 if (bitwidth > HOST_BITS_PER_WIDE_INT)
4262 return 1;
4263
4264 nonzero = nonzero_bits (x, mode);
4265 return nonzero & ((HOST_WIDE_INT) 1 << (bitwidth - 1))
4266 ? 1 : bitwidth - floor_log2 (nonzero) - 1;
4267 }
4268
4269 /* Calculate the rtx_cost of a single instruction. A return value of
4270 zero indicates an instruction pattern without a known cost. */
4271
4272 int
4273 insn_rtx_cost (rtx pat)
4274 {
4275 int i, cost;
4276 rtx set;
4277
4278 /* Extract the single set rtx from the instruction pattern.
4279 We can't use single_set since we only have the pattern. */
4280 if (GET_CODE (pat) == SET)
4281 set = pat;
4282 else if (GET_CODE (pat) == PARALLEL)
4283 {
4284 set = NULL_RTX;
4285 for (i = 0; i < XVECLEN (pat, 0); i++)
4286 {
4287 rtx x = XVECEXP (pat, 0, i);
4288 if (GET_CODE (x) == SET)
4289 {
4290 if (set)
4291 return 0;
4292 set = x;
4293 }
4294 }
4295 if (!set)
4296 return 0;
4297 }
4298 else
4299 return 0;
4300
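/* If rtx_cost gives no useful estimate (zero or less), fall back to the cost of a single instruction. */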
4301 cost = rtx_cost (SET_SRC (set), SET);
4302 return cost > 0 ? cost : COSTS_N_INSNS (1);
4303 }
4304
4305 /* Given an insn INSN and condition COND, return the condition in a
4306 canonical form to simplify testing by callers. Specifically:
4307
4308 (1) The code will always be a comparison operation (EQ, NE, GT, etc.).
4309 (2) Both operands will be machine operands; (cc0) will have been replaced.
4310 (3) If an operand is a constant, it will be the second operand.
4311 (4) (LE x const) will be replaced with (LT x <const+1>) and similarly
4312 for GE, GEU, and LEU.
4313
4314 If the condition cannot be understood, or is an inequality floating-point
4315 comparison which needs to be reversed, 0 will be returned.
4316
4317 If REVERSE is nonzero, then reverse the condition prior to canonicalizing it.
4318
4319 If EARLIEST is nonzero, it is a pointer to a place where the earliest
4320 insn used in locating the condition was found. If a replacement test
4321 of the condition is desired, it should be placed in front of that
4322 insn and we will be sure that the inputs are still valid.
4323
4324 If WANT_REG is nonzero, we wish the condition to be relative to that
4325 register, if possible. Therefore, do not canonicalize the condition
4326 further. If ALLOW_CC_MODE is nonzero, allow the condition returned
4327 to be a compare to a CC mode register.
4328
4329 If VALID_AT_INSN_P, the condition must be valid at both *EARLIEST
4330 and at INSN. */
4331
4332 rtx
4333 canonicalize_condition (rtx insn, rtx cond, int reverse, rtx *earliest,
4334 rtx want_reg, int allow_cc_mode, int valid_at_insn_p)
4335 {
4336 enum rtx_code code;
4337 rtx prev = insn;
4338 rtx set;
4339 rtx tem;
4340 rtx op0, op1;
4341 int reverse_code = 0;
4342 enum machine_mode mode;
4343 basic_block bb = BLOCK_FOR_INSN (insn);
4344
4345 code = GET_CODE (cond);
4346 mode = GET_MODE (cond);
4347 op0 = XEXP (cond, 0);
4348 op1 = XEXP (cond, 1);
4349
4350 if (reverse)
4351 code = reversed_comparison_code (cond, insn);
4352 if (code == UNKNOWN)
4353 return 0;
4354
4355 if (earliest)
4356 *earliest = insn;
4357
4358 /* If we are comparing a register with zero, see if the register is set
4359 in the previous insn to a COMPARE or a comparison operation. Perform
4360 the same tests as a function of STORE_FLAG_VALUE as find_comparison_args
4361 in cse.c */
4362
4363 while ((GET_RTX_CLASS (code) == RTX_COMPARE
4364 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
4365 && op1 == CONST0_RTX (GET_MODE (op0))
4366 && op0 != want_reg)
4367 {
4368 /* Set nonzero when we find something of interest. */
4369 rtx x = 0;
4370
4371 #ifdef HAVE_cc0
4372 /* If comparison with cc0, import actual comparison from compare
4373 insn. */
4374 if (op0 == cc0_rtx)
4375 {
4376 if ((prev = prev_nonnote_insn (prev)) == 0
4377 || !NONJUMP_INSN_P (prev)
4378 || (set = single_set (prev)) == 0
4379 || SET_DEST (set) != cc0_rtx)
4380 return 0;
4381
4382 op0 = SET_SRC (set);
4383 op1 = CONST0_RTX (GET_MODE (op0));
4384 if (earliest)
4385 *earliest = prev;
4386 }
4387 #endif
4388
4389 /* If this is a COMPARE, pick up the two things being compared. */
4390 if (GET_CODE (op0) == COMPARE)
4391 {
4392 op1 = XEXP (op0, 1);
4393 op0 = XEXP (op0, 0);
4394 continue;
4395 }
4396 else if (!REG_P (op0))
4397 break;
4398
4399 /* Go back to the previous insn. Stop if it is not an INSN. We also
4400 stop if it isn't a single set or if it has a REG_INC note because
4401 we don't want to bother dealing with it. */
4402
4403 if ((prev = prev_nonnote_insn (prev)) == 0
4404 || !NONJUMP_INSN_P (prev)
4405 || FIND_REG_INC_NOTE (prev, NULL_RTX)
4406 /* In cfglayout mode, there do not have to be labels at the
4407 beginning of a block, or jumps at the end, so the previous
4408 conditions would not stop us when we reach bb boundary. */
4409 || BLOCK_FOR_INSN (prev) != bb)
4410 break;
4411
4412 set = set_of (op0, prev);
4413
4414 if (set
4415 && (GET_CODE (set) != SET
4416 || !rtx_equal_p (SET_DEST (set), op0)))
4417 break;
4418
4419 /* If this is setting OP0, get what it sets it to if it looks
4420 relevant. */
4421 if (set)
4422 {
4423 enum machine_mode inner_mode = GET_MODE (SET_DEST (set));
4424 #ifdef FLOAT_STORE_FLAG_VALUE
4425 REAL_VALUE_TYPE fsfv;
4426 #endif
4427
4428 /* ??? We may not combine comparisons done in a CCmode with
4429 comparisons not done in a CCmode. This is to aid targets
4430 like Alpha that have an IEEE compliant EQ instruction, and
4431 a non-IEEE compliant BEQ instruction. The use of CCmode is
4432 actually artificial, simply to prevent the combination, but
4433 should not affect other platforms.
4434
4435 However, we must allow VOIDmode comparisons to match either
4436 CCmode or non-CCmode comparison, because some ports have
4437 modeless comparisons inside branch patterns.
4438
4439 ??? This mode check should perhaps look more like the mode check
4440 in simplify_comparison in combine. */
4441
4442 if ((GET_CODE (SET_SRC (set)) == COMPARE
4443 || (((code == NE
4444 || (code == LT
4445 && GET_MODE_CLASS (inner_mode) == MODE_INT
4446 && (GET_MODE_BITSIZE (inner_mode)
4447 <= HOST_BITS_PER_WIDE_INT)
4448 && (STORE_FLAG_VALUE
4449 & ((HOST_WIDE_INT) 1
4450 << (GET_MODE_BITSIZE (inner_mode) - 1))))
4451 #ifdef FLOAT_STORE_FLAG_VALUE
4452 || (code == LT
4453 && SCALAR_FLOAT_MODE_P (inner_mode)
4454 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
4455 REAL_VALUE_NEGATIVE (fsfv)))
4456 #endif
4457 ))
4458 && COMPARISON_P (SET_SRC (set))))
4459 && (((GET_MODE_CLASS (mode) == MODE_CC)
4460 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
4461 || mode == VOIDmode || inner_mode == VOIDmode))
4462 x = SET_SRC (set);
4463 else if (((code == EQ
4464 || (code == GE
4465 && (GET_MODE_BITSIZE (inner_mode)
4466 <= HOST_BITS_PER_WIDE_INT)
4467 && GET_MODE_CLASS (inner_mode) == MODE_INT
4468 && (STORE_FLAG_VALUE
4469 & ((HOST_WIDE_INT) 1
4470 << (GET_MODE_BITSIZE (inner_mode) - 1))))
4471 #ifdef FLOAT_STORE_FLAG_VALUE
4472 || (code == GE
4473 && SCALAR_FLOAT_MODE_P (inner_mode)
4474 && (fsfv = FLOAT_STORE_FLAG_VALUE (inner_mode),
4475 REAL_VALUE_NEGATIVE (fsfv)))
4476 #endif
4477 ))
4478 && COMPARISON_P (SET_SRC (set))
4479 && (((GET_MODE_CLASS (mode) == MODE_CC)
4480 == (GET_MODE_CLASS (inner_mode) == MODE_CC))
4481 || mode == VOIDmode || inner_mode == VOIDmode))
4482
4483 {
4484 reverse_code = 1;
4485 x = SET_SRC (set);
4486 }
4487 else
4488 break;
4489 }
4490
4491 else if (reg_set_p (op0, prev))
4492 /* If this sets OP0, but not directly, we have to give up. */
4493 break;
4494
4495 if (x)
4496 {
4497 /* If the caller is expecting the condition to be valid at INSN,
4498 make sure X doesn't change before INSN. */
4499 if (valid_at_insn_p)
4500 if (modified_in_p (x, prev) || modified_between_p (x, prev, insn))
4501 break;
4502 if (COMPARISON_P (x))
4503 code = GET_CODE (x);
4504 if (reverse_code)
4505 {
4506 code = reversed_comparison_code (x, prev);
4507 if (code == UNKNOWN)
4508 return 0;
4509 reverse_code = 0;
4510 }
4511
4512 op0 = XEXP (x, 0), op1 = XEXP (x, 1);
4513 if (earliest)
4514 *earliest = prev;
4515 }
4516 }
4517
4518 /* If constant is first, put it last. */
4519 if (CONSTANT_P (op0))
4520 code = swap_condition (code), tem = op0, op0 = op1, op1 = tem;
4521
4522 /* If OP0 is the result of a comparison, we weren't able to find what
4523 was really being compared, so fail. */
4524 if (!allow_cc_mode
4525 && GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
4526 return 0;
4527
4528 /* Canonicalize any ordered comparison with integers involving equality
4529 if we can do computations in the relevant mode and we do not
4530 overflow. */
4531
4532 if (GET_MODE_CLASS (GET_MODE (op0)) != MODE_CC
4533 && GET_CODE (op1) == CONST_INT
4534 && GET_MODE (op0) != VOIDmode
4535 && GET_MODE_BITSIZE (GET_MODE (op0)) <= HOST_BITS_PER_WIDE_INT)
4536 {
4537 HOST_WIDE_INT const_val = INTVAL (op1);
4538 unsigned HOST_WIDE_INT uconst_val = const_val;
4539 unsigned HOST_WIDE_INT max_val
4540 = (unsigned HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (op0));
4541
4542 switch (code)
4543 {
4544 case LE:
4545 if ((unsigned HOST_WIDE_INT) const_val != max_val >> 1)
4546 code = LT, op1 = gen_int_mode (const_val + 1, GET_MODE (op0));
4547 break;
4548
4549 /* When cross-compiling, const_val might be sign-extended from
4550 BITS_PER_WORD to HOST_BITS_PER_WIDE_INT */
4551 case GE:
4552 if ((HOST_WIDE_INT) (const_val & max_val)
4553 != (((HOST_WIDE_INT) 1
4554 << (GET_MODE_BITSIZE (GET_MODE (op0)) - 1))))
4555 code = GT, op1 = gen_int_mode (const_val - 1, GET_MODE (op0));
4556 break;
4557
4558 case LEU:
4559 if (uconst_val < max_val)
4560 code = LTU, op1 = gen_int_mode (uconst_val + 1, GET_MODE (op0));
4561 break;
4562
4563 case GEU:
4564 if (uconst_val != 0)
4565 code = GTU, op1 = gen_int_mode (uconst_val - 1, GET_MODE (op0));
4566 break;
4567
4568 default:
4569 break;
4570 }
4571 }
4572
4573 /* Never return CC0; return zero instead. */
4574 if (CC0_P (op0))
4575 return 0;
4576
4577 return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
4578 }
4579
4580 /* Given a jump insn JUMP, return the condition that will cause it to branch
4581 to its JUMP_LABEL. If the condition cannot be understood, or is an
4582 inequality floating-point comparison which needs to be reversed, 0 will
4583 be returned.
4584
4585 If EARLIEST is nonzero, it is a pointer to a place where the earliest
4586 insn used in locating the condition was found. If a replacement test
4587 of the condition is desired, it should be placed in front of that
4588 insn and we will be sure that the inputs are still valid. If EARLIEST
4589 is null, the returned condition will be valid at INSN.
4590
4591 If ALLOW_CC_MODE is nonzero, allow the condition returned to be a
4592 compare to a CC mode register.
4593
4594 VALID_AT_INSN_P is the same as for canonicalize_condition. */
4595
4596 rtx
4597 get_condition (rtx jump, rtx *earliest, int allow_cc_mode, int valid_at_insn_p)
4598 {
4599 rtx cond;
4600 int reverse;
4601 rtx set;
4602
4603 /* If this is not a standard conditional jump, we can't parse it. */
4604 if (!JUMP_P (jump)
4605 || ! any_condjump_p (jump))
4606 return 0;
4607 set = pc_set (jump);
4608
4609 cond = XEXP (SET_SRC (set), 0);
4610
4611 /* If this branches to JUMP_LABEL when the condition is false, reverse
4612 the condition. */
4613 reverse
4614 = GET_CODE (XEXP (SET_SRC (set), 2)) == LABEL_REF
4615 && XEXP (XEXP (SET_SRC (set), 2), 0) == JUMP_LABEL (jump);
4616
4617 return canonicalize_condition (jump, cond, reverse, earliest, NULL_RTX,
4618 allow_cc_mode, valid_at_insn_p);
4619 }
4620
4621 /* Initialize the table NUM_SIGN_BIT_COPIES_IN_REP based on
4622 TARGET_MODE_REP_EXTENDED.
4623
4624 Note that we assume that the property of
4625 TARGET_MODE_REP_EXTENDED(B, C) is sticky to the integral modes
4626 narrower than mode B. I.e., if A is a mode narrower than B then in
4627 order to be able to operate on it in mode B, mode A needs to
4628 satisfy the requirements set by the representation of mode B. */
4629
4630 static void
4631 init_num_sign_bit_copies_in_rep (void)
4632 {
4633 enum machine_mode mode, in_mode;
4634
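/* Note that the inner loop below always terminates with MODE equal to IN_MODE, so stepping the outer loop with GET_MODE_WIDER_MODE (mode) advances IN_MODE to the next wider integer mode. */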
4635 for (in_mode = GET_CLASS_NARROWEST_MODE (MODE_INT); in_mode != VOIDmode;
4636 in_mode = GET_MODE_WIDER_MODE (mode))
4637 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != in_mode;
4638 mode = GET_MODE_WIDER_MODE (mode))
4639 {
4640 enum machine_mode i;
4641
4642 /* Currently, it is assumed that TARGET_MODE_REP_EXTENDED
4643 extends to the next widest mode. */
4644 gcc_assert (targetm.mode_rep_extended (mode, in_mode) == UNKNOWN
4645 || GET_MODE_WIDER_MODE (mode) == in_mode);
4646
4647 /* We are in in_mode. Count how many bits outside of mode
4648 have to be copies of the sign-bit. */
4649 for (i = mode; i != in_mode; i = GET_MODE_WIDER_MODE (i))
4650 {
4651 enum machine_mode wider = GET_MODE_WIDER_MODE (i);
4652
4653 if (targetm.mode_rep_extended (i, wider) == SIGN_EXTEND
4654 /* We can only check sign-bit copies starting from the
4655 top-bit. In order to be able to check the bits we
4656 have already seen we pretend that subsequent bits
4657 have to be sign-bit copies too. */
4658 || num_sign_bit_copies_in_rep [in_mode][mode])
4659 num_sign_bit_copies_in_rep [in_mode][mode]
4660 += GET_MODE_BITSIZE (wider) - GET_MODE_BITSIZE (i);
4661 }
4662 }
4663 }
4664
4665 /* Suppose that truncation from the machine mode of X to MODE is not a
4666 no-op. See if there is anything special about X so that we can
4667 assume it already contains a truncated value of MODE. */
4668
4669 bool
4670 truncated_to_mode (enum machine_mode mode, rtx x)
4671 {
4672 /* This register has already been used in MODE without explicit
4673 truncation. */
4674 if (REG_P (x) && rtl_hooks.reg_truncated_to_mode (mode, x))
4675 return true;
4676
4677 /* See if we already satisfy the requirements of MODE. If yes we
4678 can just switch to MODE. */
4679 if (num_sign_bit_copies_in_rep[GET_MODE (x)][mode]
4680 && (num_sign_bit_copies (x, GET_MODE (x))
4681 >= num_sign_bit_copies_in_rep[GET_MODE (x)][mode] + 1))
4682 return true;
4683
4684 return false;
4685 }
4686
4687 /* Initialize non_rtx_starting_operands, which is used to speed up
4688 for_each_rtx. */
4689 void
4690 init_rtlanal (void)
4691 {
4692 int i;
4693 for (i = 0; i < NUM_RTX_CODE; i++)
4694 {
4695 const char *format = GET_RTX_FORMAT (i);
4696 const char *first = strpbrk (format, "eEV");
4697 non_rtx_starting_operands[i] = first ? first - format : -1;
4698 }
4699
4700 init_num_sign_bit_copies_in_rep ();
4701 }
4702
4703 /* Check whether this is a constant pool constant. */
4704 bool
4705 constant_pool_constant_p (rtx x)
4706 {
4707 x = avoid_constant_pool_reference (x);
4708 return GET_CODE (x) == CONST_DOUBLE;
4709 }
4710
4711