/* Optimize by combining instructions for GNU compiler.
   Copyright (C) 1987-2018 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This module is essentially the "combiner" phase of the U. of Arizona
   Portable Optimizer, but redone to work on our list-structured
   representation for RTL instead of their string representation.

   The LOG_LINKS of each insn identify the most recent assignment
   to each REG used in the insn.  It is a list of previous insns,
   each of which contains a SET for a REG that is used in this insn
   and not used or set in between.  LOG_LINKs never cross basic blocks.
   They were set up by the preceding pass (lifetime analysis).

   We try to combine each pair of insns joined by a logical link.
   We also try to combine triplets of insns A, B and C when C has
   a link back to B and B has a link back to A.  Likewise for a
   small number of quadruplets of insns A, B, C and D for which
   there's high likelihood of success.
   There are no LOG_LINKS entries for uses of CC0; none are needed,
   because the insn that sets CC0 is always immediately before the
   insn that tests it.  So we always regard a branch insn as having
   a logical link to the preceding insn.  The same is true for an
   insn that explicitly uses CC0.

   We check (with modified_between_p) to avoid combining in such a way
   as to move a computation to a place where its value would be different.
   Combination is done by mathematically substituting the previous
   insn(s) values for the regs they set into the expressions in
   the later insns that refer to these regs.  If the result is a valid insn
   for our target machine, according to the machine description,
   we install it, delete the earlier insns, and update the data flow
   information (LOG_LINKS and REG_NOTES) for what we did.

   There are a few exceptions where the dataflow information isn't
   completely updated (however this is only a local issue since it is
   regenerated before the next pass that uses it):

   - reg_live_length is not updated
   - reg_n_refs is not adjusted in the rare case when a register is
     no longer required in a computation
   - there are extremely rare cases (see distribute_notes) when a
     REG_DEAD note is lost
   - a LOG_LINKS entry that refers to an insn with multiple SETs may be
     removed because there is no way to know which register it was
     linking

   To simplify substitution, we combine only when the earlier insn(s)
   consist of only a single assignment.  To simplify updating afterward,
   we never combine when a subroutine call appears in the middle.

   Since we do not represent assignments to CC0 explicitly except when that
   is all an insn does, there is no LOG_LINKS entry in an insn that uses
   the condition code for the insn that set the condition code.
   Fortunately, these two insns must be consecutive.
   Therefore, every JUMP_INSN is taken to have an implicit logical link
   to the preceding insn.  This is not quite right, since non-jumps can
   also use the condition code; but in practice such insns would not
   combine anyway.  */
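
/* As a concrete sketch (not from any particular target): on a machine
   with a multiply-accumulate insn, the linked pair

	(set (reg 100) (mult (reg 101) (reg 102)))
	(set (reg 103) (plus (reg 100) (reg 104)))

   where reg 100 dies in the second insn may be combined into

	(set (reg 103) (plus (mult (reg 101) (reg 102)) (reg 104)))

   and the result is kept only if the machine description recognizes
   the new pattern as a single valid insn.  */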

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "cfghooks.h"
#include "predict.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "optabs.h"
#include "regs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "cgraph.h"
#include "stor-layout.h"
#include "cfgrtl.h"
#include "cfgcleanup.h"
/* Include expr.h after insn-config.h so we get HAVE_conditional_move.  */
#include "explow.h"
#include "insn-attr.h"
#include "rtlhooks-def.h"
#include "params.h"
#include "tree-pass.h"
#include "valtrack.h"
#include "rtl-iter.h"
#include "print-rtl.h"

/* Number of attempts to combine instructions in this function.  */

static int combine_attempts;

/* Number of attempts that got as far as substitution in this function.  */

static int combine_merges;

/* Number of instructions combined with added SETs in this function.  */

static int combine_extras;

/* Number of instructions combined in this function.  */

static int combine_successes;

/* Totals over entire compilation.  */

static int total_attempts, total_merges, total_extras, total_successes;

/* combine_instructions may try to replace the right hand side of the
   second instruction with the value of an associated REG_EQUAL note
   before throwing it at try_combine.  That is problematic when there
   is a REG_DEAD note for a register used in the old right hand side
   and can cause distribute_notes to do wrong things.  This is the
   second instruction if it has been so modified, null otherwise.  */

static rtx_insn *i2mod;

/* When I2MOD is nonnull, this is a copy of the old right hand side.  */

static rtx i2mod_old_rhs;

/* When I2MOD is nonnull, this is a copy of the new right hand side.  */

static rtx i2mod_new_rhs;

struct reg_stat_type {
  /* Record last point of death of (hard or pseudo) register n.  */
  rtx_insn *last_death;

  /* Record last point of modification of (hard or pseudo) register n.  */
  rtx_insn *last_set;

  /* The next group of fields allows the recording of the last value assigned
     to (hard or pseudo) register n.  We use this information to see if an
     operation being processed is redundant given a prior operation performed
     on the register.  For example, an `and' with a constant is redundant if
     all the zero bits are already known to be turned off.

     We use an approach similar to that used by cse, but change it in the
     following ways:

     (1) We do not want to reinitialize at each label.
     (2) It is useful, but not critical, to know the actual value assigned
	 to a register.  Often just its form is helpful.

     Therefore, we maintain the following fields:

     last_set_value		the last value assigned
     last_set_label		records the value of label_tick when the
				register was assigned
     last_set_table_tick	records the value of label_tick when a
				value using the register is assigned
     last_set_invalid		set to nonzero when it is not valid
				to use the value of this register in some
				register's value

     To understand the usage of these tables, it is important to understand
     the distinction between the value in last_set_value being valid and
     the register being validly contained in some other expression in the
     table.

     (The next two parameters are out of date).

     reg_stat[i].last_set_value is valid if it is nonzero, and either
     reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick.

     Register I may validly appear in any expression returned for the value
     of another register if reg_n_sets[i] is 1.  It may also appear in the
     value for register J if reg_stat[j].last_set_invalid is zero, or
     reg_stat[i].last_set_label < reg_stat[j].last_set_label.

     If an expression is found in the table containing a register which may
     not validly appear in an expression, the register is replaced by
     something that won't match, (clobber (const_int 0)).  */

  /* Record last value assigned to (hard or pseudo) register n.  */

  rtx last_set_value;

  /* Record the value of label_tick when an expression involving register n
     is placed in last_set_value.  */

  int last_set_table_tick;

  /* Record the value of label_tick when the value for register n is placed in
     last_set_value.  */

  int last_set_label;

  /* These fields are maintained in parallel with last_set_value and are
     used to store the mode in which the register was last set, the bits
     that were known to be zero when it was last set, and the number of
     sign bit copies it was known to have when it was last set.  */

  unsigned HOST_WIDE_INT last_set_nonzero_bits;
  char last_set_sign_bit_copies;
  ENUM_BITFIELD(machine_mode) last_set_mode : 8;

  /* Set nonzero if references to register n in expressions should not be
     used.  last_set_invalid is set nonzero when this register is being
     assigned to and last_set_table_tick == label_tick.  */

  char last_set_invalid;

  /* Some registers that are set more than once and used in more than one
     basic block are nevertheless always set in similar ways.  For example,
     a QImode register may be loaded from memory in two places on a machine
     where byte loads zero extend.

     We record in the following fields if a register has some leading bits
     that are always equal to the sign bit, and what we know about the
     nonzero bits of a register, specifically which bits are known to be
     zero.

     If an entry is zero, it means that we don't know anything special.  */

  unsigned char sign_bit_copies;

  unsigned HOST_WIDE_INT nonzero_bits;

  /* Record the value of the label_tick when the last truncation
     happened.  The field truncated_to_mode is only valid if
     truncation_label == label_tick.  */

  int truncation_label;

  /* Record the last truncation seen for this register.  If truncation
     is not a nop to this mode we might be able to save an explicit
     truncation if we know that value already contains a truncated
     value.  */

  ENUM_BITFIELD(machine_mode) truncated_to_mode : 8;
};
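
/* A small example (a sketch) of how the nonzero-bits tracking pays off:
   if the last recorded set of a pseudo was

	(set (reg 100) (and (reg 101) (const_int 15)))

   then reg_stat[100].nonzero_bits is at most 15, and a later
   (and (reg 100) (const_int 255)) is redundant because every bit above
   bit 3 is already known to be zero.  */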

static vec<reg_stat_type> reg_stat;

/* One plus the highest pseudo for which we track REG_N_SETS.
   regstat_init_n_sets_and_refs allocates the array for REG_N_SETS just once,
   but during combine_split_insns new pseudos can be created.  As we don't have
   updated DF information in that case, it is hard to initialize the array
   after growing.  The combiner only cares about REG_N_SETS (regno) == 1,
   so instead of growing the arrays, just assume all newly created pseudos
   during combine might be set multiple times.  */

static unsigned int reg_n_sets_max;

/* Record the luid of the last insn that invalidated memory
   (anything that writes memory, and subroutine calls, but not pushes).  */

static int mem_last_set;

/* Record the luid of the last CALL_INSN
   so we can tell whether a potential combination crosses any calls.  */

static int last_call_luid;

/* When `subst' is called, this is the insn that is being modified
   (by combining in a previous insn).  The PATTERN of this insn
   is still the old pattern partially modified and it should not be
   looked at, but this may be used to examine the successors of the insn
   to judge whether a simplification is valid.  */

static rtx_insn *subst_insn;

/* This is the lowest LUID that `subst' is currently dealing with.
   get_last_value will not return a value if the register was set at or
   after this LUID.  If not for this mechanism, we could get confused if
   I2 or I1 in try_combine were an insn that used the old value of a register
   to obtain a new value.  In that case, we might erroneously get the
   new value of the register when we wanted the old one.  */

static int subst_low_luid;

/* This contains any hard registers that are used in newpat; reg_dead_at_p
   must consider all these registers to be always live.  */

static HARD_REG_SET newpat_used_regs;

/* This is an insn to which a LOG_LINKS entry has been added.  If this
   insn is earlier than I2 or I3, combine should rescan starting at
   that location.  */

static rtx_insn *added_links_insn;

/* And similarly, for notes.  */

static rtx_insn *added_notes_insn;

/* Basic block in which we are performing combines.  */
static basic_block this_basic_block;
static bool optimize_this_for_speed_p;

/* Length of the currently allocated uid_insn_cost array.  */

static int max_uid_known;

/* The following array records the insn_cost for every insn
   in the instruction stream.  */

static int *uid_insn_cost;

/* The following array records the LOG_LINKS for every insn in the
   instruction stream as struct insn_link pointers.  */

struct insn_link {
  rtx_insn *insn;
  unsigned int regno;
  struct insn_link *next;
};

static struct insn_link **uid_log_links;

static inline int
insn_uid_check (const_rtx insn)
{
  int uid = INSN_UID (insn);
  gcc_checking_assert (uid <= max_uid_known);
  return uid;
}

#define INSN_COST(INSN)		(uid_insn_cost[insn_uid_check (INSN)])
#define LOG_LINKS(INSN)		(uid_log_links[insn_uid_check (INSN)])

#define FOR_EACH_LOG_LINK(L, INSN) \
  for ((L) = LOG_LINKS (INSN); (L); (L) = (L)->next)
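
/* Typical use of the iterator (a sketch; PRODUCER and REGNO here are
   hypothetical, but see find_single_use below for a real occurrence):

     struct insn_link *link;
     FOR_EACH_LOG_LINK (link, insn)
       if (link->insn == producer && link->regno == regno)
	 break;  */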

/* Links for LOG_LINKS are allocated from this obstack.  */

static struct obstack insn_link_obstack;

/* Allocate a link.  */

static inline struct insn_link *
alloc_insn_link (rtx_insn *insn, unsigned int regno, struct insn_link *next)
{
  struct insn_link *l
    = (struct insn_link *) obstack_alloc (&insn_link_obstack,
					  sizeof (struct insn_link));
  l->insn = insn;
  l->regno = regno;
  l->next = next;
  return l;
}

/* Incremented for each basic block.  */

static int label_tick;

/* Reset to label_tick for each extended basic block in scanning order.  */

static int label_tick_ebb_start;

/* Mode used to compute significance in reg_stat[].nonzero_bits.  It is the
   largest integer mode that can fit in HOST_BITS_PER_WIDE_INT.  */

static scalar_int_mode nonzero_bits_mode;

/* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can
   be safely used.  It is zero while computing them and after combine has
   completed.  This former test prevents propagating values based on
   previously set values, which can be incorrect if a variable is modified
   in a loop.  */

static int nonzero_sign_valid;

/* Record one modification to rtl structure
   to be undone by storing old_contents into *where.  */

enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE, UNDO_LINKS };

struct undo
{
  struct undo *next;
  enum undo_kind kind;
  union { rtx r; int i; machine_mode m; struct insn_link *l; } old_contents;
  union { rtx *r; int *i; struct insn_link **l; } where;
};

/* Record a list of changes to be undone, chained through the next
   field of struct undo.

   other_insn is nonzero if we have modified some other insn in the process
   of working on subst_insn.  It must be verified too.  */

struct undobuf
{
  struct undo *undos;
  struct undo *frees;
  rtx_insn *other_insn;
};

static struct undobuf undobuf;

/* Number of times the pseudo being substituted for
   was found and replaced.  */

static int n_occurrences;

static rtx reg_nonzero_bits_for_combine (const_rtx, scalar_int_mode,
					 scalar_int_mode,
					 unsigned HOST_WIDE_INT *);
static rtx reg_num_sign_bit_copies_for_combine (const_rtx, scalar_int_mode,
						scalar_int_mode,
						unsigned int *);
static void do_SUBST (rtx *, rtx);
static void do_SUBST_INT (int *, int);
static void init_reg_last (void);
static void setup_incoming_promotions (rtx_insn *);
static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *);
static int cant_combine_insn_p (rtx_insn *);
static int can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
			  rtx_insn *, rtx_insn *, rtx *, rtx *);
static int combinable_i3pat (rtx_insn *, rtx *, rtx, rtx, rtx, int, int, rtx *);
static int contains_muldiv (rtx);
static rtx_insn *try_combine (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
			      int *, rtx_insn *);
static void undo_all (void);
static void undo_commit (void);
static rtx *find_split_point (rtx *, rtx_insn *, bool);
static rtx subst (rtx, rtx, rtx, int, int, int);
static rtx combine_simplify_rtx (rtx, machine_mode, int, int);
static rtx simplify_if_then_else (rtx);
static rtx simplify_set (rtx);
static rtx simplify_logical (rtx);
static rtx expand_compound_operation (rtx);
static const_rtx expand_field_assignment (const_rtx);
static rtx make_extraction (machine_mode, rtx, HOST_WIDE_INT,
			    rtx, unsigned HOST_WIDE_INT, int, int, int);
static int get_pos_from_mask (unsigned HOST_WIDE_INT,
			      unsigned HOST_WIDE_INT *);
static rtx canon_reg_for_combine (rtx, rtx);
static rtx force_int_to_mode (rtx, scalar_int_mode, scalar_int_mode,
			      scalar_int_mode, unsigned HOST_WIDE_INT, int);
static rtx force_to_mode (rtx, machine_mode,
			  unsigned HOST_WIDE_INT, int);
static rtx if_then_else_cond (rtx, rtx *, rtx *);
static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
static int rtx_equal_for_field_assignment_p (rtx, rtx, bool = false);
static rtx make_field_assignment (rtx);
static rtx apply_distributive_law (rtx);
static rtx distribute_and_simplify_rtx (rtx, int);
static rtx simplify_and_const_int_1 (scalar_int_mode, rtx,
				     unsigned HOST_WIDE_INT);
static rtx simplify_and_const_int (rtx, scalar_int_mode, rtx,
				   unsigned HOST_WIDE_INT);
static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
			    HOST_WIDE_INT, machine_mode, int *);
static rtx simplify_shift_const_1 (enum rtx_code, machine_mode, rtx, int);
static rtx simplify_shift_const (rtx, enum rtx_code, machine_mode, rtx,
				 int);
static int recog_for_combine (rtx *, rtx_insn *, rtx *);
static rtx gen_lowpart_for_combine (machine_mode, rtx);
static enum rtx_code simplify_compare_const (enum rtx_code, machine_mode,
					     rtx, rtx *);
static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
static void update_table_tick (rtx);
static void record_value_for_reg (rtx, rtx_insn *, rtx);
static void check_promoted_subreg (rtx_insn *, rtx);
static void record_dead_and_set_regs_1 (rtx, const_rtx, void *);
static void record_dead_and_set_regs (rtx_insn *);
static int get_last_value_validate (rtx *, rtx_insn *, int, int);
static rtx get_last_value (const_rtx);
static void reg_dead_at_p_1 (rtx, const_rtx, void *);
static int reg_dead_at_p (rtx, rtx_insn *);
static void move_deaths (rtx, rtx, int, rtx_insn *, rtx *);
static int reg_bitfield_target_p (rtx, rtx);
static void distribute_notes (rtx, rtx_insn *, rtx_insn *, rtx_insn *, rtx,
			      rtx, rtx);
static void distribute_links (struct insn_link *);
static void mark_used_regs_combine (rtx);
static void record_promoted_value (rtx_insn *, rtx);
static bool unmentioned_reg_p (rtx, rtx);
static void record_truncated_values (rtx *, void *);
static bool reg_truncated_to_mode (machine_mode, const_rtx);
static rtx gen_lowpart_or_truncate (machine_mode, rtx);


/* It is not safe to use ordinary gen_lowpart in combine.
   See comments in gen_lowpart_for_combine.  */
#undef RTL_HOOKS_GEN_LOWPART
#define RTL_HOOKS_GEN_LOWPART              gen_lowpart_for_combine

/* Our implementation of gen_lowpart never emits a new pseudo.  */
#undef RTL_HOOKS_GEN_LOWPART_NO_EMIT
#define RTL_HOOKS_GEN_LOWPART_NO_EMIT      gen_lowpart_for_combine

#undef RTL_HOOKS_REG_NONZERO_REG_BITS
#define RTL_HOOKS_REG_NONZERO_REG_BITS     reg_nonzero_bits_for_combine

#undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES
#define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES  reg_num_sign_bit_copies_for_combine

#undef RTL_HOOKS_REG_TRUNCATED_TO_MODE
#define RTL_HOOKS_REG_TRUNCATED_TO_MODE    reg_truncated_to_mode

static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER;

/* Convenience wrapper for the canonicalize_comparison target hook.
   Target hooks cannot use enum rtx_code.  */
static inline void
target_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1,
				bool op0_preserve_value)
{
  int code_int = (int) *code;
  targetm.canonicalize_comparison (&code_int, op0, op1, op0_preserve_value);
  *code = (enum rtx_code) code_int;
}

/* Try to split PATTERN found in INSN.  This returns NULL_RTX if
   PATTERN cannot be split.  Otherwise, it returns an insn sequence.
   This is a wrapper around split_insns which ensures that the
   reg_stat vector is made larger if the splitter creates a new
   register.  */

static rtx_insn *
combine_split_insns (rtx pattern, rtx_insn *insn)
{
  rtx_insn *ret;
  unsigned int nregs;

  ret = split_insns (pattern, insn);
  nregs = max_reg_num ();
  if (nregs > reg_stat.length ())
    reg_stat.safe_grow_cleared (nregs);
  return ret;
}

/* This is used by find_single_use to locate an rtx in LOC that
   contains exactly one use of DEST, which is typically either a REG
   or CC0.  It returns a pointer to the innermost rtx expression
   containing DEST.  Appearances of DEST that are being used to
   totally replace it are not counted.  */

static rtx *
find_single_use_1 (rtx dest, rtx *loc)
{
  rtx x = *loc;
  enum rtx_code code = GET_CODE (x);
  rtx *result = NULL;
  rtx *this_result;
  int i;
  const char *fmt;

  switch (code)
    {
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
    CASE_CONST_ANY:
    case CLOBBER:
      return 0;

    case SET:
      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
	 of a REG that occupies all of the REG, the insn uses DEST if
	 it is mentioned in the destination or the source.  Otherwise, we
	 need just check the source.  */
      if (GET_CODE (SET_DEST (x)) != CC0
	  && GET_CODE (SET_DEST (x)) != PC
	  && !REG_P (SET_DEST (x))
	  && ! (GET_CODE (SET_DEST (x)) == SUBREG
		&& REG_P (SUBREG_REG (SET_DEST (x)))
		&& !read_modify_subreg_p (SET_DEST (x))))
	break;

      return find_single_use_1 (dest, &SET_SRC (x));

    case MEM:
    case SUBREG:
      return find_single_use_1 (dest, &XEXP (x, 0));

    default:
      break;
    }

  /* If it wasn't one of the common cases above, check each expression and
     vector of this code.  Look for a unique usage of DEST.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (dest == XEXP (x, i)
	      || (REG_P (dest) && REG_P (XEXP (x, i))
		  && REGNO (dest) == REGNO (XEXP (x, i))))
	    this_result = loc;
	  else
	    this_result = find_single_use_1 (dest, &XEXP (x, i));

	  if (result == NULL)
	    result = this_result;
	  else if (this_result)
	    /* Duplicate usage.  */
	    return NULL;
	}
      else if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    {
	      if (XVECEXP (x, i, j) == dest
		  || (REG_P (dest)
		      && REG_P (XVECEXP (x, i, j))
		      && REGNO (XVECEXP (x, i, j)) == REGNO (dest)))
		this_result = loc;
	      else
		this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));

	      if (result == NULL)
		result = this_result;
	      else if (this_result)
		return NULL;
	    }
	}
    }

  return result;
}

/* See if DEST, produced in INSN, is used only a single time in the
   sequel.  If so, return a pointer to the innermost rtx expression in which
   it is used.

   If PLOC is nonzero, *PLOC is set to the insn containing the single use.

   If DEST is cc0_rtx, we look only at the next insn.  In that case, we don't
   care about REG_DEAD notes or LOG_LINKS.

   Otherwise, we find the single use by finding an insn that has a
   LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST.  If DEST is
   only referenced once in that insn, we know that it must be the first
   and last insn referencing DEST.  */

static rtx *
find_single_use (rtx dest, rtx_insn *insn, rtx_insn **ploc)
{
  basic_block bb;
  rtx_insn *next;
  rtx *result;
  struct insn_link *link;

  if (dest == cc0_rtx)
    {
      next = NEXT_INSN (insn);
      if (next == 0
	  || (!NONJUMP_INSN_P (next) && !JUMP_P (next)))
	return 0;

      result = find_single_use_1 (dest, &PATTERN (next));
      if (result && ploc)
	*ploc = next;
      return result;
    }

  if (!REG_P (dest))
    return 0;

  bb = BLOCK_FOR_INSN (insn);
  for (next = NEXT_INSN (insn);
       next && BLOCK_FOR_INSN (next) == bb;
       next = NEXT_INSN (next))
    if (NONDEBUG_INSN_P (next) && dead_or_set_p (next, dest))
      {
	FOR_EACH_LOG_LINK (link, next)
	  if (link->insn == insn && link->regno == REGNO (dest))
	    break;

	if (link)
	  {
	    result = find_single_use_1 (dest, &PATTERN (next));
	    if (ploc)
	      *ploc = next;
	    return result;
	  }
      }

  return 0;
}

/* Substitute NEWVAL, an rtx expression, into INTO, a place in some
   insn.  The substitution can be undone by undo_all.  If INTO is already
   set to NEWVAL, do not record this change.  Because computing NEWVAL might
   also call SUBST, we have to compute it before we put anything into
   the undo table.  */

static void
do_SUBST (rtx *into, rtx newval)
{
  struct undo *buf;
  rtx oldval = *into;

  if (oldval == newval)
    return;

  /* We'd like to catch as many invalid transformations here as
     possible.  Unfortunately, there are way too many mode changes
     that are perfectly valid, so we'd waste too much effort for
     little gain doing the checks here.  Focus on catching invalid
     transformations involving integer constants.  */
  if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT
      && CONST_INT_P (newval))
    {
      /* Sanity check that we're replacing oldval with a CONST_INT
	 that is a valid sign-extension for the original mode.  */
      gcc_assert (INTVAL (newval)
		  == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));

      /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
	 CONST_INT is not valid, because after the replacement, the
	 original mode would be gone.  Unfortunately, we can't tell
	 when do_SUBST is called to replace the operand thereof, so we
	 perform this test on oldval instead, checking whether an
	 invalid replacement took place before we got here.  */
      gcc_assert (!(GET_CODE (oldval) == SUBREG
		    && CONST_INT_P (SUBREG_REG (oldval))));
      gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
		    && CONST_INT_P (XEXP (oldval, 0))));
    }

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_RTX;
  buf->where.r = into;
  buf->old_contents.r = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST(INTO, NEWVAL)	do_SUBST (&(INTO), (NEWVAL))
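
/* Usage sketch (PAT and NEW_SRC are hypothetical): to replace the
   source of a SET in an undoable way,

     SUBST (SET_SRC (pat), new_src);

   records the old value in undobuf; undo_all reverts every recorded
   change if the combination is abandoned, while undo_commit keeps
   them once the new pattern has been accepted.  */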

/* Similar to SUBST, but NEWVAL is an int expression.  Note that this
   routine only records an int, so substituting a full HOST_WIDE_INT
   value (including CONST_INT) through it is not safe.  */

static void
do_SUBST_INT (int *into, int newval)
{
  struct undo *buf;
  int oldval = *into;

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_INT;
  buf->where.i = into;
  buf->old_contents.i = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_INT(INTO, NEWVAL)  do_SUBST_INT (&(INTO), (NEWVAL))

/* Similar to SUBST, but just substitute the mode.  This is used when
   changing the mode of a pseudo-register, so that any other
   references to the entry in the regno_reg_rtx array will change as
   well.  */

static void
do_SUBST_MODE (rtx *into, machine_mode newval)
{
  struct undo *buf;
  machine_mode oldval = GET_MODE (*into);

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_MODE;
  buf->where.r = into;
  buf->old_contents.m = oldval;
  adjust_reg_mode (*into, newval);

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_MODE(INTO, NEWVAL)  do_SUBST_MODE (&(INTO), (NEWVAL))

/* Similar to SUBST, but NEWVAL is a LOG_LINKS expression.  */

static void
do_SUBST_LINK (struct insn_link **into, struct insn_link *newval)
{
  struct undo *buf;
  struct insn_link *oldval = *into;

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_LINKS;
  buf->where.l = into;
  buf->old_contents.l = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_LINK(oldval, newval) do_SUBST_LINK (&oldval, newval)

/* Subroutine of try_combine.  Determine whether the replacement patterns
   NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to insn_cost
   than the original sequence I0, I1, I2, I3 and undobuf.other_insn.  Note
   that I0, I1 and/or NEWI2PAT may be NULL_RTX.  Similarly, NEWOTHERPAT and
   undobuf.other_insn may also both be NULL_RTX.  Return false if the cost
   of all the instructions can be estimated and the replacements are more
   expensive than the original sequence.  */
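
/* For instance (a sketch with made-up numbers): if I2 and I3 cost 4
   each and the single replacement pattern costs 6, the combination is
   allowed (6 <= 8); a replacement costing 9 would be rejected.  When
   any original cost is unknown (zero), the comparison is skipped and
   the combination is allowed.  */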

static bool
combine_validate_cost (rtx_insn *i0, rtx_insn *i1, rtx_insn *i2, rtx_insn *i3,
		       rtx newpat, rtx newi2pat, rtx newotherpat)
{
  int i0_cost, i1_cost, i2_cost, i3_cost;
  int new_i2_cost, new_i3_cost;
  int old_cost, new_cost;

  /* Lookup the original insn_costs.  */
  i2_cost = INSN_COST (i2);
  i3_cost = INSN_COST (i3);

  if (i1)
    {
      i1_cost = INSN_COST (i1);
      if (i0)
	{
	  i0_cost = INSN_COST (i0);
	  old_cost = (i0_cost > 0 && i1_cost > 0 && i2_cost > 0 && i3_cost > 0
		      ? i0_cost + i1_cost + i2_cost + i3_cost : 0);
	}
      else
	{
	  old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0
		      ? i1_cost + i2_cost + i3_cost : 0);
	  i0_cost = 0;
	}
    }
  else
    {
      old_cost = (i2_cost > 0 && i3_cost > 0) ? i2_cost + i3_cost : 0;
      i1_cost = i0_cost = 0;
    }

  /* If we have split a PARALLEL I2 to I1,I2, we have counted its cost twice;
     correct that.  */
  if (old_cost && i1 && INSN_UID (i1) == INSN_UID (i2))
    old_cost -= i1_cost;

  /* Calculate the replacement insn_costs.  */
  rtx tmp = PATTERN (i3);
  PATTERN (i3) = newpat;
  int tmpi = INSN_CODE (i3);
  INSN_CODE (i3) = -1;
  new_i3_cost = insn_cost (i3, optimize_this_for_speed_p);
  PATTERN (i3) = tmp;
  INSN_CODE (i3) = tmpi;
  if (newi2pat)
    {
      tmp = PATTERN (i2);
      PATTERN (i2) = newi2pat;
      tmpi = INSN_CODE (i2);
      INSN_CODE (i2) = -1;
      new_i2_cost = insn_cost (i2, optimize_this_for_speed_p);
      PATTERN (i2) = tmp;
      INSN_CODE (i2) = tmpi;
      new_cost = (new_i2_cost > 0 && new_i3_cost > 0)
		 ? new_i2_cost + new_i3_cost : 0;
    }
  else
    {
      new_cost = new_i3_cost;
      new_i2_cost = 0;
    }

  if (undobuf.other_insn)
    {
      int old_other_cost, new_other_cost;

      old_other_cost = INSN_COST (undobuf.other_insn);
      tmp = PATTERN (undobuf.other_insn);
      PATTERN (undobuf.other_insn) = newotherpat;
      tmpi = INSN_CODE (undobuf.other_insn);
      INSN_CODE (undobuf.other_insn) = -1;
      new_other_cost = insn_cost (undobuf.other_insn,
				  optimize_this_for_speed_p);
      PATTERN (undobuf.other_insn) = tmp;
      INSN_CODE (undobuf.other_insn) = tmpi;
      if (old_other_cost > 0 && new_other_cost > 0)
	{
	  old_cost += old_other_cost;
	  new_cost += new_other_cost;
	}
      else
	old_cost = 0;
    }

  /* Disallow this combination if both new_cost and old_cost are greater than
     zero, and new_cost is greater than old cost.  */
  int reject = old_cost > 0 && new_cost > old_cost;

  if (dump_file)
    {
      fprintf (dump_file, "%s combination of insns ",
	       reject ? "rejecting" : "allowing");
      if (i0)
	fprintf (dump_file, "%d, ", INSN_UID (i0));
      if (i1 && INSN_UID (i1) != INSN_UID (i2))
	fprintf (dump_file, "%d, ", INSN_UID (i1));
      fprintf (dump_file, "%d and %d\n", INSN_UID (i2), INSN_UID (i3));

      fprintf (dump_file, "original costs ");
      if (i0)
	fprintf (dump_file, "%d + ", i0_cost);
      if (i1 && INSN_UID (i1) != INSN_UID (i2))
	fprintf (dump_file, "%d + ", i1_cost);
      fprintf (dump_file, "%d + %d = %d\n", i2_cost, i3_cost, old_cost);

      if (newi2pat)
	fprintf (dump_file, "replacement costs %d + %d = %d\n",
		 new_i2_cost, new_i3_cost, new_cost);
      else
	fprintf (dump_file, "replacement cost %d\n", new_cost);
    }

  if (reject)
    return false;

  /* Update the uid_insn_cost array with the replacement costs.  */
  INSN_COST (i2) = new_i2_cost;
  INSN_COST (i3) = new_i3_cost;
  if (i1)
    {
      INSN_COST (i1) = 0;
      if (i0)
	INSN_COST (i0) = 0;
    }

  return true;
}

/* Delete any insns that copy a register to itself.
   Return true if the CFG was changed.  */

static bool
delete_noop_moves (void)
{
  rtx_insn *insn, *next;
  basic_block bb;

  bool edges_deleted = false;

  FOR_EACH_BB_FN (bb, cfun)
    {
      for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
	{
	  next = NEXT_INSN (insn);
	  if (INSN_P (insn) && noop_move_p (insn))
	    {
	      if (dump_file)
		fprintf (dump_file, "deleting noop move %d\n",
			 INSN_UID (insn));

	      edges_deleted |= delete_insn_and_edges (insn);
	    }
	}
    }

  return edges_deleted;
}

/* Return false if we do not want to (or cannot) combine DEF.  */
static bool
can_combine_def_p (df_ref def)
{
  /* Do not consider if it is pre/post modification in MEM.  */
  if (DF_REF_FLAGS (def) & DF_REF_PRE_POST_MODIFY)
    return false;

  unsigned int regno = DF_REF_REGNO (def);

  /* Do not combine frame pointer adjustments.  */
  if ((regno == FRAME_POINTER_REGNUM
       && (!reload_completed || frame_pointer_needed))
      || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
	  && regno == HARD_FRAME_POINTER_REGNUM
	  && (!reload_completed || frame_pointer_needed))
      || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	  && regno == ARG_POINTER_REGNUM && fixed_regs[regno]))
    return false;

  return true;
}

/* Return false if we do not want to (or cannot) combine USE.  */
static bool
can_combine_use_p (df_ref use)
{
  /* Do not consider the usage of the stack pointer by function call.  */
  if (DF_REF_FLAGS (use) & DF_REF_CALL_STACK_USAGE)
    return false;

  return true;
}

/* Fill in log links field for all insns.  */

static void
create_log_links (void)
{
  basic_block bb;
  rtx_insn **next_use;
  rtx_insn *insn;
  df_ref def, use;

  next_use = XCNEWVEC (rtx_insn *, max_reg_num ());

  /* Pass through each block from the end, recording the uses of each
     register and establishing log links when def is encountered.
     Note that we do not clear next_use array in order to save time,
     so we have to test whether the use is in the same basic block as def.

     There are a few cases below when we do not consider the definition or
     usage -- these are taken from what the original flow.c did.  Don't ask
     me why it is done this way; I don't know and if it works, I don't want
     to know.  */

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_BB_INSNS_REVERSE (bb, insn)
	{
	  if (!NONDEBUG_INSN_P (insn))
	    continue;

	  /* Log links are created only once.  */
	  gcc_assert (!LOG_LINKS (insn));

	  FOR_EACH_INSN_DEF (def, insn)
	    {
	      unsigned int regno = DF_REF_REGNO (def);
	      rtx_insn *use_insn;

	      if (!next_use[regno])
		continue;

	      if (!can_combine_def_p (def))
		continue;

	      use_insn = next_use[regno];
	      next_use[regno] = NULL;

	      if (BLOCK_FOR_INSN (use_insn) != bb)
		continue;

	      /* flow.c claimed:

		 We don't build a LOG_LINK for hard registers contained
		 in ASM_OPERANDs.  If these registers get replaced,
		 we might wind up changing the semantics of the insn,
		 even if reload can make what appear to be valid
		 assignments later.  */
	      if (regno < FIRST_PSEUDO_REGISTER
		  && asm_noperands (PATTERN (use_insn)) >= 0)
		continue;

	      /* Don't add duplicate links between instructions.  */
	      struct insn_link *links;
	      FOR_EACH_LOG_LINK (links, use_insn)
		if (insn == links->insn && regno == links->regno)
		  break;

	      if (!links)
		LOG_LINKS (use_insn)
		  = alloc_insn_link (insn, regno, LOG_LINKS (use_insn));
	    }

	  FOR_EACH_INSN_USE (use, insn)
	    if (can_combine_use_p (use))
	      next_use[DF_REF_REGNO (use)] = insn;
	}
    }

  free (next_use);
}

/* Walk the LOG_LINKS of insn B to see if we find a reference to A.  Return
   true if we found a LOG_LINK that proves that A feeds B.  This only works
   if there are no instructions between A and B which could have a link
   depending on A, since in that case we would not record a link for B.
   We also check the implicit dependency created by a cc0 setter/user
   pair.  */

static bool
insn_a_feeds_b (rtx_insn *a, rtx_insn *b)
{
  struct insn_link *links;
  FOR_EACH_LOG_LINK (links, b)
    if (links->insn == a)
      return true;
  if (HAVE_cc0 && sets_cc0_p (a))
    return true;
  return false;
}

/* Main entry point for combiner.  F is the first insn of the function.
   NREGS is the first unused pseudo-reg number.

   Return nonzero if the CFG was changed (e.g. if the combiner has
   turned an indirect jump instruction into a direct jump).  */
static int
combine_instructions (rtx_insn *f, unsigned int nregs)
{
  rtx_insn *insn, *next;
  rtx_insn *prev;
  struct insn_link *links, *nextlinks;
  rtx_insn *first;
  basic_block last_bb;

  int new_direct_jump_p = 0;

  for (first = f; first && !NONDEBUG_INSN_P (first); )
    first = NEXT_INSN (first);
  if (!first)
    return 0;

  combine_attempts = 0;
  combine_merges = 0;
  combine_extras = 0;
  combine_successes = 0;

  rtl_hooks = combine_rtl_hooks;

  reg_stat.safe_grow_cleared (nregs);

  init_recog_no_volatile ();

  /* Allocate array for insn info.  */
  max_uid_known = get_max_uid ();
  uid_log_links = XCNEWVEC (struct insn_link *, max_uid_known + 1);
  uid_insn_cost = XCNEWVEC (int, max_uid_known + 1);
  gcc_obstack_init (&insn_link_obstack);

  nonzero_bits_mode = int_mode_for_size (HOST_BITS_PER_WIDE_INT, 0).require ();

  /* Don't use reg_stat[].nonzero_bits when computing it.  This can cause
     problems when, for example, we have j <<= 1 in a loop.  */

  nonzero_sign_valid = 0;
  label_tick = label_tick_ebb_start = 1;

  /* Scan all SETs and see if we can deduce anything about what
     bits are known to be zero for some registers and how many copies
     of the sign bit are known to exist for those registers.

     Also set any known values so that we can use it while searching
     for what bits are known to be set.  */

  setup_incoming_promotions (first);
  /* Allow the entry block and the first block to fall into the same EBB.
     Conceptually the incoming promotions are assigned to the entry block.  */
  last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);

  create_log_links ();
  FOR_EACH_BB_FN (this_basic_block, cfun)
    {
      optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
      last_call_luid = 0;
      mem_last_set = -1;

      label_tick++;
      if (!single_pred_p (this_basic_block)
	  || single_pred (this_basic_block) != last_bb)
	label_tick_ebb_start = label_tick;
      last_bb = this_basic_block;

      FOR_BB_INSNS (this_basic_block, insn)
	if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
	  {
	    rtx links;

	    subst_low_luid = DF_INSN_LUID (insn);
	    subst_insn = insn;

	    note_stores (PATTERN (insn), set_nonzero_bits_and_sign_copies,
			 insn);
	    record_dead_and_set_regs (insn);

	    if (AUTO_INC_DEC)
	      for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
		if (REG_NOTE_KIND (links) == REG_INC)
		  set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX,
						    insn);

	    /* Record the current insn_cost of this instruction.  */
	    if (NONJUMP_INSN_P (insn))
	      INSN_COST (insn) = insn_cost (insn, optimize_this_for_speed_p);
	    if (dump_file)
	      {
		fprintf (dump_file, "insn_cost %d for ", INSN_COST (insn));
		dump_insn_slim (dump_file, insn);
	      }
	  }
    }

  nonzero_sign_valid = 1;

  /* Now scan all the insns in forward order.  */
  label_tick = label_tick_ebb_start = 1;
  init_reg_last ();
  setup_incoming_promotions (first);
  last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
  int max_combine = PARAM_VALUE (PARAM_MAX_COMBINE_INSNS);

  FOR_EACH_BB_FN (this_basic_block, cfun)
    {
      rtx_insn *last_combined_insn = NULL;

      /* Ignore instruction combination in basic blocks that are going to
	 be removed as unreachable anyway.  See PR82386.  */
      if (EDGE_COUNT (this_basic_block->preds) == 0)
	continue;

      optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
      last_call_luid = 0;
      mem_last_set = -1;

      label_tick++;
      if (!single_pred_p (this_basic_block)
	  || single_pred (this_basic_block) != last_bb)
	label_tick_ebb_start = label_tick;
      last_bb = this_basic_block;

      rtl_profile_for_bb (this_basic_block);
      for (insn = BB_HEAD (this_basic_block);
	   insn != NEXT_INSN (BB_END (this_basic_block));
	   insn = next ? next : NEXT_INSN (insn))
	{
	  next = 0;
	  if (!NONDEBUG_INSN_P (insn))
	    continue;

	  while (last_combined_insn
		 && (!NONDEBUG_INSN_P (last_combined_insn)
		     || last_combined_insn->deleted ()))
	    last_combined_insn = PREV_INSN (last_combined_insn);
	  if (last_combined_insn == NULL_RTX
	      || BLOCK_FOR_INSN (last_combined_insn) != this_basic_block
	      || DF_INSN_LUID (last_combined_insn) <= DF_INSN_LUID (insn))
	    last_combined_insn = insn;

	  /* See if we know about function return values before this
	     insn based upon SUBREG flags.  */
	  check_promoted_subreg (insn, PATTERN (insn));

	  /* See if we can find hardregs and subreg of pseudos in
	     narrower modes.  This could help turning TRUNCATEs
	     into SUBREGs.  */
	  note_uses (&PATTERN (insn), record_truncated_values, NULL);

	  /* Try this insn with each insn it links back to.  */

	  FOR_EACH_LOG_LINK (links, insn)
	    if ((next = try_combine (insn, links->insn, NULL,
				     NULL, &new_direct_jump_p,
				     last_combined_insn)) != 0)
	      {
		statistics_counter_event (cfun, "two-insn combine", 1);
		goto retry;
	      }

	  /* Try each sequence of three linked insns ending with this one.  */

	  if (max_combine >= 3)
	    FOR_EACH_LOG_LINK (links, insn)
	      {
		rtx_insn *link = links->insn;

		/* If the linked insn has been replaced by a note, then there
		   is no point in pursuing this chain any further.  */
		if (NOTE_P (link))
		  continue;

		FOR_EACH_LOG_LINK (nextlinks, link)
		  if ((next = try_combine (insn, link, nextlinks->insn,
					   NULL, &new_direct_jump_p,
					   last_combined_insn)) != 0)
		    {
		      statistics_counter_event (cfun, "three-insn combine", 1);
		      goto retry;
		    }
	      }

	  /* Try to combine a jump insn that uses CC0
	     with a preceding insn that sets CC0, and maybe with its
	     logical predecessor as well.
	     This is how we make decrement-and-branch insns.
	     We need this special code because data flow connections
	     via CC0 do not get entered in LOG_LINKS.  */

	  if (HAVE_cc0
	      && JUMP_P (insn)
	      && (prev = prev_nonnote_insn (insn)) != 0
	      && NONJUMP_INSN_P (prev)
	      && sets_cc0_p (PATTERN (prev)))
	    {
	      if ((next = try_combine (insn, prev, NULL, NULL,
				       &new_direct_jump_p,
				       last_combined_insn)) != 0)
		goto retry;

	      FOR_EACH_LOG_LINK (nextlinks, prev)
		if ((next = try_combine (insn, prev, nextlinks->insn,
					 NULL, &new_direct_jump_p,
					 last_combined_insn)) != 0)
		  goto retry;
	    }

	  /* Do the same for an insn that explicitly references CC0.  */
	  if (HAVE_cc0 && NONJUMP_INSN_P (insn)
	      && (prev = prev_nonnote_insn (insn)) != 0
	      && NONJUMP_INSN_P (prev)
	      && sets_cc0_p (PATTERN (prev))
	      && GET_CODE (PATTERN (insn)) == SET
	      && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
	    {
	      if ((next = try_combine (insn, prev, NULL, NULL,
				       &new_direct_jump_p,
				       last_combined_insn)) != 0)
		goto retry;

	      FOR_EACH_LOG_LINK (nextlinks, prev)
		if ((next = try_combine (insn, prev, nextlinks->insn,
					 NULL, &new_direct_jump_p,
					 last_combined_insn)) != 0)
		  goto retry;
	    }

	  /* Finally, see if any of the insns that this insn links to
	     explicitly references CC0.  If so, try this insn, that insn,
	     and its predecessor if it sets CC0.  */
	  if (HAVE_cc0)
	    {
	      FOR_EACH_LOG_LINK (links, insn)
		if (NONJUMP_INSN_P (links->insn)
		    && GET_CODE (PATTERN (links->insn)) == SET
		    && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (links->insn)))
		    && (prev = prev_nonnote_insn (links->insn)) != 0
		    && NONJUMP_INSN_P (prev)
		    && sets_cc0_p (PATTERN (prev))
		    && (next = try_combine (insn, links->insn,
					    prev, NULL, &new_direct_jump_p,
					    last_combined_insn)) != 0)
		  goto retry;
	    }

	  /* Try combining an insn with two different insns whose results it
	     uses.  */
	  if (max_combine >= 3)
	    FOR_EACH_LOG_LINK (links, insn)
	      for (nextlinks = links->next; nextlinks;
		   nextlinks = nextlinks->next)
		if ((next = try_combine (insn, links->insn,
					 nextlinks->insn, NULL,
					 &new_direct_jump_p,
					 last_combined_insn)) != 0)
		  {
		    statistics_counter_event (cfun, "three-insn combine", 1);
		    goto retry;
		  }

	  /* Try four-instruction combinations.  */
	  if (max_combine >= 4)
	    FOR_EACH_LOG_LINK (links, insn)
	      {
		struct insn_link *next1;
		rtx_insn *link = links->insn;

		/* If the linked insn has been replaced by a note, then there
		   is no point in pursuing this chain any further.  */
		if (NOTE_P (link))
		  continue;

		FOR_EACH_LOG_LINK (next1, link)
		  {
		    rtx_insn *link1 = next1->insn;
		    if (NOTE_P (link1))
		      continue;
		    /* I0 -> I1 -> I2 -> I3.  */
		    FOR_EACH_LOG_LINK (nextlinks, link1)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun,
						    "four-insn combine", 1);
			  goto retry;
			}
		    /* I0, I1 -> I2, I2 -> I3.  */
		    for (nextlinks = next1->next; nextlinks;
			 nextlinks = nextlinks->next)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun,
						    "four-insn combine", 1);
			  goto retry;
			}
		  }

		for (next1 = links->next; next1; next1 = next1->next)
		  {
		    rtx_insn *link1 = next1->insn;
		    if (NOTE_P (link1))
		      continue;
		    /* I0 -> I2; I1, I2 -> I3.  */
		    FOR_EACH_LOG_LINK (nextlinks, link)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun,
						    "four-insn combine", 1);
			  goto retry;
			}
		    /* I0 -> I1; I1, I2 -> I3.  */
		    FOR_EACH_LOG_LINK (nextlinks, link1)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun,
						    "four-insn combine", 1);
			  goto retry;
			}
		  }
	      }

	  /* Try this insn with each REG_EQUAL note it links back to.  */
	  FOR_EACH_LOG_LINK (links, insn)
	    {
	      rtx set, note;
	      rtx_insn *temp = links->insn;
	      if ((set = single_set (temp)) != 0
		  && (note = find_reg_equal_equiv_note (temp)) != 0
		  && (note = XEXP (note, 0), GET_CODE (note)) != EXPR_LIST
		  && ! side_effects_p (SET_SRC (set))
		  /* Avoid using a register that may already have been
		     marked dead by an earlier instruction.  */
		  && ! unmentioned_reg_p (note, SET_SRC (set))
		  && (GET_MODE (note) == VOIDmode
		      ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set)))
		      : (GET_MODE (SET_DEST (set)) == GET_MODE (note)
			 && (GET_CODE (SET_DEST (set)) != ZERO_EXTRACT
			     || (GET_MODE (XEXP (SET_DEST (set), 0))
				 == GET_MODE (note))))))
		{
		  /* Temporarily replace the set's source with the
		     contents of the REG_EQUAL note.  The insn will
		     be deleted or recognized by try_combine.  */
		  rtx orig_src = SET_SRC (set);
		  rtx orig_dest = SET_DEST (set);
		  if (GET_CODE (SET_DEST (set)) == ZERO_EXTRACT)
		    SET_DEST (set) = XEXP (SET_DEST (set), 0);
		  SET_SRC (set) = note;
		  i2mod = temp;
		  i2mod_old_rhs = copy_rtx (orig_src);
		  i2mod_new_rhs = copy_rtx (note);
		  next = try_combine (insn, i2mod, NULL, NULL,
				      &new_direct_jump_p,
				      last_combined_insn);
		  i2mod = NULL;
		  if (next)
		    {
		      statistics_counter_event (cfun,
						"insn-with-note combine", 1);
		      goto retry;
		    }
		  SET_SRC (set) = orig_src;
		  SET_DEST (set) = orig_dest;
		}
	    }

	  if (!NOTE_P (insn))
	    record_dead_and_set_regs (insn);

	retry:
	  ;
	}
    }

  default_rtl_profile ();
  clear_bb_flags ();
  new_direct_jump_p |= purge_all_dead_edges ();
  new_direct_jump_p |= delete_noop_moves ();

  /* Clean up.  */
  obstack_free (&insn_link_obstack, NULL);
  free (uid_log_links);
  free (uid_insn_cost);
  reg_stat.release ();

  {
    struct undo *undo, *next;
    for (undo = undobuf.frees; undo; undo = next)
      {
	next = undo->next;
	free (undo);
      }
    undobuf.frees = 0;
  }

  total_attempts += combine_attempts;
  total_merges += combine_merges;
  total_extras += combine_extras;
  total_successes += combine_successes;

  nonzero_sign_valid = 0;
  rtl_hooks = general_rtl_hooks;

  /* Make recognizer allow volatile MEMs again.  */
  init_recog ();

  return new_direct_jump_p;
}

/* Wipe the last_xxx fields of reg_stat in preparation for another pass.  */

static void
init_reg_last (void)
{
  unsigned int i;
  reg_stat_type *p;

  FOR_EACH_VEC_ELT (reg_stat, i, p)
    memset (p, 0, offsetof (reg_stat_type, sign_bit_copies));
}

/* Set up any promoted values for incoming argument registers.  */

static void
setup_incoming_promotions (rtx_insn *first)
{
  tree arg;
  bool strictly_local = false;

  for (arg = DECL_ARGUMENTS (current_function_decl); arg;
       arg = DECL_CHAIN (arg))
    {
      rtx x, reg = DECL_INCOMING_RTL (arg);
      int uns1, uns3;
      machine_mode mode1, mode2, mode3, mode4;

      /* Only continue if the incoming argument is in a register.  */
      if (!REG_P (reg))
	continue;

      /* Determine, if possible, whether all call sites of the current
	 function lie within the current compilation unit.  (This does
	 take into account the exporting of a function via taking its
	 address, and so forth.)  */
      strictly_local = cgraph_node::local_info (current_function_decl)->local;

      /* The mode and signedness of the argument before any promotions happen
	 (equal to the mode of the pseudo holding it at that stage).  */
      mode1 = TYPE_MODE (TREE_TYPE (arg));
      uns1 = TYPE_UNSIGNED (TREE_TYPE (arg));

      /* The mode and signedness of the argument after any source language and
	 TARGET_PROMOTE_PROTOTYPES-driven promotions.  */
      mode2 = TYPE_MODE (DECL_ARG_TYPE (arg));
      uns3 = TYPE_UNSIGNED (DECL_ARG_TYPE (arg));

      /* The mode and signedness of the argument as it is actually passed,
	 see assign_parm_setup_reg in function.c.  */
      mode3 = promote_function_mode (TREE_TYPE (arg), mode1, &uns3,
				     TREE_TYPE (cfun->decl), 0);

      /* The mode of the register in which the argument is being passed.  */
      mode4 = GET_MODE (reg);

      /* Eliminate sign extensions in the callee when:
	 (a) A mode promotion has occurred;  */
      if (mode1 == mode3)
	continue;
      /* (b) The mode of the register is the same as the mode of
	     the argument as it is passed;  */
      if (mode3 != mode4)
	continue;
      /* (c) There's no language level extension;  */
      if (mode1 == mode2)
	;
      /* (c.1) All callers are from the current compilation unit.  If that's
	 the case we don't have to rely on an ABI, we only have to know
	 what we're generating right now, and we know that we will do the
	 mode1 to mode2 promotion with the given sign.  */
      else if (!strictly_local)
	continue;
      /* (c.2) The combination of the two promotions is useful.  This is
	 true when the signs match, or if the first promotion is unsigned.
	 In the latter case, (sign_extend (zero_extend x)) is the same as
	 (zero_extend (zero_extend x)), so make sure to force UNS3 true.  */
      else if (uns1)
	uns3 = true;
      else if (uns3)
	continue;

      /* Record that the value was promoted from mode1 to mode3,
	 so that any sign extension at the head of the current
	 function may be eliminated.  */
      x = gen_rtx_CLOBBER (mode1, const0_rtx);
      x = gen_rtx_fmt_e ((uns3 ? ZERO_EXTEND : SIGN_EXTEND), mode3, x);
      record_value_for_reg (reg, first, x);
    }
}
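
/* Sketch of the effect: on a hypothetical target that promotes QImode
   arguments to SImode, a signed char argument arrives sign-extended,
   so the code above records

	(sign_extend:SI (clobber:QI (const_int 0)))

   for the incoming register, allowing a redundant sign extension of
   the low byte at the head of the function to be deleted.  */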

/* If MODE has a precision lower than PREC and SRC is a non-negative constant
   that would appear negative in MODE, sign-extend SRC for use in nonzero_bits
   because some machines (maybe most) will actually do the sign-extension and
   this is the conservative approach.

   ??? For 2.5, try to tighten up the MD files in this regard instead of this
   kludge.  */

static rtx
sign_extend_short_imm (rtx src, machine_mode mode, unsigned int prec)
{
  scalar_int_mode int_mode;
  if (CONST_INT_P (src)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && GET_MODE_PRECISION (int_mode) < prec
      && INTVAL (src) > 0
      && val_signbit_known_set_p (int_mode, INTVAL (src)))
    src = GEN_INT (INTVAL (src) | ~GET_MODE_MASK (int_mode));

  return src;
}
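
/* For example, with 8-bit QImode and PREC == 32: the constant 0x80 is
   positive as a host integer but has the QImode sign bit set, so it is
   rewritten as (0x80 | ~0xff) == -128, matching what a sign-extending
   machine would actually load.  */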
1679
1680 /* Update RSP for pseudo-register X from INSN's REG_EQUAL note (if one exists)
1681 and SET. */
1682
1683 static void
update_rsp_from_reg_equal(reg_stat_type * rsp,rtx_insn * insn,const_rtx set,rtx x)1684 update_rsp_from_reg_equal (reg_stat_type *rsp, rtx_insn *insn, const_rtx set,
1685 rtx x)
1686 {
1687 rtx reg_equal_note = insn ? find_reg_equal_equiv_note (insn) : NULL_RTX;
1688 unsigned HOST_WIDE_INT bits = 0;
1689 rtx reg_equal = NULL, src = SET_SRC (set);
1690 unsigned int num = 0;
1691
1692 if (reg_equal_note)
1693 reg_equal = XEXP (reg_equal_note, 0);
1694
1695 if (SHORT_IMMEDIATES_SIGN_EXTEND)
1696 {
1697 src = sign_extend_short_imm (src, GET_MODE (x), BITS_PER_WORD);
1698 if (reg_equal)
1699 reg_equal = sign_extend_short_imm (reg_equal, GET_MODE (x), BITS_PER_WORD);
1700 }
1701
1702 /* Don't call nonzero_bits if it cannot change anything. */
1703 if (rsp->nonzero_bits != HOST_WIDE_INT_M1U)
1704 {
1705 bits = nonzero_bits (src, nonzero_bits_mode);
1706 if (reg_equal && bits)
1707 bits &= nonzero_bits (reg_equal, nonzero_bits_mode);
1708 rsp->nonzero_bits |= bits;
1709 }
1710
1711 /* Don't call num_sign_bit_copies if it cannot change anything. */
1712 if (rsp->sign_bit_copies != 1)
1713 {
1714 num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x));
1715 if (reg_equal && maybe_ne (num, GET_MODE_PRECISION (GET_MODE (x))))
1716 {
1717 unsigned int numeq = num_sign_bit_copies (reg_equal, GET_MODE (x));
1718 if (num == 0 || numeq > num)
1719 num = numeq;
1720 }
1721 if (rsp->sign_bit_copies == 0 || num < rsp->sign_bit_copies)
1722 rsp->sign_bit_copies = num;
1723 }
1724 }
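/* For instance (purely illustrative): if SET_SRC is
   (and:SI (reg:SI 101) (const_int 0xff)), nonzero_bits reports 0xff;
   a REG_EQUAL note of (const_int 0x0f) narrows the contribution to
   0x0f, since the stored value must satisfy both descriptions.  */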
1725
1726 /* Called via note_stores. If X is a pseudo that is narrower than
1727 HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero.
1728
1729 If we are setting only a portion of X and we can't figure out what
1730 portion, assume all bits will be used since we don't know what will
1731 be happening.
1732
1733 Similarly, set how many bits of X are known to be copies of the sign bit
1734 at all locations in the function. This is the smallest number implied
1735 by any set of X. */
1736
1737 static void
1738 set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data)
1739 {
1740 rtx_insn *insn = (rtx_insn *) data;
1741 scalar_int_mode mode;
1742
1743 if (REG_P (x)
1744 && REGNO (x) >= FIRST_PSEUDO_REGISTER
1745 /* If this register is undefined at the start of the function, we can't
1746 say what its contents were. */
1747 && ! REGNO_REG_SET_P
1748 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), REGNO (x))
1749 && is_a <scalar_int_mode> (GET_MODE (x), &mode)
1750 && HWI_COMPUTABLE_MODE_P (mode))
1751 {
1752 reg_stat_type *rsp = &reg_stat[REGNO (x)];
1753
1754 if (set == 0 || GET_CODE (set) == CLOBBER)
1755 {
1756 rsp->nonzero_bits = GET_MODE_MASK (mode);
1757 rsp->sign_bit_copies = 1;
1758 return;
1759 }
1760
1761 /* If this register is being initialized using itself, and the
1762 register is uninitialized in this basic block, and there are
1763 no LOG_LINKS which set the register, then part of the
1764 register is uninitialized. In that case we can't assume
1765 anything about the number of nonzero bits.
1766
1767 ??? We could do better if we checked this in
1768 reg_{nonzero_bits,num_sign_bit_copies}_for_combine. Then we
1769 could avoid making assumptions about the insn which initially
1770 sets the register, while still using the information in other
1771 insns. We would have to be careful to check every insn
1772 involved in the combination. */
1773
1774 if (insn
1775 && reg_referenced_p (x, PATTERN (insn))
1776 && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn)),
1777 REGNO (x)))
1778 {
1779 struct insn_link *link;
1780
1781 FOR_EACH_LOG_LINK (link, insn)
1782 if (dead_or_set_p (link->insn, x))
1783 break;
1784 if (!link)
1785 {
1786 rsp->nonzero_bits = GET_MODE_MASK (mode);
1787 rsp->sign_bit_copies = 1;
1788 return;
1789 }
1790 }
1791
1792 /* If this is a complex assignment, see if we can convert it into a
1793 simple assignment. */
1794 set = expand_field_assignment (set);
1795
1796 /* If this is a simple assignment, or we have a paradoxical SUBREG,
1797 set what we know about X. */
1798
1799 if (SET_DEST (set) == x
1800 || (paradoxical_subreg_p (SET_DEST (set))
1801 && SUBREG_REG (SET_DEST (set)) == x))
1802 update_rsp_from_reg_equal (rsp, insn, set, x);
1803 else
1804 {
1805 rsp->nonzero_bits = GET_MODE_MASK (mode);
1806 rsp->sign_bit_copies = 1;
1807 }
1808 }
1809 }
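/* Illustrative note: a paradoxical SUBREG destination such as
   (set (subreg:DI (reg:SI 100) 0) ...) changes all of reg 100 and so
   is handled by update_rsp_from_reg_equal above, whereas a partial
   destination like (set (subreg:HI (reg:SI 100) 0) ...) falls through
   to the conservative "all bits used" case.  */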
1810
1811 /* See if INSN can be combined into I3. PRED, PRED2, SUCC and SUCC2 are
1812 optionally insns that were previously combined into I3 or that will be
1813 combined into the merger of INSN and I3. The order is PRED, PRED2,
1814 INSN, SUCC, SUCC2, I3.
1815
1816 Return 0 if the combination is not allowed for any reason.
1817
1818 If the combination is allowed, *PDEST will be set to the single
1819 destination of INSN and *PSRC to the single source, and this function
1820 will return 1. */
1821
1822 static int
1823 can_combine_p (rtx_insn *insn, rtx_insn *i3, rtx_insn *pred ATTRIBUTE_UNUSED,
1824 rtx_insn *pred2 ATTRIBUTE_UNUSED, rtx_insn *succ, rtx_insn *succ2,
1825 rtx *pdest, rtx *psrc)
1826 {
1827 int i;
1828 const_rtx set = 0;
1829 rtx src, dest;
1830 rtx_insn *p;
1831 rtx link;
1832 bool all_adjacent = true;
1833 int (*is_volatile_p) (const_rtx);
1834
1835 if (succ)
1836 {
1837 if (succ2)
1838 {
1839 if (next_active_insn (succ2) != i3)
1840 all_adjacent = false;
1841 if (next_active_insn (succ) != succ2)
1842 all_adjacent = false;
1843 }
1844 else if (next_active_insn (succ) != i3)
1845 all_adjacent = false;
1846 if (next_active_insn (insn) != succ)
1847 all_adjacent = false;
1848 }
1849 else if (next_active_insn (insn) != i3)
1850 all_adjacent = false;
1851
1852 /* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0,
1853 or a PARALLEL consisting of such a SET and CLOBBERs.
1854
1855 If INSN has CLOBBER parallel parts, ignore them for our processing.
1856 By definition, these happen during the execution of the insn. When it
1857 is merged with another insn, all bets are off. If they are, in fact,
1858 needed and aren't also supplied in I3, they may be added by
1859 recog_for_combine. Otherwise, it won't match.
1860
1861 We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
1862 note.
1863
1864 Get the source and destination of INSN. If more than one, can't
1865 combine. */
1866
1867 if (GET_CODE (PATTERN (insn)) == SET)
1868 set = PATTERN (insn);
1869 else if (GET_CODE (PATTERN (insn)) == PARALLEL
1870 && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
1871 {
1872 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1873 {
1874 rtx elt = XVECEXP (PATTERN (insn), 0, i);
1875
1876 switch (GET_CODE (elt))
1877 {
1878 /* This is important to combine floating point insns
1879 for the SH4 port. */
1880 case USE:
1881 /* Combining an isolated USE doesn't make sense.
1882 We depend here on combinable_i3pat to reject them. */
1883 /* The code below this loop only verifies that the inputs of
1884 the SET in INSN do not change. We call reg_set_between_p
1885 to verify that the REG in the USE does not change between
1886 I3 and INSN.
1887 If the USE in INSN was for a pseudo register, the matching
1888 insn pattern will likely match any register; combining this
1889 with any other USE would only be safe if we knew that the
1890 used registers have identical values, or if there was
1891 something to tell them apart, e.g. different modes. For
1892 now, we forgo such complicated tests and simply disallow
1893 combining of USES of pseudo registers with any other USE. */
1894 if (REG_P (XEXP (elt, 0))
1895 && GET_CODE (PATTERN (i3)) == PARALLEL)
1896 {
1897 rtx i3pat = PATTERN (i3);
1898 int i = XVECLEN (i3pat, 0) - 1;
1899 unsigned int regno = REGNO (XEXP (elt, 0));
1900
1901 do
1902 {
1903 rtx i3elt = XVECEXP (i3pat, 0, i);
1904
1905 if (GET_CODE (i3elt) == USE
1906 && REG_P (XEXP (i3elt, 0))
1907 && (REGNO (XEXP (i3elt, 0)) == regno
1908 ? reg_set_between_p (XEXP (elt, 0),
1909 PREV_INSN (insn), i3)
1910 : regno >= FIRST_PSEUDO_REGISTER))
1911 return 0;
1912 }
1913 while (--i >= 0);
1914 }
1915 break;
1916
1917 /* We can ignore CLOBBERs. */
1918 case CLOBBER:
1919 break;
1920
1921 case SET:
1922 /* Ignore SETs whose result isn't used, but not those that
1923 have side-effects. */
1924 if (find_reg_note (insn, REG_UNUSED, SET_DEST (elt))
1925 && insn_nothrow_p (insn)
1926 && !side_effects_p (elt))
1927 break;
1928
1929 /* If we have already found a SET, this is a second one and
1930 so we cannot combine with this insn. */
1931 if (set)
1932 return 0;
1933
1934 set = elt;
1935 break;
1936
1937 default:
1938 /* Anything else means we can't combine. */
1939 return 0;
1940 }
1941 }
1942
1943 if (set == 0
1944 /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
1945 so don't do anything with it. */
1946 || GET_CODE (SET_SRC (set)) == ASM_OPERANDS)
1947 return 0;
1948 }
1949 else
1950 return 0;
1951
1952 if (set == 0)
1953 return 0;
1954
1955 /* The simplification in expand_field_assignment may call back to
1956 get_last_value, so set a safeguard here. */
1957 subst_low_luid = DF_INSN_LUID (insn);
1958
1959 set = expand_field_assignment (set);
1960 src = SET_SRC (set), dest = SET_DEST (set);
1961
1962 /* Do not eliminate a user-specified register if it is in an
1963 asm input, because we may break the register asm usage defined
1964 in the GCC manual if we allow it.
1965 Be aware that this may cover more cases than we expect, but this
1966 should be harmless. */
1967 if (REG_P (dest) && REG_USERVAR_P (dest) && HARD_REGISTER_P (dest)
1968 && extract_asm_operands (PATTERN (i3)))
1969 return 0;
1970
1971 /* Don't eliminate a store in the stack pointer. */
1972 if (dest == stack_pointer_rtx
1973 /* Don't combine with an insn that sets a register to itself if it has
1974 a REG_EQUAL note. This may be part of a LIBCALL sequence. */
1975 || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX))
1976 /* Can't merge an ASM_OPERANDS. */
1977 || GET_CODE (src) == ASM_OPERANDS
1978 /* Can't merge a function call. */
1979 || GET_CODE (src) == CALL
1980 /* Don't eliminate a function call argument. */
1981 || (CALL_P (i3)
1982 && (find_reg_fusage (i3, USE, dest)
1983 || (REG_P (dest)
1984 && REGNO (dest) < FIRST_PSEUDO_REGISTER
1985 && global_regs[REGNO (dest)])))
1986 /* Don't substitute into an incremented register. */
1987 || FIND_REG_INC_NOTE (i3, dest)
1988 || (succ && FIND_REG_INC_NOTE (succ, dest))
1989 || (succ2 && FIND_REG_INC_NOTE (succ2, dest))
1990 /* Don't substitute into a non-local goto, this confuses CFG. */
1991 || (JUMP_P (i3) && find_reg_note (i3, REG_NON_LOCAL_GOTO, NULL_RTX))
1992 /* Make sure that DEST is not used after INSN but before SUCC, or
1993 after SUCC and before SUCC2, or after SUCC2 but before I3. */
1994 || (!all_adjacent
1995 && ((succ2
1996 && (reg_used_between_p (dest, succ2, i3)
1997 || reg_used_between_p (dest, succ, succ2)))
1998 || (!succ2 && succ && reg_used_between_p (dest, succ, i3))
1999 || (!succ2 && !succ && reg_used_between_p (dest, insn, i3))
2000 || (succ
2001 /* SUCC and SUCC2 can be split halves from a PARALLEL; in
2002 that case SUCC is not in the insn stream, so use SUCC2
2003 instead for this test. */
2004 && reg_used_between_p (dest, insn,
2005 succ2
2006 && INSN_UID (succ) == INSN_UID (succ2)
2007 ? succ2 : succ))))
2008 /* Make sure that the value that is to be substituted for the register
2009 does not use any registers whose values alter in between. However,
2010 if the insns are adjacent, a use can't cross a set even though we
2011 think it might (this can happen for a sequence of insns each setting
2012 the same destination; last_set of that register might point to
2013 a NOTE). If INSN has a REG_EQUIV note, the register is always
2014 equivalent to the memory so the substitution is valid even if there
2015 are intervening stores. Also, don't move a volatile asm or
2016 UNSPEC_VOLATILE across any other insns. */
2017 || (! all_adjacent
2018 && (((!MEM_P (src)
2019 || ! find_reg_note (insn, REG_EQUIV, src))
2020 && modified_between_p (src, insn, i3))
2021 || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
2022 || GET_CODE (src) == UNSPEC_VOLATILE))
2023 /* Don't combine across a CALL_INSN, because that would possibly
2024 change whether the life span of some REGs crosses calls or not,
2025 and it is a pain to update that information.
2026 Exception: if source is a constant, moving it later can't hurt.
2027 Accept that as a special case. */
2028 || (DF_INSN_LUID (insn) < last_call_luid && ! CONSTANT_P (src)))
2029 return 0;
2030
2031 /* DEST must either be a REG or CC0. */
2032 if (REG_P (dest))
2033 {
2034 /* If register alignment is being enforced for multi-word items in all
2035 cases except for parameters, it is possible to have a register copy
2036 insn referencing a hard register that is not allowed to contain the
2037 mode being copied and which would not be valid as an operand of most
2038 insns. Eliminate this problem by not combining with such an insn.
2039
2040 Also, on some machines we don't want to extend the life of a hard
2041 register. */
2042
2043 if (REG_P (src)
2044 && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
2045 && !targetm.hard_regno_mode_ok (REGNO (dest), GET_MODE (dest)))
2046 /* Don't extend the life of a hard register unless it is
2047 user variable (if we have few registers) or it can't
2048 fit into the desired register (meaning something special
2049 is going on).
2050 Also avoid substituting a return register into I3, because
2051 reload can't handle a conflict with constraints of other
2052 inputs. */
2053 || (REGNO (src) < FIRST_PSEUDO_REGISTER
2054 && !targetm.hard_regno_mode_ok (REGNO (src),
2055 GET_MODE (src)))))
2056 return 0;
2057 }
2058 else if (GET_CODE (dest) != CC0)
2059 return 0;
2060
2061
2062 if (GET_CODE (PATTERN (i3)) == PARALLEL)
2063 for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
2064 if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER)
2065 {
2066 rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0);
2067
2068 /* If the clobber represents an earlyclobber operand, we must not
2069 substitute an expression containing the clobbered register.
2070 As we do not analyze the constraint strings here, we have to
2071 make the conservative assumption. However, if the register is
2072 a fixed hard reg, the clobber cannot represent any operand;
2073 we leave it up to the machine description to either accept or
2074 reject use-and-clobber patterns. */
2075 if (!REG_P (reg)
2076 || REGNO (reg) >= FIRST_PSEUDO_REGISTER
2077 || !fixed_regs[REGNO (reg)])
2078 if (reg_overlap_mentioned_p (reg, src))
2079 return 0;
2080 }
2081
2082 /* If INSN contains anything volatile, or is an `asm' (whether volatile
2083 or not), reject, unless nothing volatile comes between it and I3. */
2084
2085 if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src))
2086 {
2087 /* Make sure neither succ nor succ2 contains a volatile reference. */
2088 if (succ2 != 0 && volatile_refs_p (PATTERN (succ2)))
2089 return 0;
2090 if (succ != 0 && volatile_refs_p (PATTERN (succ)))
2091 return 0;
2092 /* We'll check insns between INSN and I3 below. */
2093 }
2094
2095 /* If INSN is an asm, and DEST is a hard register, reject, since it has
2096 to be an explicit register variable, and was chosen for a reason. */
2097
2098 if (GET_CODE (src) == ASM_OPERANDS
2099 && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
2100 return 0;
2101
2102 /* If INSN contains volatile references (specifically volatile MEMs),
2103 we cannot combine across any other volatile references.
2104 Even if INSN doesn't contain volatile references, any intervening
2105 volatile insn might affect machine state. */
2106
2107 is_volatile_p = volatile_refs_p (PATTERN (insn))
2108 ? volatile_refs_p
2109 : volatile_insn_p;
2110
2111 for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
2112 if (INSN_P (p) && p != succ && p != succ2 && is_volatile_p (PATTERN (p)))
2113 return 0;
2114
2115 /* If INSN contains an autoincrement or autodecrement, make sure that
2116 register is not used between there and I3, and not already used in
2117 I3 either. Neither must it be used in PRED or SUCC, if they exist.
2118 Also insist that I3 not be a jump; if it were one
2119 and the incremented register were spilled, we would lose. */
2120
2121 if (AUTO_INC_DEC)
2122 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2123 if (REG_NOTE_KIND (link) == REG_INC
2124 && (JUMP_P (i3)
2125 || reg_used_between_p (XEXP (link, 0), insn, i3)
2126 || (pred != NULL_RTX
2127 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred)))
2128 || (pred2 != NULL_RTX
2129 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred2)))
2130 || (succ != NULL_RTX
2131 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ)))
2132 || (succ2 != NULL_RTX
2133 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ2)))
2134 || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
2135 return 0;
2136
2137 /* Don't combine an insn that follows a CC0-setting insn.
2138 An insn that uses CC0 must not be separated from the one that sets it.
2139 We do, however, allow I2 to follow a CC0-setting insn if that insn
2140 is passed as I1; in that case it will be deleted also.
2141 We also allow combining in this case if all the insns are adjacent
2142 because that would leave the two CC0 insns adjacent as well.
2143 It would be more logical to test whether CC0 occurs inside I1 or I2,
2144 but that would be much slower, and this ought to be equivalent. */
2145
2146 if (HAVE_cc0)
2147 {
2148 p = prev_nonnote_insn (insn);
2149 if (p && p != pred && NONJUMP_INSN_P (p) && sets_cc0_p (PATTERN (p))
2150 && ! all_adjacent)
2151 return 0;
2152 }
2153
2154 /* If we get here, we have passed all the tests and the combination is
2155 to be allowed. */
2156
2157 *pdest = dest;
2158 *psrc = src;
2159
2160 return 1;
2161 }
2162
2163 /* LOC is the location within I3 that contains its pattern or the component
2164 of a PARALLEL of the pattern. We validate that it is valid for combining.
2165
2166 One problem is if I3 modifies its output, as opposed to replacing it
2167 entirely, we can't allow the output to contain I2DEST, I1DEST or I0DEST as
2168 doing so would produce an insn that is not equivalent to the original insns.
2169
2170 Consider:
2171
2172 (set (reg:DI 101) (reg:DI 100))
2173 (set (subreg:SI (reg:DI 101) 0) <foo>)
2174
2175 This is NOT equivalent to:
2176
2177 (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
2178 (set (reg:DI 101) (reg:DI 100))])
2179
2180 Not only does this modify 100 (in which case it might still be valid
2181 if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.
2182
2183 We can also run into a problem if I2 sets a register that I1
2184 uses and I1 gets directly substituted into I3 (not via I2). In that
2185 case, we would be getting the wrong value of I2DEST into I3, so we
2186 must reject the combination. This case occurs when I2 and I1 both
2187 feed into I3, rather than when I1 feeds into I2, which feeds into I3.
2188 If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source
2189 of a SET must prevent combination from occurring. The same situation
2190 can occur for I0, in which case I0_NOT_IN_SRC is set.
2191
2192 Before doing the above check, we first try to expand a field assignment
2193 into a set of logical operations.
2194
2195 If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which
2196 we place a register that is both set and used within I3. If more than one
2197 such register is detected, we fail.
2198
2199 Return 1 if the combination is valid, zero otherwise. */
2200
2201 static int
2202 combinable_i3pat (rtx_insn *i3, rtx *loc, rtx i2dest, rtx i1dest, rtx i0dest,
2203 int i1_not_in_src, int i0_not_in_src, rtx *pi3dest_killed)
2204 {
2205 rtx x = *loc;
2206
2207 if (GET_CODE (x) == SET)
2208 {
2209 rtx set = x;
2210 rtx dest = SET_DEST (set);
2211 rtx src = SET_SRC (set);
2212 rtx inner_dest = dest;
2213 rtx subdest;
2214
2215 while (GET_CODE (inner_dest) == STRICT_LOW_PART
2216 || GET_CODE (inner_dest) == SUBREG
2217 || GET_CODE (inner_dest) == ZERO_EXTRACT)
2218 inner_dest = XEXP (inner_dest, 0);
2219
2220 /* Check for the case where I3 modifies its output, as discussed
2221 above. We don't want to prevent pseudos from being combined
2222 into the address of a MEM, so only prevent the combination if
2223 i1 or i2 set the same MEM. */
2224 if ((inner_dest != dest &&
2225 (!MEM_P (inner_dest)
2226 || rtx_equal_p (i2dest, inner_dest)
2227 || (i1dest && rtx_equal_p (i1dest, inner_dest))
2228 || (i0dest && rtx_equal_p (i0dest, inner_dest)))
2229 && (reg_overlap_mentioned_p (i2dest, inner_dest)
2230 || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))
2231 || (i0dest && reg_overlap_mentioned_p (i0dest, inner_dest))))
2232
2233 /* This is the same test done in can_combine_p except we can't test
2234 all_adjacent; we don't have to, since this instruction will stay
2235 in place, thus we are not considering increasing the lifetime of
2236 INNER_DEST.
2237
2238 Also, if this insn sets a function argument, combining it with
2239 something that might need a spill could clobber a previous
2240 function argument; the all_adjacent test in can_combine_p also
2241 checks this; here, we do a more specific test for this case. */
2242
2243 || (REG_P (inner_dest)
2244 && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
2245 && !targetm.hard_regno_mode_ok (REGNO (inner_dest),
2246 GET_MODE (inner_dest)))
2247 || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src))
2248 || (i0_not_in_src && reg_overlap_mentioned_p (i0dest, src)))
2249 return 0;
2250
2251 /* If DEST is used in I3, it is being killed in this insn, so
2252 record that for later. We have to consider paradoxical
2253 subregs here, since they kill the whole register, but we
2254 ignore partial subregs, STRICT_LOW_PART, etc.
2255 Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
2256 STACK_POINTER_REGNUM, since these are always considered to be
2257 live. Similarly for ARG_POINTER_REGNUM if it is fixed. */
2258 subdest = dest;
2259 if (GET_CODE (subdest) == SUBREG && !partial_subreg_p (subdest))
2260 subdest = SUBREG_REG (subdest);
2261 if (pi3dest_killed
2262 && REG_P (subdest)
2263 && reg_referenced_p (subdest, PATTERN (i3))
2264 && REGNO (subdest) != FRAME_POINTER_REGNUM
2265 && (HARD_FRAME_POINTER_IS_FRAME_POINTER
2266 || REGNO (subdest) != HARD_FRAME_POINTER_REGNUM)
2267 && (FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM
2268 || (REGNO (subdest) != ARG_POINTER_REGNUM
2269 || ! fixed_regs [REGNO (subdest)]))
2270 && REGNO (subdest) != STACK_POINTER_REGNUM)
2271 {
2272 if (*pi3dest_killed)
2273 return 0;
2274
2275 *pi3dest_killed = subdest;
2276 }
2277 }
2278
2279 else if (GET_CODE (x) == PARALLEL)
2280 {
2281 int i;
2282
2283 for (i = 0; i < XVECLEN (x, 0); i++)
2284 if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest, i0dest,
2285 i1_not_in_src, i0_not_in_src, pi3dest_killed))
2286 return 0;
2287 }
2288
2289 return 1;
2290 }
2291
2292 /* Return 1 if X is an arithmetic expression that contains a multiplication
2293 or division. We don't count multiplications by powers of two here. */
2294
2295 static int
2296 contains_muldiv (rtx x)
2297 {
2298 switch (GET_CODE (x))
2299 {
2300 case MOD: case DIV: case UMOD: case UDIV:
2301 return 1;
2302
2303 case MULT:
2304 return ! (CONST_INT_P (XEXP (x, 1))
2305 && pow2p_hwi (UINTVAL (XEXP (x, 1))));
2306 default:
2307 if (BINARY_P (x))
2308 return contains_muldiv (XEXP (x, 0))
2309 || contains_muldiv (XEXP (x, 1));
2310
2311 if (UNARY_P (x))
2312 return contains_muldiv (XEXP (x, 0));
2313
2314 return 0;
2315 }
2316 }
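/* Examples (illustrative): this returns 0 for
   (plus (mult (reg) (const_int 4)) (reg)), since a multiplication by
   a power of two is really a shift, but 1 for
   (plus (mult (reg) (const_int 5)) (reg)), and 1 for a DIV, UDIV,
   MOD or UMOD reachable through unary and binary operators.  */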
2317
2318 /* Determine whether INSN can be used in a combination. Return nonzero if
2319 not. This is used in try_combine to detect early some cases where we
2320 can't perform combinations. */
2321
2322 static int
2323 cant_combine_insn_p (rtx_insn *insn)
2324 {
2325 rtx set;
2326 rtx src, dest;
2327
2328 /* If this isn't really an insn, we can't do anything.
2329 This can occur when flow deletes an insn that it has merged into an
2330 auto-increment address. */
2331 if (!NONDEBUG_INSN_P (insn))
2332 return 1;
2333
2334 /* Never combine loads and stores involving hard regs that are likely
2335 to be spilled. The register allocator can usually handle such
2336 reg-reg moves by tying. If we allow the combiner to make
2337 substitutions of likely-spilled regs, reload might die.
2338 As an exception, we allow combinations involving fixed regs; these are
2339 not available to the register allocator so there's no risk involved. */
2340
2341 set = single_set (insn);
2342 if (! set)
2343 return 0;
2344 src = SET_SRC (set);
2345 dest = SET_DEST (set);
2346 if (GET_CODE (src) == SUBREG)
2347 src = SUBREG_REG (src);
2348 if (GET_CODE (dest) == SUBREG)
2349 dest = SUBREG_REG (dest);
2350 if (REG_P (src) && REG_P (dest)
2351 && ((HARD_REGISTER_P (src)
2352 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src))
2353 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (src))))
2354 || (HARD_REGISTER_P (dest)
2355 && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dest))
2356 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (dest))))))
2357 return 1;
2358
2359 return 0;
2360 }
2361
2362 struct likely_spilled_retval_info
2363 {
2364 unsigned regno, nregs;
2365 unsigned mask;
2366 };
2367
2368 /* Called via note_stores by likely_spilled_retval_p. Remove from info->mask
2369 hard registers that are known to be written to / clobbered in full. */
2370 static void
2371 likely_spilled_retval_1 (rtx x, const_rtx set, void *data)
2372 {
2373 struct likely_spilled_retval_info *const info =
2374 (struct likely_spilled_retval_info *) data;
2375 unsigned regno, nregs;
2376 unsigned new_mask;
2377
2378 if (!REG_P (XEXP (set, 0)))
2379 return;
2380 regno = REGNO (x);
2381 if (regno >= info->regno + info->nregs)
2382 return;
2383 nregs = REG_NREGS (x);
2384 if (regno + nregs <= info->regno)
2385 return;
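  /* Note that (2U << (nregs - 1)) - 1 is used rather than
     (1U << nregs) - 1, presumably so the shift stays well-defined
     even when NREGS equals the number of bits in unsigned int; see
     the 32-register assumption in likely_spilled_retval_p below.  */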
2386 new_mask = (2U << (nregs - 1)) - 1;
2387 if (regno < info->regno)
2388 new_mask >>= info->regno - regno;
2389 else
2390 new_mask <<= regno - info->regno;
2391 info->mask &= ~new_mask;
2392 }
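/* A worked example (illustrative): with info->regno == 28 and
   info->nregs == 4, so that mask bits 0..3 stand for regs 28..31, a
   store to a two-register group starting at reg 29 computes
   new_mask == 3, shifts it left by one to get 6, and thus clears the
   bits for regs 29 and 30 from info->mask.  */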
2393
2394 /* Return nonzero iff part of the return value is live during INSN, and
2395 it is likely spilled. This can happen when more than one insn is needed
2396 to copy the return value, e.g. when we consider combining into the
2397 second copy insn for a complex value. */
2398
2399 static int
2400 likely_spilled_retval_p (rtx_insn *insn)
2401 {
2402 rtx_insn *use = BB_END (this_basic_block);
2403 rtx reg;
2404 rtx_insn *p;
2405 unsigned regno, nregs;
2406 /* We assume here that no machine mode needs more than
2407 32 hard registers when the value overlaps with a register
2408 for which TARGET_FUNCTION_VALUE_REGNO_P is true. */
2409 unsigned mask;
2410 struct likely_spilled_retval_info info;
2411
2412 if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use)
2413 return 0;
2414 reg = XEXP (PATTERN (use), 0);
2415 if (!REG_P (reg) || !targetm.calls.function_value_regno_p (REGNO (reg)))
2416 return 0;
2417 regno = REGNO (reg);
2418 nregs = REG_NREGS (reg);
2419 if (nregs == 1)
2420 return 0;
2421 mask = (2U << (nregs - 1)) - 1;
2422
2423 /* Disregard parts of the return value that are set later. */
2424 info.regno = regno;
2425 info.nregs = nregs;
2426 info.mask = mask;
2427 for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p))
2428 if (INSN_P (p))
2429 note_stores (PATTERN (p), likely_spilled_retval_1, &info);
2430 mask = info.mask;
2431
2432 /* Check if any of the (probably) live return value registers is
2433 likely spilled. */
2434 nregs--;
2435 do
2436 {
2437 if ((mask & 1 << nregs)
2438 && targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno + nregs)))
2439 return 1;
2440 } while (nregs--);
2441 return 0;
2442 }
2443
2444 /* Adjust INSN after we made a change to its destination.
2445
2446 Changing the destination can invalidate notes that say something about
2447 the results of the insn and a LOG_LINK pointing to the insn. */
2448
2449 static void
2450 adjust_for_new_dest (rtx_insn *insn)
2451 {
2452 /* For notes, be conservative and simply remove them. */
2453 remove_reg_equal_equiv_notes (insn);
2454
2455 /* The new insn will have a destination that was previously the destination
2456 of an insn just above it. Call distribute_links to make a LOG_LINK from
2457 the next use of that destination. */
2458
2459 rtx set = single_set (insn);
2460 gcc_assert (set);
2461
2462 rtx reg = SET_DEST (set);
2463
2464 while (GET_CODE (reg) == ZERO_EXTRACT
2465 || GET_CODE (reg) == STRICT_LOW_PART
2466 || GET_CODE (reg) == SUBREG)
2467 reg = XEXP (reg, 0);
2468 gcc_assert (REG_P (reg));
2469
2470 distribute_links (alloc_insn_link (insn, REGNO (reg), NULL));
2471
2472 df_insn_rescan (insn);
2473 }
2474
2475 /* Return TRUE if combine can reuse reg X in mode MODE.
2476 ADDED_SETS is nonzero if the original set is still required. */
2477 static bool
2478 can_change_dest_mode (rtx x, int added_sets, machine_mode mode)
2479 {
2480 unsigned int regno;
2481
2482 if (!REG_P (x))
2483 return false;
2484
2485 /* Don't change between modes with different underlying register sizes,
2486 since this could lead to invalid subregs. */
2487 if (maybe_ne (REGMODE_NATURAL_SIZE (mode),
2488 REGMODE_NATURAL_SIZE (GET_MODE (x))))
2489 return false;
2490
2491 regno = REGNO (x);
2492 /* Allow hard registers if the new mode is legal, and occupies no more
2493 registers than the old mode. */
2494 if (regno < FIRST_PSEUDO_REGISTER)
2495 return (targetm.hard_regno_mode_ok (regno, mode)
2496 && REG_NREGS (x) >= hard_regno_nregs (regno, mode));
2497
2498 /* Or a pseudo that is only set once. */
2499 return (regno < reg_n_sets_max
2500 && REG_N_SETS (regno) == 1
2501 && !added_sets
2502 && !REG_USERVAR_P (x));
2503 }
2504
2505
2506 /* Check whether X, the destination of a set, refers to part of
2507 the register specified by REG. */
2508
2509 static bool
2510 reg_subword_p (rtx x, rtx reg)
2511 {
2512 /* Check that reg is an integer mode register. */
2513 if (!REG_P (reg) || GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT)
2514 return false;
2515
2516 if (GET_CODE (x) == STRICT_LOW_PART
2517 || GET_CODE (x) == ZERO_EXTRACT)
2518 x = XEXP (x, 0);
2519
2520 return GET_CODE (x) == SUBREG
2521 && SUBREG_REG (x) == reg
2522 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT;
2523 }
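/* For example (illustrative), this returns true for
   X == (subreg:HI (reg:SI 100) 0) with REG == (reg:SI 100), and
   likewise when such a subreg sits under STRICT_LOW_PART or
   ZERO_EXTRACT; it returns false when X is REG itself, since that is
   a full-register store rather than a subword one.  */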
2524
2525 /* Delete the unconditional jump INSN and adjust the CFG correspondingly.
2526 Note that the INSN should be deleted *after* removing dead edges, so
2527 that the kept edge is the fallthrough edge for a (set (pc) (pc))
2528 but not for a (set (pc) (label_ref FOO)). */
2529
2530 static void
2531 update_cfg_for_uncondjump (rtx_insn *insn)
2532 {
2533 basic_block bb = BLOCK_FOR_INSN (insn);
2534 gcc_assert (BB_END (bb) == insn);
2535
2536 purge_dead_edges (bb);
2537
2538 delete_insn (insn);
2539 if (EDGE_COUNT (bb->succs) == 1)
2540 {
2541 rtx_insn *insn;
2542
2543 single_succ_edge (bb)->flags |= EDGE_FALLTHRU;
2544
2545 /* Remove barriers from the footer if there are any. */
2546 for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
2547 if (BARRIER_P (insn))
2548 {
2549 if (PREV_INSN (insn))
2550 SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
2551 else
2552 BB_FOOTER (bb) = NEXT_INSN (insn);
2553 if (NEXT_INSN (insn))
2554 SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
2555 }
2556 else if (LABEL_P (insn))
2557 break;
2558 }
2559 }
2560
2561 /* Return whether PAT is a PARALLEL of exactly N register SETs followed
2562 by an arbitrary number of CLOBBERs. */
2563 static bool
2564 is_parallel_of_n_reg_sets (rtx pat, int n)
2565 {
2566 if (GET_CODE (pat) != PARALLEL)
2567 return false;
2568
2569 int len = XVECLEN (pat, 0);
2570 if (len < n)
2571 return false;
2572
2573 int i;
2574 for (i = 0; i < n; i++)
2575 if (GET_CODE (XVECEXP (pat, 0, i)) != SET
2576 || !REG_P (SET_DEST (XVECEXP (pat, 0, i))))
2577 return false;
2578 for ( ; i < len; i++)
2579 if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER
2580 || XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
2581 return false;
2582
2583 return true;
2584 }
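/* For instance (illustrative), with N == 2 this accepts

       (parallel [(set (reg:CC 17) (compare:CC ...))
                  (set (reg:SI 0) ...)
                  (clobber (scratch:SI))])

   but rejects any PARALLEL containing (clobber (const_int 0)), the
   marker combine uses for patterns that failed to match.  */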
2585
2586 /* Return whether INSN, a PARALLEL of N register SETs (and maybe some
2587 CLOBBERs), can be split into individual SETs in that order, without
2588 changing semantics. */
2589 static bool
2590 can_split_parallel_of_n_reg_sets (rtx_insn *insn, int n)
2591 {
2592 if (!insn_nothrow_p (insn))
2593 return false;
2594
2595 rtx pat = PATTERN (insn);
2596
2597 int i, j;
2598 for (i = 0; i < n; i++)
2599 {
2600 if (side_effects_p (SET_SRC (XVECEXP (pat, 0, i))))
2601 return false;
2602
2603 rtx reg = SET_DEST (XVECEXP (pat, 0, i));
2604
2605 for (j = i + 1; j < n; j++)
2606 if (reg_referenced_p (reg, XVECEXP (pat, 0, j)))
2607 return false;
2608 }
2609
2610 return true;
2611 }
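/* Example of a rejected split (illustrative):

       (parallel [(set (reg:SI 100) (reg:SI 101))
                  (set (reg:SI 102) (plus:SI (reg:SI 100) (reg:SI 103)))])

   Within the PARALLEL both SETs read their sources simultaneously, so
   the second SET sees the old value of reg 100; splitting them into
   two sequential insns would make it see the new value instead.  */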
2612
2613 /* Try to combine the insns I0, I1 and I2 into I3.
2614 Here I0, I1 and I2 appear earlier than I3.
2615 I0 and I1 can be zero; then we combine just I2 into I3, or I1 and I2 into
2616 I3.
2617
2618 If we are combining more than two insns and the resulting insn is not
2619 recognized, try splitting it into two insns. If that happens, I2 and I3
2620 are retained and I1/I0 are pseudo-deleted by turning them into a NOTE.
2621 Otherwise, I0, I1 and I2 are pseudo-deleted.
2622
2623 Return 0 if the combination does not work. Then nothing is changed.
2624 If we did the combination, return the insn at which combine should
2625 resume scanning.
2626
2627 Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a
2628 new direct jump instruction.
2629
2630 LAST_COMBINED_INSN is either I3, or some insn after I3 that was
2631 passed as I3 to an earlier try_combine within the same basic
2632 block. */
2633
2634 static rtx_insn *
2635 try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0,
2636 int *new_direct_jump_p, rtx_insn *last_combined_insn)
2637 {
2638 /* New patterns for I3 and I2, respectively. */
2639 rtx newpat, newi2pat = 0;
2640 rtvec newpat_vec_with_clobbers = 0;
2641 int substed_i2 = 0, substed_i1 = 0, substed_i0 = 0;
2642 /* Indicates need to preserve SET in I0, I1 or I2 in I3 if it is not
2643 dead. */
2644 int added_sets_0, added_sets_1, added_sets_2;
2645 /* Total number of SETs to put into I3. */
2646 int total_sets;
2647 /* Nonzero if I2's or I1's body now appears in I3. */
2648 int i2_is_used = 0, i1_is_used = 0;
2649 /* INSN_CODEs for new I3, new I2, and user of condition code. */
2650 int insn_code_number, i2_code_number = 0, other_code_number = 0;
2651 /* Contains I3 if the destination of I3 is used in its source, which means
2652 that the old life of I3 is being killed. If that usage is placed into
2653 I2 and not in I3, a REG_DEAD note must be made. */
2654 rtx i3dest_killed = 0;
2655 /* SET_DEST and SET_SRC of I2, I1 and I0. */
2656 rtx i2dest = 0, i2src = 0, i1dest = 0, i1src = 0, i0dest = 0, i0src = 0;
2657 /* Copy of SET_SRC of I1 and I0, if needed. */
2658 rtx i1src_copy = 0, i0src_copy = 0, i0src_copy2 = 0;
2659 /* Set if I2DEST was reused as a scratch register. */
2660 bool i2scratch = false;
2661 /* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases. */
2662 rtx i0pat = 0, i1pat = 0, i2pat = 0;
2663 /* Indicates if I2DEST or I1DEST is in I2SRC or I1SRC. */
2664 int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0;
2665 int i0dest_in_i0src = 0, i1dest_in_i0src = 0, i2dest_in_i0src = 0;
2666 int i2dest_killed = 0, i1dest_killed = 0, i0dest_killed = 0;
2667 int i1_feeds_i2_n = 0, i0_feeds_i2_n = 0, i0_feeds_i1_n = 0;
2668 /* Notes that must be added to REG_NOTES in I3 and I2. */
2669 rtx new_i3_notes, new_i2_notes;
2670 /* Notes that we substituted I3 into I2 instead of the normal case. */
2671 int i3_subst_into_i2 = 0;
2672 /* Notes that I1, I2 or I3 is a MULT operation. */
2673 int have_mult = 0;
2674 int swap_i2i3 = 0;
2675 int split_i2i3 = 0;
2676 int changed_i3_dest = 0;
2677
2678 int maxreg;
2679 rtx_insn *temp_insn;
2680 rtx temp_expr;
2681 struct insn_link *link;
2682 rtx other_pat = 0;
2683 rtx new_other_notes;
2684 int i;
2685 scalar_int_mode dest_mode, temp_mode;
2686
2687 /* Immediately return if any of I0,I1,I2 are the same insn (I3 can
2688 never be). */
2689 if (i1 == i2 || i0 == i2 || (i0 && i0 == i1))
2690 return 0;
2691
2692 /* Only try four-insn combinations when there's high likelihood of
2693 success. Look for simple insns, such as loads of constants or
2694 binary operations involving a constant. */
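  /* For example (illustrative): an insn that simply loads a constant
     immediately makes ngood == 2 and the attempt worthwhile; so do
     two shift insns (nshift == 2), in the hope that they collapse
     into one.  A chain of four plain register-to-register copies
     scores nothing and is rejected.  */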
2695 if (i0)
2696 {
2697 int i;
2698 int ngood = 0;
2699 int nshift = 0;
2700 rtx set0, set3;
2701
2702 if (!flag_expensive_optimizations)
2703 return 0;
2704
2705 for (i = 0; i < 4; i++)
2706 {
2707 rtx_insn *insn = i == 0 ? i0 : i == 1 ? i1 : i == 2 ? i2 : i3;
2708 rtx set = single_set (insn);
2709 rtx src;
2710 if (!set)
2711 continue;
2712 src = SET_SRC (set);
2713 if (CONSTANT_P (src))
2714 {
2715 ngood += 2;
2716 break;
2717 }
2718 else if (BINARY_P (src) && CONSTANT_P (XEXP (src, 1)))
2719 ngood++;
2720 else if (GET_CODE (src) == ASHIFT || GET_CODE (src) == ASHIFTRT
2721 || GET_CODE (src) == LSHIFTRT)
2722 nshift++;
2723 }
2724
2725 /* If I0 loads a memory and I3 sets the same memory, then I1 and I2
2726 are likely manipulating its value. Ideally we'll be able to combine
2727 all four insns into a bitfield insertion of some kind.
2728
2729 Note the source in I0 might be inside a sign/zero extension and the
2730 memory modes in I0 and I3 might be different. So extract the address
2731 from the destination of I3 and search for it in the source of I0.
2732
2733 In the event that there's a match but the source/dest do not actually
2734 refer to the same memory, the worst that happens is we try some
2735 combinations that we wouldn't have otherwise. */
2736 if ((set0 = single_set (i0))
2737 /* Ensure the source of SET0 is a MEM, possibly buried inside
2738 an extension. */
2739 && (GET_CODE (SET_SRC (set0)) == MEM
2740 || ((GET_CODE (SET_SRC (set0)) == ZERO_EXTEND
2741 || GET_CODE (SET_SRC (set0)) == SIGN_EXTEND)
2742 && GET_CODE (XEXP (SET_SRC (set0), 0)) == MEM))
2743 && (set3 = single_set (i3))
2744 /* Ensure the destination of SET3 is a MEM. */
2745 && GET_CODE (SET_DEST (set3)) == MEM
2746 /* Would it be better to extract the base address for the MEM
2747 in SET3 and look for that? I don't have cases where it matters
2748 but I could envision such cases. */
2749 && rtx_referenced_p (XEXP (SET_DEST (set3), 0), SET_SRC (set0)))
2750 ngood += 2;
2751
2752 if (ngood < 2 && nshift < 2)
2753 return 0;
2754 }
2755
2756 /* Exit early if one of the insns involved can't be used for
2757 combinations. */
2758 if (CALL_P (i2)
2759 || (i1 && CALL_P (i1))
2760 || (i0 && CALL_P (i0))
2761 || cant_combine_insn_p (i3)
2762 || cant_combine_insn_p (i2)
2763 || (i1 && cant_combine_insn_p (i1))
2764 || (i0 && cant_combine_insn_p (i0))
2765 || likely_spilled_retval_p (i3))
2766 return 0;
2767
2768 combine_attempts++;
2769 undobuf.other_insn = 0;
2770
2771 /* Reset the hard register usage information. */
2772 CLEAR_HARD_REG_SET (newpat_used_regs);
2773
2774 if (dump_file && (dump_flags & TDF_DETAILS))
2775 {
2776 if (i0)
2777 fprintf (dump_file, "\nTrying %d, %d, %d -> %d:\n",
2778 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2779 else if (i1)
2780 fprintf (dump_file, "\nTrying %d, %d -> %d:\n",
2781 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2782 else
2783 fprintf (dump_file, "\nTrying %d -> %d:\n",
2784 INSN_UID (i2), INSN_UID (i3));
2785
2786 if (i0)
2787 dump_insn_slim (dump_file, i0);
2788 if (i1)
2789 dump_insn_slim (dump_file, i1);
2790 dump_insn_slim (dump_file, i2);
2791 dump_insn_slim (dump_file, i3);
2792 }
2793
2794 /* If multiple insns feed into one of I2 or I3, they can be in any
2795 order. To simplify the code below, reorder them in sequence. */
2796 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i2))
2797 std::swap (i0, i2);
2798 if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i1))
2799 std::swap (i0, i1);
2800 if (i1 && DF_INSN_LUID (i1) > DF_INSN_LUID (i2))
2801 std::swap (i1, i2);
2802
2803 added_links_insn = 0;
2804 added_notes_insn = 0;
2805
2806 /* First check for one important special case that the code below will
2807 not handle. Namely, the case where I1 is zero, I2 is a PARALLEL
2808 and I3 is a SET whose SET_SRC is a SET_DEST in I2. In that case,
2809 we may be able to replace that destination with the destination of I3.
2810 This occurs in the common code where we compute both a quotient and
2811 remainder into a structure, in which case we want to do the computation
2812 directly into the structure to avoid register-register copies.
2813
2814 Note that this case handles both multiple sets in I2 and also cases
2815 where I2 has a number of CLOBBERs inside the PARALLEL.
2816
2817 We make very conservative checks below and only try to handle the
2818 most common cases of this. For example, we only handle the case
2819 where I2 and I3 are adjacent to avoid making difficult register
2820 usage tests. */
2821
2822 if (i1 == 0 && NONJUMP_INSN_P (i3) && GET_CODE (PATTERN (i3)) == SET
2823 && REG_P (SET_SRC (PATTERN (i3)))
2824 && REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
2825 && find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3)))
2826 && GET_CODE (PATTERN (i2)) == PARALLEL
2827 && ! side_effects_p (SET_DEST (PATTERN (i3)))
2828 /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
2829 below would need to check what is inside (and reg_overlap_mentioned_p
2830 doesn't support those codes anyway). Don't allow those destinations;
2831 the resulting insn isn't likely to be recognized anyway. */
2832 && GET_CODE (SET_DEST (PATTERN (i3))) != ZERO_EXTRACT
2833 && GET_CODE (SET_DEST (PATTERN (i3))) != STRICT_LOW_PART
2834 && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3)),
2835 SET_DEST (PATTERN (i3)))
2836 && next_active_insn (i2) == i3)
2837 {
2838 rtx p2 = PATTERN (i2);
2839
2840 /* Make sure that the destination of I3,
2841 which we are going to substitute into one output of I2,
2842 is not used within another output of I2. We must avoid making this:
2843 (parallel [(set (mem (reg 69)) ...)
2844 (set (reg 69) ...)])
2845 which is not well-defined as to order of actions.
2846 (Besides, reload can't handle output reloads for this.)
2847
2848 The problem can also happen if the dest of I3 is a memory ref,
2849 if another dest in I2 is an indirect memory ref.
2850
2851 Neither can this PARALLEL be an asm. We do not allow combining
2852 that usually (see can_combine_p), so do not here either. */
2853 bool ok = true;
2854 for (i = 0; ok && i < XVECLEN (p2, 0); i++)
2855 {
2856 if ((GET_CODE (XVECEXP (p2, 0, i)) == SET
2857 || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER)
2858 && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3)),
2859 SET_DEST (XVECEXP (p2, 0, i))))
2860 ok = false;
2861 else if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2862 && GET_CODE (SET_SRC (XVECEXP (p2, 0, i))) == ASM_OPERANDS)
2863 ok = false;
2864 }
2865
2866 if (ok)
2867 for (i = 0; i < XVECLEN (p2, 0); i++)
2868 if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2869 && SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3)))
2870 {
2871 combine_merges++;
2872
2873 subst_insn = i3;
2874 subst_low_luid = DF_INSN_LUID (i2);
2875
2876 added_sets_2 = added_sets_1 = added_sets_0 = 0;
2877 i2src = SET_SRC (XVECEXP (p2, 0, i));
2878 i2dest = SET_DEST (XVECEXP (p2, 0, i));
2879 i2dest_killed = dead_or_set_p (i2, i2dest);
2880
2881 /* Replace the dest in I2 with our dest and make the resulting
2882 insn the new pattern for I3. Then skip to where we validate
2883 the pattern. Everything was set up above. */
2884 SUBST (SET_DEST (XVECEXP (p2, 0, i)), SET_DEST (PATTERN (i3)));
2885 newpat = p2;
2886 i3_subst_into_i2 = 1;
2887 goto validate_replacement;
2888 }
2889 }
2890
2891 /* If I2 is setting a pseudo to a constant and I3 is setting some
2892 sub-part of it to another constant, merge them by making a new
2893 constant. */
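  /* For instance (illustrative, assuming the SUBREG is the lowpart):
     if I2 is (set (reg:SI 100) (const_int 0x12345678)) and I3 is
     (set (subreg:HI (reg:SI 100) 0) (const_int 0xABCD)), the
     wi::insert below yields the single constant 0x1234ABCD, and the
     merged insn simply sets reg 100 to it.  */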
2894 if (i1 == 0
2895 && (temp_expr = single_set (i2)) != 0
2896 && is_a <scalar_int_mode> (GET_MODE (SET_DEST (temp_expr)), &temp_mode)
2897 && CONST_SCALAR_INT_P (SET_SRC (temp_expr))
2898 && GET_CODE (PATTERN (i3)) == SET
2899 && CONST_SCALAR_INT_P (SET_SRC (PATTERN (i3)))
2900 && reg_subword_p (SET_DEST (PATTERN (i3)), SET_DEST (temp_expr)))
2901 {
2902 rtx dest = SET_DEST (PATTERN (i3));
2903 rtx temp_dest = SET_DEST (temp_expr);
2904 int offset = -1;
2905 int width = 0;
2906
2907 if (GET_CODE (dest) == ZERO_EXTRACT)
2908 {
2909 if (CONST_INT_P (XEXP (dest, 1))
2910 && CONST_INT_P (XEXP (dest, 2))
2911 && is_a <scalar_int_mode> (GET_MODE (XEXP (dest, 0)),
2912 &dest_mode))
2913 {
2914 width = INTVAL (XEXP (dest, 1));
2915 offset = INTVAL (XEXP (dest, 2));
2916 dest = XEXP (dest, 0);
2917 if (BITS_BIG_ENDIAN)
2918 offset = GET_MODE_PRECISION (dest_mode) - width - offset;
2919 }
2920 }
2921 else
2922 {
2923 if (GET_CODE (dest) == STRICT_LOW_PART)
2924 dest = XEXP (dest, 0);
2925 if (is_a <scalar_int_mode> (GET_MODE (dest), &dest_mode))
2926 {
2927 width = GET_MODE_PRECISION (dest_mode);
2928 offset = 0;
2929 }
2930 }
2931
2932 if (offset >= 0)
2933 {
2934 /* If this is the low part, we're done. */
2935 if (subreg_lowpart_p (dest))
2936 ;
2937 /* Handle the case where inner is twice the size of outer. */
2938 else if (GET_MODE_PRECISION (temp_mode)
2939 == 2 * GET_MODE_PRECISION (dest_mode))
2940 offset += GET_MODE_PRECISION (dest_mode);
2941 /* Otherwise give up for now. */
2942 else
2943 offset = -1;
2944 }
2945
2946 if (offset >= 0)
2947 {
2948 rtx inner = SET_SRC (PATTERN (i3));
2949 rtx outer = SET_SRC (temp_expr);
2950
2951 wide_int o = wi::insert (rtx_mode_t (outer, temp_mode),
2952 rtx_mode_t (inner, dest_mode),
2953 offset, width);
2954
2955 combine_merges++;
2956 subst_insn = i3;
2957 subst_low_luid = DF_INSN_LUID (i2);
2958 added_sets_2 = added_sets_1 = added_sets_0 = 0;
2959 i2dest = temp_dest;
2960 i2dest_killed = dead_or_set_p (i2, i2dest);
2961
2962 /* Replace the source in I2 with the new constant and make the
2963 resulting insn the new pattern for I3. Then skip to where we
2964 validate the pattern. Everything was set up above. */
2965 SUBST (SET_SRC (temp_expr),
2966 immed_wide_int_const (o, temp_mode));
2967
2968 newpat = PATTERN (i2);
2969
2970 /* The dest of I3 has been replaced with the dest of I2. */
2971 changed_i3_dest = 1;
2972 goto validate_replacement;
2973 }
2974 }
2975
2976 /* If we have no I1 and I2 looks like:
2977 (parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
2978 (set Y OP)])
2979 make up a dummy I1 that is
2980 (set Y OP)
2981 and change I2 to be
2982 (set (reg:CC X) (compare:CC Y (const_int 0)))
2983
2984 (We can ignore any trailing CLOBBERs.)
2985
2986 This undoes a previous combination and allows us to match a branch-and-
2987 decrement insn. */
2988
2989 if (!HAVE_cc0 && i1 == 0
2990 && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
2991 && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 0))))
2992 == MODE_CC)
2993 && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2), 0, 0))) == COMPARE
2994 && XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 1) == const0_rtx
2995 && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0),
2996 SET_SRC (XVECEXP (PATTERN (i2), 0, 1)))
2997 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
2998 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
2999 {
3000 /* We make I1 with the same INSN_UID as I2. This gives it
3001 the same DF_INSN_LUID for value tracking. Our fake I1 will
3002 never appear in the insn stream so giving it the same INSN_UID
3003 as I2 will not cause a problem. */
3004
3005 i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
3006 XVECEXP (PATTERN (i2), 0, 1), INSN_LOCATION (i2),
3007 -1, NULL_RTX);
3008 INSN_UID (i1) = INSN_UID (i2);
3009
3010 SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0));
3011 SUBST (XEXP (SET_SRC (PATTERN (i2)), 0),
3012 SET_DEST (PATTERN (i1)));
3013 unsigned int regno = REGNO (SET_DEST (PATTERN (i1)));
3014 SUBST_LINK (LOG_LINKS (i2),
3015 alloc_insn_link (i1, regno, LOG_LINKS (i2)));
3016 }
3017
3018 /* If I2 is a PARALLEL of two SETs of REGs (and perhaps some CLOBBERs),
3019 make those two SETs separate I1 and I2 insns, and make an I0 that is
3020 the original I1. */
3021 if (!HAVE_cc0 && i0 == 0
3022 && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
3023 && can_split_parallel_of_n_reg_sets (i2, 2)
3024 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
3025 && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3)
3026 && !reg_set_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
3027 && !reg_set_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
3028 {
3029 /* If there is no I1, there is no I0 either. */
3030 i0 = i1;
3031
3032 /* We make I1 with the same INSN_UID as I2. This gives it
3033 the same DF_INSN_LUID for value tracking. Our fake I1 will
3034 never appear in the insn stream so giving it the same INSN_UID
3035 as I2 will not cause a problem. */
3036
3037 i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
3038 XVECEXP (PATTERN (i2), 0, 0), INSN_LOCATION (i2),
3039 -1, NULL_RTX);
3040 INSN_UID (i1) = INSN_UID (i2);
3041
3042 SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 1));
3043 }
3044
3045 /* Verify that I2 and maybe I1 and I0 can be combined into I3. */
3046 if (!can_combine_p (i2, i3, i0, i1, NULL, NULL, &i2dest, &i2src))
3047 {
3048 if (dump_file)
3049 fprintf (dump_file, "Can't combine i2 into i3\n");
3050 undo_all ();
3051 return 0;
3052 }
3053 if (i1 && !can_combine_p (i1, i3, i0, NULL, i2, NULL, &i1dest, &i1src))
3054 {
3055 if (dump_file)
3056 fprintf (dump_file, "Can't combine i1 into i3\n");
3057 undo_all ();
3058 return 0;
3059 }
3060 if (i0 && !can_combine_p (i0, i3, NULL, NULL, i1, i2, &i0dest, &i0src))
3061 {
3062 if (dump_file)
3063 fprintf (dump_file, "Can't combine i0 into i3\n");
3064 undo_all ();
3065 return 0;
3066 }
3067
3068 /* Record whether I2DEST is used in I2SRC and similarly for the other
3069 cases. Knowing this will help in register status updating below. */
3070 i2dest_in_i2src = reg_overlap_mentioned_p (i2dest, i2src);
3071 i1dest_in_i1src = i1 && reg_overlap_mentioned_p (i1dest, i1src);
3072 i2dest_in_i1src = i1 && reg_overlap_mentioned_p (i2dest, i1src);
3073 i0dest_in_i0src = i0 && reg_overlap_mentioned_p (i0dest, i0src);
3074 i1dest_in_i0src = i0 && reg_overlap_mentioned_p (i1dest, i0src);
3075 i2dest_in_i0src = i0 && reg_overlap_mentioned_p (i2dest, i0src);
3076 i2dest_killed = dead_or_set_p (i2, i2dest);
3077 i1dest_killed = i1 && dead_or_set_p (i1, i1dest);
3078 i0dest_killed = i0 && dead_or_set_p (i0, i0dest);
3079
3080 /* For the earlier insns, determine which of the subsequent ones they
3081 feed. */
3082 i1_feeds_i2_n = i1 && insn_a_feeds_b (i1, i2);
3083 i0_feeds_i1_n = i0 && insn_a_feeds_b (i0, i1);
3084 i0_feeds_i2_n = (i0 && (!i0_feeds_i1_n ? insn_a_feeds_b (i0, i2)
3085 : (!reg_overlap_mentioned_p (i1dest, i0dest)
3086 && reg_overlap_mentioned_p (i0dest, i2src))));
3087
3088 /* Ensure that I3's pattern can be the destination of combines. */
3089 if (! combinable_i3pat (i3, &PATTERN (i3), i2dest, i1dest, i0dest,
3090 i1 && i2dest_in_i1src && !i1_feeds_i2_n,
3091 i0 && ((i2dest_in_i0src && !i0_feeds_i2_n)
3092 || (i1dest_in_i0src && !i0_feeds_i1_n)),
3093 &i3dest_killed))
3094 {
3095 undo_all ();
3096 return 0;
3097 }
3098
3099 /* See if any of the insns is a MULT operation. Unless one is, we will
3100 reject a combination that is, since it must be slower. Be conservative
3101 here. */
3102 if (GET_CODE (i2src) == MULT
3103 || (i1 != 0 && GET_CODE (i1src) == MULT)
3104 || (i0 != 0 && GET_CODE (i0src) == MULT)
3105 || (GET_CODE (PATTERN (i3)) == SET
3106 && GET_CODE (SET_SRC (PATTERN (i3))) == MULT))
3107 have_mult = 1;
3108
3109 /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
3110 We used to do this EXCEPT in one case: I3 has a post-inc in an
3111 output operand. However, that exception can give rise to insns like
3112 mov r3,(r3)+
3113 which is a famous insn on the PDP-11 where the value of r3 used as the
3114 source was model-dependent. Avoid this sort of thing. */
3115
3116 #if 0
3117 if (!(GET_CODE (PATTERN (i3)) == SET
3118 && REG_P (SET_SRC (PATTERN (i3)))
3119 && MEM_P (SET_DEST (PATTERN (i3)))
3120 && (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC
3121 || GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC)))
3122 /* It's not the exception. */
3123 #endif
3124 if (AUTO_INC_DEC)
3125 {
3126 rtx link;
3127 for (link = REG_NOTES (i3); link; link = XEXP (link, 1))
3128 if (REG_NOTE_KIND (link) == REG_INC
3129 && (reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i2))
3130 || (i1 != 0
3131 && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i1)))))
3132 {
3133 undo_all ();
3134 return 0;
3135 }
3136 }
3137
3138 /* See if the SETs in I1 or I2 need to be kept around in the merged
3139 instruction: whenever the value set there is still needed past I3.
3140 For the SET in I2, this is easy: we see if I2DEST dies or is set in I3.
3141
3142 For the SET in I1, we have two cases: if I1 and I2 independently feed
3143 into I3, the set in I1 needs to be kept around unless I1DEST dies
3144 or is set in I3. Otherwise (if I1 feeds I2 which feeds I3), the set
3145 in I1 needs to be kept around unless I1DEST dies or is set in either
3146 I2 or I3. The same considerations apply to I0. */
3147
3148 added_sets_2 = !dead_or_set_p (i3, i2dest);
3149
3150 if (i1)
3151 added_sets_1 = !(dead_or_set_p (i3, i1dest)
3152 || (i1_feeds_i2_n && dead_or_set_p (i2, i1dest)));
3153 else
3154 added_sets_1 = 0;
3155
3156 if (i0)
3157 added_sets_0 = !(dead_or_set_p (i3, i0dest)
3158 || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest))
3159 || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3160 && dead_or_set_p (i2, i0dest)));
3161 else
3162 added_sets_0 = 0;
3163
3164 /* We are about to copy insns for the case where they need to be kept
3165 around. Check that they can be copied in the merged instruction. */
3166
3167 if (targetm.cannot_copy_insn_p
3168 && ((added_sets_2 && targetm.cannot_copy_insn_p (i2))
3169 || (i1 && added_sets_1 && targetm.cannot_copy_insn_p (i1))
3170 || (i0 && added_sets_0 && targetm.cannot_copy_insn_p (i0))))
3171 {
3172 undo_all ();
3173 return 0;
3174 }
3175
3176 /* If the set in I2 needs to be kept around, we must make a copy of
3177 PATTERN (I2), so that when we substitute I1SRC for I1DEST in
3178 PATTERN (I2), we are only substituting for the original I1DEST, not into
3179 an already-substituted copy. This also prevents making self-referential
3180 rtx. If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
3181 I2DEST. */
3182
3183 if (added_sets_2)
3184 {
3185 if (GET_CODE (PATTERN (i2)) == PARALLEL)
3186 i2pat = gen_rtx_SET (i2dest, copy_rtx (i2src));
3187 else
3188 i2pat = copy_rtx (PATTERN (i2));
3189 }
3190
3191 if (added_sets_1)
3192 {
3193 if (GET_CODE (PATTERN (i1)) == PARALLEL)
3194 i1pat = gen_rtx_SET (i1dest, copy_rtx (i1src));
3195 else
3196 i1pat = copy_rtx (PATTERN (i1));
3197 }
3198
3199 if (added_sets_0)
3200 {
3201 if (GET_CODE (PATTERN (i0)) == PARALLEL)
3202 i0pat = gen_rtx_SET (i0dest, copy_rtx (i0src));
3203 else
3204 i0pat = copy_rtx (PATTERN (i0));
3205 }
3206
3207 combine_merges++;
3208
3209 /* Substitute in the latest insn for the regs set by the earlier ones. */
3210
3211 maxreg = max_reg_num ();
3212
3213 subst_insn = i3;
3214
3215 /* Many machines that don't use CC0 have insns that can both perform an
3216 arithmetic operation and set the condition code. These operations will
3217 be represented as a PARALLEL with the first element of the vector
3218 being a COMPARE of an arithmetic operation with the constant zero.
3219 The second element of the vector will set some pseudo to the result
3220 of the same arithmetic operation. If we simplify the COMPARE, we won't
3221 match such a pattern and so will generate an extra insn. Here we test
3222 for this case, where both the comparison and the operation result are
3223 needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
3224 I2SRC. Later we will make the PARALLEL that contains I2. */
3225
3226 if (!HAVE_cc0 && i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET
3227 && GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE
3228 && CONST_INT_P (XEXP (SET_SRC (PATTERN (i3)), 1))
3229 && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest))
3230 {
3231 rtx newpat_dest;
3232 rtx *cc_use_loc = NULL;
3233 rtx_insn *cc_use_insn = NULL;
3234 rtx op0 = i2src, op1 = XEXP (SET_SRC (PATTERN (i3)), 1);
3235 machine_mode compare_mode, orig_compare_mode;
3236 enum rtx_code compare_code = UNKNOWN, orig_compare_code = UNKNOWN;
3237 scalar_int_mode mode;
3238
3239 newpat = PATTERN (i3);
3240 newpat_dest = SET_DEST (newpat);
3241 compare_mode = orig_compare_mode = GET_MODE (newpat_dest);
3242
3243 if (undobuf.other_insn == 0
3244 && (cc_use_loc = find_single_use (SET_DEST (newpat), i3,
3245 &cc_use_insn)))
3246 {
3247 compare_code = orig_compare_code = GET_CODE (*cc_use_loc);
3248 if (is_a <scalar_int_mode> (GET_MODE (i2dest), &mode))
3249 compare_code = simplify_compare_const (compare_code, mode,
3250 op0, &op1);
3251 target_canonicalize_comparison (&compare_code, &op0, &op1, 1);
3252 }
3253
3254 /* Do the rest only if op1 is const0_rtx, which may be the
3255 result of simplification. */
3256 if (op1 == const0_rtx)
3257 {
3258 /* If a single use of the CC is found, prepare to modify it
3259 when SELECT_CC_MODE returns a new CC-class mode, or when
3260 the above simplify_compare_const() returned a new comparison
3261 operator. undobuf.other_insn is assigned the CC use insn
3262 when modifying it. */
3263 if (cc_use_loc)
3264 {
3265 #ifdef SELECT_CC_MODE
3266 machine_mode new_mode
3267 = SELECT_CC_MODE (compare_code, op0, op1);
3268 if (new_mode != orig_compare_mode
3269 && can_change_dest_mode (SET_DEST (newpat),
3270 added_sets_2, new_mode))
3271 {
3272 unsigned int regno = REGNO (newpat_dest);
3273 compare_mode = new_mode;
3274 if (regno < FIRST_PSEUDO_REGISTER)
3275 newpat_dest = gen_rtx_REG (compare_mode, regno);
3276 else
3277 {
3278 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
3279 newpat_dest = regno_reg_rtx[regno];
3280 }
3281 }
3282 #endif
3283 /* Cases for modifying the CC-using comparison. */
3284 if (compare_code != orig_compare_code
3285 /* ??? Do we need to verify the zero rtx? */
3286 && XEXP (*cc_use_loc, 1) == const0_rtx)
3287 {
3288 /* Replace cc_use_loc with entire new RTX. */
3289 SUBST (*cc_use_loc,
3290 gen_rtx_fmt_ee (compare_code, compare_mode,
3291 newpat_dest, const0_rtx));
3292 undobuf.other_insn = cc_use_insn;
3293 }
3294 else if (compare_mode != orig_compare_mode)
3295 {
3296 /* Just replace the CC reg with a new mode. */
3297 SUBST (XEXP (*cc_use_loc, 0), newpat_dest);
3298 undobuf.other_insn = cc_use_insn;
3299 }
3300 }
3301
3302 /* Now we modify the current newpat:
3303 First, SET_DEST (newpat) is updated if the CC mode has been
3304 altered. For targets without SELECT_CC_MODE, this should be
3305 optimized away. */
3306 if (compare_mode != orig_compare_mode)
3307 SUBST (SET_DEST (newpat), newpat_dest);
3308 /* This is always done to propagate i2src into newpat. */
3309 SUBST (SET_SRC (newpat),
3310 gen_rtx_COMPARE (compare_mode, op0, op1));
3311 /* Create new version of i2pat if needed; the below PARALLEL
3312 creation needs this to work correctly. */
3313 if (! rtx_equal_p (i2src, op0))
3314 i2pat = gen_rtx_SET (i2dest, op0);
3315 i2_is_used = 1;
3316 }
3317 }
3318
3319 if (i2_is_used == 0)
3320 {
3321 /* It is possible that the source of I2 or I1 may be performing
3322 an unneeded operation, such as a ZERO_EXTEND of something
3323 that is known to have the high part zero. Handle that case
3324 by letting subst look at the inner insns.
3325
3326 Another way to do this would be to have a function that tries
3327 to simplify a single insn instead of merging two or more
3328 insns. We don't do this because of the potential of infinite
3329 loops and because of the potential extra memory required.
3330 However, doing it the way we are is a bit of a kludge and
3331 doesn't catch all cases.
3332
3333 But only do this if -fexpensive-optimizations is enabled,
3334 since it slows things down and doesn't usually win.
3335
3336 This is not done in the COMPARE case above because the
3337 unmodified I2PAT is used in the PARALLEL and so a pattern
3338 with a modified I2SRC would not match. */
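/* Illustrative example (hypothetical operands): if I2SRC is
(zero_extend:SI (subreg:HI (reg:SI 90) 0)) and nonzero_bits already
proves the upper bits of (reg:SI 90) to be zero, the subst calls
below may simplify away the extension before I2SRC is merged.  */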
3339
3340 if (flag_expensive_optimizations)
3341 {
3342 /* Pass pc_rtx so no substitutions are done, just
3343 simplifications. */
3344 if (i1)
3345 {
3346 subst_low_luid = DF_INSN_LUID (i1);
3347 i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0, 0);
3348 }
3349
3350 subst_low_luid = DF_INSN_LUID (i2);
3351 i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0, 0);
3352 }
3353
3354 n_occurrences = 0; /* `subst' counts here */
3355 subst_low_luid = DF_INSN_LUID (i2);
3356
3357 /* If I1 feeds into I2 and I1DEST is in I1SRC, we need to make a unique
3358 copy of I2SRC each time we substitute it, in order to avoid creating
3359 self-referential RTL when we will be substituting I1SRC for I1DEST
3360 later. Likewise if I0 feeds into I2, either directly or indirectly
3361 through I1, and I0DEST is in I0SRC. */
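/* A sketch of the hazard being avoided (invented registers): if I1
is (set (reg 90) (plus (reg 90) (const_int 1))) and feeds I2, then
substituting shared, uncopied instances of I2SRC and later replacing
(reg 90) inside them with I1SRC could create self-referential RTL.  */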
3362 newpat = subst (PATTERN (i3), i2dest, i2src, 0, 0,
3363 (i1_feeds_i2_n && i1dest_in_i1src)
3364 || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3365 && i0dest_in_i0src));
3366 substed_i2 = 1;
3367
3368 /* Record whether I2's body now appears within I3's body. */
3369 i2_is_used = n_occurrences;
3370 }
3371
3372 /* If we already got a failure, don't try to do more. Otherwise, try to
3373 substitute I1 if we have it. */
3374
3375 if (i1 && GET_CODE (newpat) != CLOBBER)
3376 {
3377 /* Check that an autoincrement side-effect on I1 has not been lost.
3378 This happens if I1DEST is mentioned in I2 and dies there, and
3379 has disappeared from the new pattern. */
3380 if ((FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3381 && i1_feeds_i2_n
3382 && dead_or_set_p (i2, i1dest)
3383 && !reg_overlap_mentioned_p (i1dest, newpat))
3384 /* Before we can do this substitution, we must redo the test done
3385 above (see detailed comments there) that ensures I1DEST isn't
3386 mentioned in any SETs in NEWPAT that are field assignments. */
3387 || !combinable_i3pat (NULL, &newpat, i1dest, NULL_RTX, NULL_RTX,
3388 0, 0, 0))
3389 {
3390 undo_all ();
3391 return 0;
3392 }
3393
3394 n_occurrences = 0;
3395 subst_low_luid = DF_INSN_LUID (i1);
3396
3397 /* If the following substitution will modify I1SRC, make a copy of it
3398 for the case where it is substituted for I1DEST in I2PAT later. */
3399 if (added_sets_2 && i1_feeds_i2_n)
3400 i1src_copy = copy_rtx (i1src);
3401
3402 /* If I0 feeds into I1 and I0DEST is in I0SRC, we need to make a unique
3403 copy of I1SRC each time we substitute it, in order to avoid creating
3404 self-referential RTL when we will be substituting I0SRC for I0DEST
3405 later. */
3406 newpat = subst (newpat, i1dest, i1src, 0, 0,
3407 i0_feeds_i1_n && i0dest_in_i0src);
3408 substed_i1 = 1;
3409
3410 /* Record whether I1's body now appears within I3's body. */
3411 i1_is_used = n_occurrences;
3412 }
3413
3414 /* Likewise for I0 if we have it. */
3415
3416 if (i0 && GET_CODE (newpat) != CLOBBER)
3417 {
3418 if ((FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3419 && ((i0_feeds_i2_n && dead_or_set_p (i2, i0dest))
3420 || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest)))
3421 && !reg_overlap_mentioned_p (i0dest, newpat))
3422 || !combinable_i3pat (NULL, &newpat, i0dest, NULL_RTX, NULL_RTX,
3423 0, 0, 0))
3424 {
3425 undo_all ();
3426 return 0;
3427 }
3428
3429 /* If the following substitution will modify I0SRC, make a copy of it
3430 for the case where it is substituted for I0DEST in I1PAT later. */
3431 if (added_sets_1 && i0_feeds_i1_n)
3432 i0src_copy = copy_rtx (i0src);
3433 /* And a copy for I0DEST in I2PAT substitution. */
3434 if (added_sets_2 && ((i0_feeds_i1_n && i1_feeds_i2_n)
3435 || (i0_feeds_i2_n)))
3436 i0src_copy2 = copy_rtx (i0src);
3437
3438 n_occurrences = 0;
3439 subst_low_luid = DF_INSN_LUID (i0);
3440 newpat = subst (newpat, i0dest, i0src, 0, 0, 0);
3441 substed_i0 = 1;
3442 }
3443
3444 /* Fail if an autoincrement side-effect has been duplicated. Be careful
3445 to count all the ways that I2SRC and I1SRC can be used. */
3446 if ((FIND_REG_INC_NOTE (i2, NULL_RTX) != 0
3447 && i2_is_used + added_sets_2 > 1)
3448 || (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3449 && (i1_is_used + added_sets_1 + (added_sets_2 && i1_feeds_i2_n)
3450 > 1))
3451 || (i0 != 0 && FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3452 && (n_occurrences + added_sets_0
3453 + (added_sets_1 && i0_feeds_i1_n)
3454 + (added_sets_2 && i0_feeds_i2_n)
3455 > 1))
3456 /* Fail if we tried to make a new register. */
3457 || max_reg_num () != maxreg
3458 /* Fail if we couldn't do something and have a CLOBBER. */
3459 || GET_CODE (newpat) == CLOBBER
3460 /* Fail if this new pattern is a MULT and we didn't have one before
3461 at the outer level. */
3462 || (GET_CODE (newpat) == SET && GET_CODE (SET_SRC (newpat)) == MULT
3463 && ! have_mult))
3464 {
3465 undo_all ();
3466 return 0;
3467 }
3468
3469 /* If the actions of the earlier insns must be kept
3470 in addition to substituting them into the latest one,
3471 we must make a new PARALLEL for the latest insn
3472 to hold the additional SETs. */
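/* Sketch (invented operands): if NEWPAT is (set (reg 95) X) and only
the SET from I2 must be kept, the code below produces
(parallel [(set (reg 95) X) (set I2DEST I2SRC)]).  */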
3473
3474 if (added_sets_0 || added_sets_1 || added_sets_2)
3475 {
3476 int extra_sets = added_sets_0 + added_sets_1 + added_sets_2;
3477 combine_extras++;
3478
3479 if (GET_CODE (newpat) == PARALLEL)
3480 {
3481 rtvec old = XVEC (newpat, 0);
3482 total_sets = XVECLEN (newpat, 0) + extra_sets;
3483 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3484 memcpy (XVEC (newpat, 0)->elem, &old->elem[0],
3485 sizeof (old->elem[0]) * old->num_elem);
3486 }
3487 else
3488 {
3489 rtx old = newpat;
3490 total_sets = 1 + extra_sets;
3491 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3492 XVECEXP (newpat, 0, 0) = old;
3493 }
3494
3495 if (added_sets_0)
3496 XVECEXP (newpat, 0, --total_sets) = i0pat;
3497
3498 if (added_sets_1)
3499 {
3500 rtx t = i1pat;
3501 if (i0_feeds_i1_n)
3502 t = subst (t, i0dest, i0src_copy ? i0src_copy : i0src, 0, 0, 0);
3503
3504 XVECEXP (newpat, 0, --total_sets) = t;
3505 }
3506 if (added_sets_2)
3507 {
3508 rtx t = i2pat;
3509 if (i1_feeds_i2_n)
3510 t = subst (t, i1dest, i1src_copy ? i1src_copy : i1src, 0, 0,
3511 i0_feeds_i1_n && i0dest_in_i0src);
3512 if ((i0_feeds_i1_n && i1_feeds_i2_n) || i0_feeds_i2_n)
3513 t = subst (t, i0dest, i0src_copy2 ? i0src_copy2 : i0src, 0, 0, 0);
3514
3515 XVECEXP (newpat, 0, --total_sets) = t;
3516 }
3517 }
3518
3519 validate_replacement:
3520
3521 /* Note which hard regs this insn has as inputs. */
3522 mark_used_regs_combine (newpat);
3523
3524 /* If recog_for_combine fails, it strips existing clobbers. If we'll
3525 consider splitting this pattern, we might need these clobbers. */
3526 if (i1 && GET_CODE (newpat) == PARALLEL
3527 && GET_CODE (XVECEXP (newpat, 0, XVECLEN (newpat, 0) - 1)) == CLOBBER)
3528 {
3529 int len = XVECLEN (newpat, 0);
3530
3531 newpat_vec_with_clobbers = rtvec_alloc (len);
3532 for (i = 0; i < len; i++)
3533 RTVEC_ELT (newpat_vec_with_clobbers, i) = XVECEXP (newpat, 0, i);
3534 }
3535
3536 /* We have recognized nothing yet. */
3537 insn_code_number = -1;
3538
3539 /* See if this is a PARALLEL of two SETs where one SET's destination is
3540 a register that is unused and this isn't marked as an instruction that
3541 might trap in an EH region. In that case, we just need the other SET.
3542 We prefer this over the PARALLEL.
3543
3544 This can occur when simplifying a divmod insn. We *must* test for this
3545 case here because the code below that splits two independent SETs doesn't
3546 handle this case correctly when it updates the register status.
3547
3548 It's pointless doing this if we originally had two sets, one from
3549 i3, and one from i2. Combining then splitting the parallel results
3550 in the original i2 again plus an invalid insn (which we delete).
3551 The net effect is only to move instructions around, which makes
3552 debug info less accurate.
3553
3554 If the remaining SET came from I2, its destination should not be used
3555 between I2 and I3. See PR82024. */
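/* A hedged divmod example (invented pseudos): NEWPAT may be
(parallel [(set (reg 90) (div (reg 92) (reg 93)))
(set (reg 91) (mod (reg 92) (reg 93)))])
with a REG_UNUSED note for (reg 91) on I3; keeping only the first
SET is then preferable to keeping the whole PARALLEL.  */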
3556
3557 if (!(added_sets_2 && i1 == 0)
3558 && is_parallel_of_n_reg_sets (newpat, 2)
3559 && asm_noperands (newpat) < 0)
3560 {
3561 rtx set0 = XVECEXP (newpat, 0, 0);
3562 rtx set1 = XVECEXP (newpat, 0, 1);
3563 rtx oldpat = newpat;
3564
3565 if (((REG_P (SET_DEST (set1))
3566 && find_reg_note (i3, REG_UNUSED, SET_DEST (set1)))
3567 || (GET_CODE (SET_DEST (set1)) == SUBREG
3568 && find_reg_note (i3, REG_UNUSED, SUBREG_REG (SET_DEST (set1)))))
3569 && insn_nothrow_p (i3)
3570 && !side_effects_p (SET_SRC (set1)))
3571 {
3572 newpat = set0;
3573 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3574 }
3575
3576 else if (((REG_P (SET_DEST (set0))
3577 && find_reg_note (i3, REG_UNUSED, SET_DEST (set0)))
3578 || (GET_CODE (SET_DEST (set0)) == SUBREG
3579 && find_reg_note (i3, REG_UNUSED,
3580 SUBREG_REG (SET_DEST (set0)))))
3581 && insn_nothrow_p (i3)
3582 && !side_effects_p (SET_SRC (set0)))
3583 {
3584 rtx dest = SET_DEST (set1);
3585 if (GET_CODE (dest) == SUBREG)
3586 dest = SUBREG_REG (dest);
3587 if (!reg_used_between_p (dest, i2, i3))
3588 {
3589 newpat = set1;
3590 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3591
3592 if (insn_code_number >= 0)
3593 changed_i3_dest = 1;
3594 }
3595 }
3596
3597 if (insn_code_number < 0)
3598 newpat = oldpat;
3599 }
3600
3601 /* Is the result of combination a valid instruction? */
3602 if (insn_code_number < 0)
3603 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3604
3605 /* If we were combining three insns and the result is a simple SET
3606 with no ASM_OPERANDS that wasn't recognized, try to split it into two
3607 insns. There are two ways to do this. It can be split using a
3608 machine-specific method (like when you have an addition of a large
3609 constant) or by combine in the function find_split_point. */
3610
3611 if (i1 && insn_code_number < 0 && GET_CODE (newpat) == SET
3612 && asm_noperands (newpat) < 0)
3613 {
3614 rtx parallel, *split;
3615 rtx_insn *m_split_insn;
3616
3617 /* See if the MD file can split NEWPAT. If it can't, see if letting it
3618 use I2DEST as a scratch register will help. In the latter case,
3619 convert I2DEST to the mode of the source of NEWPAT if we can. */
3620
3621 m_split_insn = combine_split_insns (newpat, i3);
3622
3623 /* We can only use I2DEST as a scratch reg if it doesn't overlap any
3624 inputs of NEWPAT. */
3625
3626 /* ??? If I2DEST is not safe, and I1DEST exists, then it would be
3627 possible to try that as a scratch reg. This would require adding
3628 more code to make it work though. */
3629
3630 if (m_split_insn == 0 && ! reg_overlap_mentioned_p (i2dest, newpat))
3631 {
3632 machine_mode new_mode = GET_MODE (SET_DEST (newpat));
3633
3634 /* ??? Reusing i2dest without resetting the reg_stat entry for it
3635 (temporarily, until we are committed to this instruction
3636 combination) does not work: for example, any call to nonzero_bits
3637 on the register (from a splitter in the MD file, for example)
3638 will get the old information, which is invalid.
3639
3640 Since nowadays we can create registers during combine just fine,
3641 we should just create a new one here, not reuse i2dest. */
3642
3643 /* First try to split using the original register as a
3644 scratch register. */
3645 parallel = gen_rtx_PARALLEL (VOIDmode,
3646 gen_rtvec (2, newpat,
3647 gen_rtx_CLOBBER (VOIDmode,
3648 i2dest)));
3649 m_split_insn = combine_split_insns (parallel, i3);
3650
3651 /* If that didn't work, try changing the mode of I2DEST if
3652 we can. */
3653 if (m_split_insn == 0
3654 && new_mode != GET_MODE (i2dest)
3655 && new_mode != VOIDmode
3656 && can_change_dest_mode (i2dest, added_sets_2, new_mode))
3657 {
3658 machine_mode old_mode = GET_MODE (i2dest);
3659 rtx ni2dest;
3660
3661 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3662 ni2dest = gen_rtx_REG (new_mode, REGNO (i2dest));
3663 else
3664 {
3665 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], new_mode);
3666 ni2dest = regno_reg_rtx[REGNO (i2dest)];
3667 }
3668
3669 parallel = (gen_rtx_PARALLEL
3670 (VOIDmode,
3671 gen_rtvec (2, newpat,
3672 gen_rtx_CLOBBER (VOIDmode,
3673 ni2dest))));
3674 m_split_insn = combine_split_insns (parallel, i3);
3675
3676 if (m_split_insn == 0
3677 && REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
3678 {
3679 struct undo *buf;
3680
3681 adjust_reg_mode (regno_reg_rtx[REGNO (i2dest)], old_mode);
3682 buf = undobuf.undos;
3683 undobuf.undos = buf->next;
3684 buf->next = undobuf.frees;
3685 undobuf.frees = buf;
3686 }
3687 }
3688
3689 i2scratch = m_split_insn != 0;
3690 }
3691
3692 /* If recog_for_combine has discarded clobbers, try to use them
3693 again for the split. */
3694 if (m_split_insn == 0 && newpat_vec_with_clobbers)
3695 {
3696 parallel = gen_rtx_PARALLEL (VOIDmode, newpat_vec_with_clobbers);
3697 m_split_insn = combine_split_insns (parallel, i3);
3698 }
3699
3700 if (m_split_insn && NEXT_INSN (m_split_insn) == NULL_RTX)
3701 {
3702 rtx m_split_pat = PATTERN (m_split_insn);
3703 insn_code_number = recog_for_combine (&m_split_pat, i3, &new_i3_notes);
3704 if (insn_code_number >= 0)
3705 newpat = m_split_pat;
3706 }
3707 else if (m_split_insn && NEXT_INSN (NEXT_INSN (m_split_insn)) == NULL_RTX
3708 && (next_nonnote_nondebug_insn (i2) == i3
3709 || !modified_between_p (PATTERN (m_split_insn), i2, i3)))
3710 {
3711 rtx i2set, i3set;
3712 rtx newi3pat = PATTERN (NEXT_INSN (m_split_insn));
3713 newi2pat = PATTERN (m_split_insn);
3714
3715 i3set = single_set (NEXT_INSN (m_split_insn));
3716 i2set = single_set (m_split_insn);
3717
3718 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3719
3720 /* If I2 or I3 has multiple SETs, we won't know how to track
3721 register status, so don't use these insns. If I2's destination
3722 is used between I2 and I3, we also can't use these insns. */
3723
3724 if (i2_code_number >= 0 && i2set && i3set
3725 && (next_nonnote_nondebug_insn (i2) == i3
3726 || ! reg_used_between_p (SET_DEST (i2set), i2, i3)))
3727 insn_code_number = recog_for_combine (&newi3pat, i3,
3728 &new_i3_notes);
3729 if (insn_code_number >= 0)
3730 newpat = newi3pat;
3731
3732 /* It is possible that both insns now set the destination of I3.
3733 If so, we must show an extra use of it. */
3734
3735 if (insn_code_number >= 0)
3736 {
3737 rtx new_i3_dest = SET_DEST (i3set);
3738 rtx new_i2_dest = SET_DEST (i2set);
3739
3740 while (GET_CODE (new_i3_dest) == ZERO_EXTRACT
3741 || GET_CODE (new_i3_dest) == STRICT_LOW_PART
3742 || GET_CODE (new_i3_dest) == SUBREG)
3743 new_i3_dest = XEXP (new_i3_dest, 0);
3744
3745 while (GET_CODE (new_i2_dest) == ZERO_EXTRACT
3746 || GET_CODE (new_i2_dest) == STRICT_LOW_PART
3747 || GET_CODE (new_i2_dest) == SUBREG)
3748 new_i2_dest = XEXP (new_i2_dest, 0);
3749
3750 if (REG_P (new_i3_dest)
3751 && REG_P (new_i2_dest)
3752 && REGNO (new_i3_dest) == REGNO (new_i2_dest)
3753 && REGNO (new_i2_dest) < reg_n_sets_max)
3754 INC_REG_N_SETS (REGNO (new_i2_dest), 1);
3755 }
3756 }
3757
3758 /* If we can split it and use I2DEST, go ahead and see if that
3759 helps things be recognized. Verify that none of the registers
3760 are set between I2 and I3. */
3761 if (insn_code_number < 0
3762 && (split = find_split_point (&newpat, i3, false)) != 0
3763 && (!HAVE_cc0 || REG_P (i2dest))
3764 /* We need I2DEST in the proper mode. If it is a hard register
3765 or the only use of a pseudo, we can change its mode.
3766 Make sure we don't change a hard register to have a mode that
3767 isn't valid for it, or change the number of registers. */
3768 && (GET_MODE (*split) == GET_MODE (i2dest)
3769 || GET_MODE (*split) == VOIDmode
3770 || can_change_dest_mode (i2dest, added_sets_2,
3771 GET_MODE (*split)))
3772 && (next_nonnote_nondebug_insn (i2) == i3
3773 || !modified_between_p (*split, i2, i3))
3774 /* We can't overwrite I2DEST if its value is still used by
3775 NEWPAT. */
3776 && ! reg_referenced_p (i2dest, newpat))
3777 {
3778 rtx newdest = i2dest;
3779 enum rtx_code split_code = GET_CODE (*split);
3780 machine_mode split_mode = GET_MODE (*split);
3781 bool subst_done = false;
3782 newi2pat = NULL_RTX;
3783
3784 i2scratch = true;
3785
3786 /* *SPLIT may be part of I2SRC, so make sure we have the
3787 original expression around for later debug processing.
3788 We should not need I2SRC any more in other cases. */
3789 if (MAY_HAVE_DEBUG_BIND_INSNS)
3790 i2src = copy_rtx (i2src);
3791 else
3792 i2src = NULL;
3793
3794 /* Get NEWDEST as a register in the proper mode. We have already
3795 validated that we can do this. */
3796 if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode)
3797 {
3798 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3799 newdest = gen_rtx_REG (split_mode, REGNO (i2dest));
3800 else
3801 {
3802 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], split_mode);
3803 newdest = regno_reg_rtx[REGNO (i2dest)];
3804 }
3805 }
3806
3807 /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
3808 an ASHIFT. This can occur if it was inside a PLUS and hence
3809 appeared to be a memory address. This is a kludge. */
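/* E.g. (mult:SI (reg:SI 90) (const_int 8)) is rewritten below as
(ashift:SI (reg:SI 90) (const_int 3)); the register number is
illustrative only.  */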
3810 if (split_code == MULT
3811 && CONST_INT_P (XEXP (*split, 1))
3812 && INTVAL (XEXP (*split, 1)) > 0
3813 && (i = exact_log2 (UINTVAL (XEXP (*split, 1)))) >= 0)
3814 {
3815 rtx i_rtx = gen_int_shift_amount (split_mode, i);
3816 SUBST (*split, gen_rtx_ASHIFT (split_mode,
3817 XEXP (*split, 0), i_rtx));
3818 /* Update split_code because we may not have a multiply
3819 anymore. */
3820 split_code = GET_CODE (*split);
3821 }
3822
3823 /* Similarly for (plus (mult FOO (const_int pow2))). */
3824 if (split_code == PLUS
3825 && GET_CODE (XEXP (*split, 0)) == MULT
3826 && CONST_INT_P (XEXP (XEXP (*split, 0), 1))
3827 && INTVAL (XEXP (XEXP (*split, 0), 1)) > 0
3828 && (i = exact_log2 (UINTVAL (XEXP (XEXP (*split, 0), 1)))) >= 0)
3829 {
3830 rtx nsplit = XEXP (*split, 0);
3831 rtx i_rtx = gen_int_shift_amount (GET_MODE (nsplit), i);
3832 SUBST (XEXP (*split, 0), gen_rtx_ASHIFT (GET_MODE (nsplit),
3833 XEXP (nsplit, 0),
3834 i_rtx));
3835 /* Update split_code because we may not have a multiply
3836 anymore. */
3837 split_code = GET_CODE (*split);
3838 }
3839
3840 #ifdef INSN_SCHEDULING
3841 /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
3842 be written as a ZERO_EXTEND. */
3843 if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
3844 {
3845 /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
3846 what it really is. */
3847 if (load_extend_op (GET_MODE (SUBREG_REG (*split)))
3848 == SIGN_EXTEND)
3849 SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
3850 SUBREG_REG (*split)));
3851 else
3852 SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
3853 SUBREG_REG (*split)));
3854 }
3855 #endif
3856
3857 /* Attempt to split binary operators using arithmetic identities. */
3858 if (BINARY_P (SET_SRC (newpat))
3859 && split_mode == GET_MODE (SET_SRC (newpat))
3860 && ! side_effects_p (SET_SRC (newpat)))
3861 {
3862 rtx setsrc = SET_SRC (newpat);
3863 machine_mode mode = GET_MODE (setsrc);
3864 enum rtx_code code = GET_CODE (setsrc);
3865 rtx src_op0 = XEXP (setsrc, 0);
3866 rtx src_op1 = XEXP (setsrc, 1);
3867
3868 /* Split "X = Y op Y" as "Z = Y; X = Z op Z". */
3869 if (rtx_equal_p (src_op0, src_op1))
3870 {
3871 newi2pat = gen_rtx_SET (newdest, src_op0);
3872 SUBST (XEXP (setsrc, 0), newdest);
3873 SUBST (XEXP (setsrc, 1), newdest);
3874 subst_done = true;
3875 }
3876 /* Split "((P op Q) op R) op S" where op is PLUS or MULT. */
3877 else if ((code == PLUS || code == MULT)
3878 && GET_CODE (src_op0) == code
3879 && GET_CODE (XEXP (src_op0, 0)) == code
3880 && (INTEGRAL_MODE_P (mode)
3881 || (FLOAT_MODE_P (mode)
3882 && flag_unsafe_math_optimizations)))
3883 {
3884 rtx p = XEXP (XEXP (src_op0, 0), 0);
3885 rtx q = XEXP (XEXP (src_op0, 0), 1);
3886 rtx r = XEXP (src_op0, 1);
3887 rtx s = src_op1;
3888
3889 /* Split both "((X op Y) op X) op Y" and
3890 "((X op Y) op Y) op X" as "T op T" where T is
3891 "X op Y". */
3892 if ((rtx_equal_p (p, r) && rtx_equal_p (q, s))
3893 || (rtx_equal_p (p, s) && rtx_equal_p (q, r)))
3894 {
3895 newi2pat = gen_rtx_SET (newdest, XEXP (src_op0, 0));
3896 SUBST (XEXP (setsrc, 0), newdest);
3897 SUBST (XEXP (setsrc, 1), newdest);
3898 subst_done = true;
3899 }
3900 /* Split "((X op X) op Y) op Y" as "T op T" where
3901 T is "X op Y". */
3902 else if (rtx_equal_p (p, q) && rtx_equal_p (r, s))
3903 {
3904 rtx tmp = simplify_gen_binary (code, mode, p, r);
3905 newi2pat = gen_rtx_SET (newdest, tmp);
3906 SUBST (XEXP (setsrc, 0), newdest);
3907 SUBST (XEXP (setsrc, 1), newdest);
3908 subst_done = true;
3909 }
3910 }
3911 }
3912
3913 if (!subst_done)
3914 {
3915 newi2pat = gen_rtx_SET (newdest, *split);
3916 SUBST (*split, newdest);
3917 }
3918
3919 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3920
3921 /* recog_for_combine might have added CLOBBERs to newi2pat.
3922 Make sure NEWPAT does not depend on the clobbered regs. */
3923 if (GET_CODE (newi2pat) == PARALLEL)
3924 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
3925 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
3926 {
3927 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
3928 if (reg_overlap_mentioned_p (reg, newpat))
3929 {
3930 undo_all ();
3931 return 0;
3932 }
3933 }
3934
3935 /* If the split point was a MULT and we didn't have one before,
3936 don't use one now. */
3937 if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult))
3938 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3939 }
3940 }
3941
3942 /* Check for a case where we loaded from memory in a narrow mode and
3943 then sign extended it, but we need both registers. In that case,
3944 we have a PARALLEL with both loads from the same memory location.
3945 We can split this into a load from memory followed by a register-register
3946 copy. This saves at least one insn, more if register allocation can
3947 eliminate the copy.
3948
3949 We cannot do this if the destination of the first assignment is a
3950 condition code register or cc0. We eliminate this case by making sure
3951 the SET_DEST and SET_SRC have the same mode.
3952
3953 We cannot do this if the destination of the second assignment is
3954 a register that we have already assumed is zero-extended. Similarly
3955 for a SUBREG of such a register. */
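/* Illustrative shape of this case (hypothetical operands):
(parallel [(set (reg:SI 90) (sign_extend:SI (mem:HI A)))
(set (reg:HI 91) (mem:HI A))])
is split below into the extending load as NEWI2PAT and a copy of the
low part of (reg:SI 90) into (reg:HI 91) as NEWPAT.  */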
3956
3957 else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
3958 && GET_CODE (newpat) == PARALLEL
3959 && XVECLEN (newpat, 0) == 2
3960 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3961 && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
3962 && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0)))
3963 == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0))))
3964 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3965 && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3966 XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
3967 && !modified_between_p (SET_SRC (XVECEXP (newpat, 0, 1)), i2, i3)
3968 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
3969 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
3970 && ! (temp_expr = SET_DEST (XVECEXP (newpat, 0, 1)),
3971 (REG_P (temp_expr)
3972 && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
3973 && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
3974 BITS_PER_WORD)
3975 && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
3976 HOST_BITS_PER_INT)
3977 && (reg_stat[REGNO (temp_expr)].nonzero_bits
3978 != GET_MODE_MASK (word_mode))))
3979 && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
3980 && (temp_expr = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
3981 (REG_P (temp_expr)
3982 && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
3983 && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
3984 BITS_PER_WORD)
3985 && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
3986 HOST_BITS_PER_INT)
3987 && (reg_stat[REGNO (temp_expr)].nonzero_bits
3988 != GET_MODE_MASK (word_mode)))))
3989 && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
3990 SET_SRC (XVECEXP (newpat, 0, 1)))
3991 && ! find_reg_note (i3, REG_UNUSED,
3992 SET_DEST (XVECEXP (newpat, 0, 0))))
3993 {
3994 rtx ni2dest;
3995
3996 newi2pat = XVECEXP (newpat, 0, 0);
3997 ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
3998 newpat = XVECEXP (newpat, 0, 1);
3999 SUBST (SET_SRC (newpat),
4000 gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest));
4001 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
4002
4003 if (i2_code_number >= 0)
4004 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
4005
4006 if (insn_code_number >= 0)
4007 swap_i2i3 = 1;
4008 }
4009
4010 /* Similarly, check for a case where we have a PARALLEL of two independent
4011 SETs but we started with three insns. In this case, we can do the sets
4012 as two separate insns. This case occurs when some SET allows two
4013 other insns to combine, but the destination of that SET is still live.
4014
4015 Also do this if we started with two insns and (at least) one of the
4016 resulting sets is a noop; this noop will be deleted later.
4017
4018 Also do this if we started with two insns neither of which was a simple
4019 move. */
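/* Hedged illustration (invented registers): if NEWPAT came out as
(parallel [(set (reg 90) A) (set (reg 91) B)]) with the two SETs
independent, the code below re-materializes one SET as NEWI2PAT in
I2 and keeps the other as NEWPAT in I3.  */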
4020
4021 else if (insn_code_number < 0 && asm_noperands (newpat) < 0
4022 && GET_CODE (newpat) == PARALLEL
4023 && XVECLEN (newpat, 0) == 2
4024 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
4025 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
4026 && (i1 || set_noop_p (XVECEXP (newpat, 0, 0))
4027 || set_noop_p (XVECEXP (newpat, 0, 1)))
4028 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
4029 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
4030 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
4031 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
4032 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
4033 XVECEXP (newpat, 0, 0))
4034 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
4035 XVECEXP (newpat, 0, 1))
4036 && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
4037 && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1)))))
4038 {
4039 rtx set0 = XVECEXP (newpat, 0, 0);
4040 rtx set1 = XVECEXP (newpat, 0, 1);
4041
4042 /* Normally, it doesn't matter which of the two is done first,
4043 but the one that references cc0 can't be the second, and
4044 one which uses any regs/memory set in between i2 and i3 can't
4045 be first. The PARALLEL might also have been pre-existing in i3,
4046 so we need to make sure that we won't wrongly hoist a SET to i2
4047 that would conflict with a death note present in there, or would
4048 have its dest modified between i2 and i3. */
4049 if (!modified_between_p (SET_SRC (set1), i2, i3)
4050 && !(REG_P (SET_DEST (set1))
4051 && find_reg_note (i2, REG_DEAD, SET_DEST (set1)))
4052 && !(GET_CODE (SET_DEST (set1)) == SUBREG
4053 && find_reg_note (i2, REG_DEAD,
4054 SUBREG_REG (SET_DEST (set1))))
4055 && !modified_between_p (SET_DEST (set1), i2, i3)
4056 && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set0))
4057 /* If I3 is a jump, ensure that set0 is a jump so that
4058 we do not create invalid RTL. */
4059 && (!JUMP_P (i3) || SET_DEST (set0) == pc_rtx)
4060 )
4061 {
4062 newi2pat = set1;
4063 newpat = set0;
4064 }
4065 else if (!modified_between_p (SET_SRC (set0), i2, i3)
4066 && !(REG_P (SET_DEST (set0))
4067 && find_reg_note (i2, REG_DEAD, SET_DEST (set0)))
4068 && !(GET_CODE (SET_DEST (set0)) == SUBREG
4069 && find_reg_note (i2, REG_DEAD,
4070 SUBREG_REG (SET_DEST (set0))))
4071 && !modified_between_p (SET_DEST (set0), i2, i3)
4072 && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set1))
4073 /* If I3 is a jump, ensure that set1 is a jump so that
4074 we do not create invalid RTL. */
4075 && (!JUMP_P (i3) || SET_DEST (set1) == pc_rtx)
4076 )
4077 {
4078 newi2pat = set0;
4079 newpat = set1;
4080 }
4081 else
4082 {
4083 undo_all ();
4084 return 0;
4085 }
4086
4087 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
4088
4089 if (i2_code_number >= 0)
4090 {
4091 /* recog_for_combine might have added CLOBBERs to newi2pat.
4092 Make sure NEWPAT does not depend on the clobbered regs. */
4093 if (GET_CODE (newi2pat) == PARALLEL)
4094 {
4095 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
4096 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
4097 {
4098 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
4099 if (reg_overlap_mentioned_p (reg, newpat))
4100 {
4101 undo_all ();
4102 return 0;
4103 }
4104 }
4105 }
4106
4107 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
4108
4109 if (insn_code_number >= 0)
4110 split_i2i3 = 1;
4111 }
4112 }
4113
4114 /* If it still isn't recognized, fail and change things back the way they
4115 were. */
4116 if ((insn_code_number < 0
4117 /* Is the result a reasonable ASM_OPERANDS? */
4118 && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2)))
4119 {
4120 undo_all ();
4121 return 0;
4122 }
4123
4124 /* If we had to change another insn, make sure it is valid also. */
4125 if (undobuf.other_insn)
4126 {
4127 CLEAR_HARD_REG_SET (newpat_used_regs);
4128
4129 other_pat = PATTERN (undobuf.other_insn);
4130 other_code_number = recog_for_combine (&other_pat, undobuf.other_insn,
4131 &new_other_notes);
4132
4133 if (other_code_number < 0 && ! check_asm_operands (other_pat))
4134 {
4135 undo_all ();
4136 return 0;
4137 }
4138 }
4139
4140 /* If I2 is the CC0 setter and I3 is the CC0 user then check whether
4141 they are adjacent to each other or not. */
4142 if (HAVE_cc0)
4143 {
4144 rtx_insn *p = prev_nonnote_insn (i3);
4145 if (p && p != i2 && NONJUMP_INSN_P (p) && newi2pat
4146 && sets_cc0_p (newi2pat))
4147 {
4148 undo_all ();
4149 return 0;
4150 }
4151 }
4152
4153 /* Only allow this combination if insn_cost reports that the
4154 replacement instructions are cheaper than the originals. */
4155 if (!combine_validate_cost (i0, i1, i2, i3, newpat, newi2pat, other_pat))
4156 {
4157 undo_all ();
4158 return 0;
4159 }
4160
4161 if (MAY_HAVE_DEBUG_BIND_INSNS)
4162 {
4163 struct undo *undo;
4164
4165 for (undo = undobuf.undos; undo; undo = undo->next)
4166 if (undo->kind == UNDO_MODE)
4167 {
4168 rtx reg = *undo->where.r;
4169 machine_mode new_mode = GET_MODE (reg);
4170 machine_mode old_mode = undo->old_contents.m;
4171
4172 /* Temporarily revert mode back. */
4173 adjust_reg_mode (reg, old_mode);
4174
4175 if (reg == i2dest && i2scratch)
4176 {
4177 /* If we used i2dest as a scratch register with a
4178 different mode, substitute it for the original
4179 i2src while its original mode is temporarily
4180 restored, and then clear i2scratch so that we don't
4181 do it again later. */
4182 propagate_for_debug (i2, last_combined_insn, reg, i2src,
4183 this_basic_block);
4184 i2scratch = false;
4185 /* Put back the new mode. */
4186 adjust_reg_mode (reg, new_mode);
4187 }
4188 else
4189 {
4190 rtx tempreg = gen_raw_REG (old_mode, REGNO (reg));
4191 rtx_insn *first, *last;
4192
4193 if (reg == i2dest)
4194 {
4195 first = i2;
4196 last = last_combined_insn;
4197 }
4198 else
4199 {
4200 first = i3;
4201 last = undobuf.other_insn;
4202 gcc_assert (last);
4203 if (DF_INSN_LUID (last)
4204 < DF_INSN_LUID (last_combined_insn))
4205 last = last_combined_insn;
4206 }
4207
4208 /* We're dealing with a reg that changed mode but not
4209 meaning, so we want to turn it into a subreg for
4210 the new mode. However, because of REG sharing and
4211 because its mode had already changed, we have to do
4212 it in two steps. First, replace any debug uses of
4213 reg, with its original mode temporarily restored,
4214 with this copy we have created; then, replace the
4215 copy with the SUBREG of the original shared reg,
4216 once again changed to the new mode. */
4217 propagate_for_debug (first, last, reg, tempreg,
4218 this_basic_block);
4219 adjust_reg_mode (reg, new_mode);
4220 propagate_for_debug (first, last, tempreg,
4221 lowpart_subreg (old_mode, reg, new_mode),
4222 this_basic_block);
4223 }
4224 }
4225 }
4226
4227 /* If we will be able to accept this, we have made a
4228 change to the destination of I3. This requires us to
4229 do a few adjustments. */
4230
4231 if (changed_i3_dest)
4232 {
4233 PATTERN (i3) = newpat;
4234 adjust_for_new_dest (i3);
4235 }
4236
4237 /* We now know that we can do this combination. Merge the insns and
4238 update the status of registers and LOG_LINKS. */
4239
4240 if (undobuf.other_insn)
4241 {
4242 rtx note, next;
4243
4244 PATTERN (undobuf.other_insn) = other_pat;
4245
4246 /* If any of the notes in OTHER_INSN were REG_DEAD or REG_UNUSED,
4247 ensure that they are still valid. Then add any non-duplicate
4248 notes added by recog_for_combine. */
4249 for (note = REG_NOTES (undobuf.other_insn); note; note = next)
4250 {
4251 next = XEXP (note, 1);
4252
4253 if ((REG_NOTE_KIND (note) == REG_DEAD
4254 && !reg_referenced_p (XEXP (note, 0),
4255 PATTERN (undobuf.other_insn)))
4256 || (REG_NOTE_KIND (note) == REG_UNUSED
4257 && !reg_set_p (XEXP (note, 0),
4258 PATTERN (undobuf.other_insn)))
4259 /* Simply drop any REG_EQUAL or REG_EQUIV note, since it may
4260 no longer be valid for other_insn. It may be possible to
4261 record that the CC register has changed and discard only
4262 those notes, but in practice that is unnecessary
4263 complication and gives no meaningful improvement.
4264
4265 See PR78559. */
4266 || REG_NOTE_KIND (note) == REG_EQUAL
4267 || REG_NOTE_KIND (note) == REG_EQUIV)
4268 remove_note (undobuf.other_insn, note);
4269 }
4270
4271 distribute_notes (new_other_notes, undobuf.other_insn,
4272 undobuf.other_insn, NULL, NULL_RTX, NULL_RTX,
4273 NULL_RTX);
4274 }
4275
4276 if (swap_i2i3)
4277 {
4278 /* I3 now uses what used to be its destination and which is now
4279 I2's destination. This requires us to do a few adjustments. */
4280 PATTERN (i3) = newpat;
4281 adjust_for_new_dest (i3);
4282 }
4283
4284 if (swap_i2i3 || split_i2i3)
4285 {
4286 /* We might need a LOG_LINK from I3 to I2. But then we used to
4287 have one, so we still will.
4288
4289 However, some later insn might be using I2's dest and have
4290 a LOG_LINK pointing at I3. We should change it to point at
4291 I2 instead. */
4292
4293 /* newi2pat is usually a SET here; however, recog_for_combine might
4294 have added some clobbers. */
4295 rtx x = newi2pat;
4296 if (GET_CODE (x) == PARALLEL)
4297 x = XVECEXP (newi2pat, 0, 0);
4298
4299 /* It can only be a SET of a REG or of a SUBREG of a REG. */
4300 unsigned int regno = reg_or_subregno (SET_DEST (x));
4301
4302 bool done = false;
4303 for (rtx_insn *insn = NEXT_INSN (i3);
4304 !done
4305 && insn
4306 && NONDEBUG_INSN_P (insn)
4307 && BLOCK_FOR_INSN (insn) == this_basic_block;
4308 insn = NEXT_INSN (insn))
4309 {
4310 struct insn_link *link;
4311 FOR_EACH_LOG_LINK (link, insn)
4312 if (link->insn == i3 && link->regno == regno)
4313 {
4314 link->insn = i2;
4315 done = true;
4316 break;
4317 }
4318 }
4319 }
4320
4321 {
4322 rtx i3notes, i2notes, i1notes = 0, i0notes = 0;
4323 struct insn_link *i3links, *i2links, *i1links = 0, *i0links = 0;
4324 rtx midnotes = 0;
4325 int from_luid;
4326 /* Compute which registers we expect to eliminate. newi2pat may be setting
4327 either i3dest or i2dest, so we must check it. */
4328 rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
4329 || i2dest_in_i2src || i2dest_in_i1src || i2dest_in_i0src
4330 || !i2dest_killed
4331 ? 0 : i2dest);
4332 /* For i1, we need to compute both local elimination and global
4333 elimination information with respect to newi2pat because i1dest
4334 may be the same as i3dest, in which case newi2pat may be setting
4335 i1dest. Global information is used when distributing REG_DEAD
4336 note for i2 and i3, in which case it does matter if newi2pat sets
4337 i1dest or not.
4338
4339 Local information is used when distributing REG_DEAD note for i1,
4340 in which case it doesn't matter if newi2pat sets i1dest or not.
4341 See PR62151, if we have four insns combination:
4342 i0: r0 <- i0src
4343 i1: r1 <- i1src (using r0)
4344 REG_DEAD (r0)
4345 i2: r0 <- i2src (using r1)
4346 i3: r3 <- i3src (using r0)
4347 ix: using r0
4348 From i1's point of view, r0 is eliminated, no matter if it is set
4349 by newi2pat or not. In other words, REG_DEAD info for r0 in i1
4350 should be discarded.
4351
4352 Note local information only affects cases in forms like "I1->I2->I3",
4353 "I0->I1->I2->I3" or "I0&I1->I2, I2->I3". For other cases like
4354 "I0->I1, I1&I2->I3" or "I1&I2->I3", newi2pat won't set i1dest or
4355 i0dest anyway. */
4356 rtx local_elim_i1 = (i1 == 0 || i1dest_in_i1src || i1dest_in_i0src
4357 || !i1dest_killed
4358 ? 0 : i1dest);
4359 rtx elim_i1 = (local_elim_i1 == 0
4360 || (newi2pat && reg_set_p (i1dest, newi2pat))
4361 ? 0 : i1dest);
4362 /* Same case as i1. */
4363 rtx local_elim_i0 = (i0 == 0 || i0dest_in_i0src || !i0dest_killed
4364 ? 0 : i0dest);
4365 rtx elim_i0 = (local_elim_i0 == 0
4366 || (newi2pat && reg_set_p (i0dest, newi2pat))
4367 ? 0 : i0dest);
4368
4369 /* Get the old REG_NOTES and LOG_LINKS from all our insns and
4370 clear them. */
4371 i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
4372 i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
4373 if (i1)
4374 i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);
4375 if (i0)
4376 i0notes = REG_NOTES (i0), i0links = LOG_LINKS (i0);
4377
4378 /* Ensure that we do not have something that should not be shared but
4379 occurs multiple times in the new insns. Check this by first
4380 resetting all the `used' flags and then copying anything that is shared. */
4381
4382 reset_used_flags (i3notes);
4383 reset_used_flags (i2notes);
4384 reset_used_flags (i1notes);
4385 reset_used_flags (i0notes);
4386 reset_used_flags (newpat);
4387 reset_used_flags (newi2pat);
4388 if (undobuf.other_insn)
4389 reset_used_flags (PATTERN (undobuf.other_insn));
4390
4391 i3notes = copy_rtx_if_shared (i3notes);
4392 i2notes = copy_rtx_if_shared (i2notes);
4393 i1notes = copy_rtx_if_shared (i1notes);
4394 i0notes = copy_rtx_if_shared (i0notes);
4395 newpat = copy_rtx_if_shared (newpat);
4396 newi2pat = copy_rtx_if_shared (newi2pat);
4397 if (undobuf.other_insn)
4398 reset_used_flags (PATTERN (undobuf.other_insn));
4399
4400 INSN_CODE (i3) = insn_code_number;
4401 PATTERN (i3) = newpat;
4402
4403 if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3))
4404 {
4405 for (rtx link = CALL_INSN_FUNCTION_USAGE (i3); link;
4406 link = XEXP (link, 1))
4407 {
4408 if (substed_i2)
4409 {
4410 /* I2SRC must still be meaningful at this point. Some
4411 splitting operations can invalidate I2SRC, but those
4412 operations do not apply to calls. */
4413 gcc_assert (i2src);
4414 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4415 i2dest, i2src);
4416 }
4417 if (substed_i1)
4418 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4419 i1dest, i1src);
4420 if (substed_i0)
4421 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4422 i0dest, i0src);
4423 }
4424 }
4425
4426 if (undobuf.other_insn)
4427 INSN_CODE (undobuf.other_insn) = other_code_number;
4428
4429 /* We had one special case above where I2 had more than one set and
4430 we replaced a destination of one of those sets with the destination
4431 of I3. In that case, we have to update LOG_LINKS of insns later
4432 in this basic block. Note that this (expensive) case is rare.
4433
4434 Also, in this case, we must pretend that all REG_NOTEs for I2
4435 actually came from I3, so that REG_UNUSED notes from I2 will be
4436 properly handled. */
4437
4438 if (i3_subst_into_i2)
4439 {
4440 for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
4441 if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET
4442 || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER)
4443 && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i)))
4444 && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
4445 && ! find_reg_note (i2, REG_UNUSED,
4446 SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
4447 for (temp_insn = NEXT_INSN (i2);
4448 temp_insn
4449 && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
4450 || BB_HEAD (this_basic_block) != temp_insn);
4451 temp_insn = NEXT_INSN (temp_insn))
4452 if (temp_insn != i3 && NONDEBUG_INSN_P (temp_insn))
4453 FOR_EACH_LOG_LINK (link, temp_insn)
4454 if (link->insn == i2)
4455 link->insn = i3;
4456
4457 if (i3notes)
4458 {
4459 rtx link = i3notes;
4460 while (XEXP (link, 1))
4461 link = XEXP (link, 1);
4462 XEXP (link, 1) = i2notes;
4463 }
4464 else
4465 i3notes = i2notes;
4466 i2notes = 0;
4467 }
4468
4469 LOG_LINKS (i3) = NULL;
4470 REG_NOTES (i3) = 0;
4471 LOG_LINKS (i2) = NULL;
4472 REG_NOTES (i2) = 0;
4473
4474 if (newi2pat)
4475 {
4476 if (MAY_HAVE_DEBUG_BIND_INSNS && i2scratch)
4477 propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4478 this_basic_block);
4479 INSN_CODE (i2) = i2_code_number;
4480 PATTERN (i2) = newi2pat;
4481 }
4482 else
4483 {
4484 if (MAY_HAVE_DEBUG_BIND_INSNS && i2src)
4485 propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4486 this_basic_block);
4487 SET_INSN_DELETED (i2);
4488 }
4489
4490 if (i1)
4491 {
4492 LOG_LINKS (i1) = NULL;
4493 REG_NOTES (i1) = 0;
4494 if (MAY_HAVE_DEBUG_BIND_INSNS)
4495 propagate_for_debug (i1, last_combined_insn, i1dest, i1src,
4496 this_basic_block);
4497 SET_INSN_DELETED (i1);
4498 }
4499
4500 if (i0)
4501 {
4502 LOG_LINKS (i0) = NULL;
4503 REG_NOTES (i0) = 0;
4504 if (MAY_HAVE_DEBUG_BIND_INSNS)
4505 propagate_for_debug (i0, last_combined_insn, i0dest, i0src,
4506 this_basic_block);
4507 SET_INSN_DELETED (i0);
4508 }
4509
4510 /* Get death notes for everything that is now used in either I3 or
4511 I2 and used to die in a previous insn. If we built two new
4512 patterns, move from I1 to I2 then I2 to I3 so that we get the
4513 proper movement on registers that I2 modifies. */
4514
4515 if (i0)
4516 from_luid = DF_INSN_LUID (i0);
4517 else if (i1)
4518 from_luid = DF_INSN_LUID (i1);
4519 else
4520 from_luid = DF_INSN_LUID (i2);
4521 if (newi2pat)
4522 move_deaths (newi2pat, NULL_RTX, from_luid, i2, &midnotes);
4523 move_deaths (newpat, newi2pat, from_luid, i3, &midnotes);
4524
4525 /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3. */
4526 if (i3notes)
4527 distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL,
4528 elim_i2, elim_i1, elim_i0);
4529 if (i2notes)
4530 distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL,
4531 elim_i2, elim_i1, elim_i0);
4532 if (i1notes)
4533 distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL,
4534 elim_i2, local_elim_i1, local_elim_i0);
4535 if (i0notes)
4536 distribute_notes (i0notes, i0, i3, newi2pat ? i2 : NULL,
4537 elim_i2, elim_i1, local_elim_i0);
4538 if (midnotes)
4539 distribute_notes (midnotes, NULL, i3, newi2pat ? i2 : NULL,
4540 elim_i2, elim_i1, elim_i0);
4541
4542 /* Distribute any notes added to I2 or I3 by recog_for_combine. We
4543 know these are REG_UNUSED and want them to go to the desired insn,
4544 so we always pass it as i3. */
4545
4546 if (newi2pat && new_i2_notes)
4547 distribute_notes (new_i2_notes, i2, i2, NULL, NULL_RTX, NULL_RTX,
4548 NULL_RTX);
4549
4550 if (new_i3_notes)
4551 distribute_notes (new_i3_notes, i3, i3, NULL, NULL_RTX, NULL_RTX,
4552 NULL_RTX);
4553
4554 /* If I3DEST was used in I3SRC, it really died in I3. We may need to
4555 put a REG_DEAD note for it somewhere. If NEWI2PAT exists and sets
4556 I3DEST, the death must be somewhere before I2, not I3. If we passed I3
4557 in that case, it might delete I2. Similarly for I2 and I1.
4558 Show an additional death due to the REG_DEAD note we make here. If
4559 we discard it in distribute_notes, we will decrement it again. */
4560
4561 if (i3dest_killed)
4562 {
4563 rtx new_note = alloc_reg_note (REG_DEAD, i3dest_killed, NULL_RTX);
4564 if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
4565 distribute_notes (new_note, NULL, i2, NULL, elim_i2,
4566 elim_i1, elim_i0);
4567 else
4568 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4569 elim_i2, elim_i1, elim_i0);
4570 }
4571
4572 if (i2dest_in_i2src)
4573 {
4574 rtx new_note = alloc_reg_note (REG_DEAD, i2dest, NULL_RTX);
4575 if (newi2pat && reg_set_p (i2dest, newi2pat))
4576 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4577 NULL_RTX, NULL_RTX);
4578 else
4579 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4580 NULL_RTX, NULL_RTX, NULL_RTX);
4581 }
4582
4583 if (i1dest_in_i1src)
4584 {
4585 rtx new_note = alloc_reg_note (REG_DEAD, i1dest, NULL_RTX);
4586 if (newi2pat && reg_set_p (i1dest, newi2pat))
4587 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4588 NULL_RTX, NULL_RTX);
4589 else
4590 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4591 NULL_RTX, NULL_RTX, NULL_RTX);
4592 }
4593
4594 if (i0dest_in_i0src)
4595 {
4596 rtx new_note = alloc_reg_note (REG_DEAD, i0dest, NULL_RTX);
4597 if (newi2pat && reg_set_p (i0dest, newi2pat))
4598 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4599 NULL_RTX, NULL_RTX);
4600 else
4601 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4602 NULL_RTX, NULL_RTX, NULL_RTX);
4603 }
4604
4605 distribute_links (i3links);
4606 distribute_links (i2links);
4607 distribute_links (i1links);
4608 distribute_links (i0links);
4609
4610 if (REG_P (i2dest))
4611 {
4612 struct insn_link *link;
4613 rtx_insn *i2_insn = 0;
4614 rtx i2_val = 0, set;
4615
4616 /* The insn that used to set this register doesn't exist, and
4617 this life of the register may not exist either. See if one of
4618 I3's links points to an insn that sets I2DEST. If it does,
4619 that is now the last known value for I2DEST. If we don't update
4620 this and I2 set the register to a value that depended on its old
4621 contents, we will get confused. If this insn is used, things
4622 will be set correctly in combine_instructions. */
4623 FOR_EACH_LOG_LINK (link, i3)
4624 if ((set = single_set (link->insn)) != 0
4625 && rtx_equal_p (i2dest, SET_DEST (set)))
4626 i2_insn = link->insn, i2_val = SET_SRC (set);
4627
4628 record_value_for_reg (i2dest, i2_insn, i2_val);
4629
4630 /* If the reg formerly set in I2 died only once and that was in I3,
4631 zero its use count so it won't make `reload' do any work. */
4632 if (! added_sets_2
4633 && (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat))
4634 && ! i2dest_in_i2src
4635 && REGNO (i2dest) < reg_n_sets_max)
4636 INC_REG_N_SETS (REGNO (i2dest), -1);
4637 }
4638
4639 if (i1 && REG_P (i1dest))
4640 {
4641 struct insn_link *link;
4642 rtx_insn *i1_insn = 0;
4643 rtx i1_val = 0, set;
4644
4645 FOR_EACH_LOG_LINK (link, i3)
4646 if ((set = single_set (link->insn)) != 0
4647 && rtx_equal_p (i1dest, SET_DEST (set)))
4648 i1_insn = link->insn, i1_val = SET_SRC (set);
4649
4650 record_value_for_reg (i1dest, i1_insn, i1_val);
4651
4652 if (! added_sets_1
4653 && ! i1dest_in_i1src
4654 && REGNO (i1dest) < reg_n_sets_max)
4655 INC_REG_N_SETS (REGNO (i1dest), -1);
4656 }
4657
4658 if (i0 && REG_P (i0dest))
4659 {
4660 struct insn_link *link;
4661 rtx_insn *i0_insn = 0;
4662 rtx i0_val = 0, set;
4663
4664 FOR_EACH_LOG_LINK (link, i3)
4665 if ((set = single_set (link->insn)) != 0
4666 && rtx_equal_p (i0dest, SET_DEST (set)))
4667 i0_insn = link->insn, i0_val = SET_SRC (set);
4668
4669 record_value_for_reg (i0dest, i0_insn, i0_val);
4670
4671 if (! added_sets_0
4672 && ! i0dest_in_i0src
4673 && REGNO (i0dest) < reg_n_sets_max)
4674 INC_REG_N_SETS (REGNO (i0dest), -1);
4675 }
4676
4677 /* Update reg_stat[].nonzero_bits et al for any changes that may have
4678 been made to this insn. The order is important, because newi2pat
4679 can affect nonzero_bits of newpat. */
4680 if (newi2pat)
4681 note_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL);
4682 note_stores (newpat, set_nonzero_bits_and_sign_copies, NULL);
4683 }
4684
4685 if (undobuf.other_insn != NULL_RTX)
4686 {
4687 if (dump_file)
4688 {
4689 fprintf (dump_file, "modifying other_insn ");
4690 dump_insn_slim (dump_file, undobuf.other_insn);
4691 }
4692 df_insn_rescan (undobuf.other_insn);
4693 }
4694
4695 if (i0 && !(NOTE_P (i0) && (NOTE_KIND (i0) == NOTE_INSN_DELETED)))
4696 {
4697 if (dump_file)
4698 {
4699 fprintf (dump_file, "modifying insn i0 ");
4700 dump_insn_slim (dump_file, i0);
4701 }
4702 df_insn_rescan (i0);
4703 }
4704
4705 if (i1 && !(NOTE_P (i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED)))
4706 {
4707 if (dump_file)
4708 {
4709 fprintf (dump_file, "modifying insn i1 ");
4710 dump_insn_slim (dump_file, i1);
4711 }
4712 df_insn_rescan (i1);
4713 }
4714
4715 if (i2 && !(NOTE_P (i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED)))
4716 {
4717 if (dump_file)
4718 {
4719 fprintf (dump_file, "modifying insn i2 ");
4720 dump_insn_slim (dump_file, i2);
4721 }
4722 df_insn_rescan (i2);
4723 }
4724
4725 if (i3 && !(NOTE_P (i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED)))
4726 {
4727 if (dump_file)
4728 {
4729 fprintf (dump_file, "modifying insn i3 ");
4730 dump_insn_slim (dump_file, i3);
4731 }
4732 df_insn_rescan (i3);
4733 }
4734
4735 /* Set new_direct_jump_p if a new return or simple jump instruction
4736 has been created. Adjust the CFG accordingly. */
4737 if (returnjump_p (i3) || any_uncondjump_p (i3))
4738 {
4739 *new_direct_jump_p = 1;
4740 mark_jump_label (PATTERN (i3), i3, 0);
4741 update_cfg_for_uncondjump (i3);
4742 }
4743
4744 if (undobuf.other_insn != NULL_RTX
4745 && (returnjump_p (undobuf.other_insn)
4746 || any_uncondjump_p (undobuf.other_insn)))
4747 {
4748 *new_direct_jump_p = 1;
4749 update_cfg_for_uncondjump (undobuf.other_insn);
4750 }
4751
4752 if (GET_CODE (PATTERN (i3)) == TRAP_IF
4753 && XEXP (PATTERN (i3), 0) == const1_rtx)
4754 {
4755 basic_block bb = BLOCK_FOR_INSN (i3);
4756 gcc_assert (bb);
4757 remove_edge (split_block (bb, i3));
4758 emit_barrier_after_bb (bb);
4759 *new_direct_jump_p = 1;
4760 }
4761
4762 if (undobuf.other_insn
4763 && GET_CODE (PATTERN (undobuf.other_insn)) == TRAP_IF
4764 && XEXP (PATTERN (undobuf.other_insn), 0) == const1_rtx)
4765 {
4766 basic_block bb = BLOCK_FOR_INSN (undobuf.other_insn);
4767 gcc_assert (bb);
4768 remove_edge (split_block (bb, undobuf.other_insn));
4769 emit_barrier_after_bb (bb);
4770 *new_direct_jump_p = 1;
4771 }
4772
4773 /* A noop might also need cleaning up of the CFG, if it comes from
4774 the simplification of a jump. */
4775 if (JUMP_P (i3)
4776 && GET_CODE (newpat) == SET
4777 && SET_SRC (newpat) == pc_rtx
4778 && SET_DEST (newpat) == pc_rtx)
4779 {
4780 *new_direct_jump_p = 1;
4781 update_cfg_for_uncondjump (i3);
4782 }
4783
4784 if (undobuf.other_insn != NULL_RTX
4785 && JUMP_P (undobuf.other_insn)
4786 && GET_CODE (PATTERN (undobuf.other_insn)) == SET
4787 && SET_SRC (PATTERN (undobuf.other_insn)) == pc_rtx
4788 && SET_DEST (PATTERN (undobuf.other_insn)) == pc_rtx)
4789 {
4790 *new_direct_jump_p = 1;
4791 update_cfg_for_uncondjump (undobuf.other_insn);
4792 }
4793
4794 combine_successes++;
4795 undo_commit ();
4796
4797 rtx_insn *ret = newi2pat ? i2 : i3;
4798 if (added_links_insn && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (ret))
4799 ret = added_links_insn;
4800 if (added_notes_insn && DF_INSN_LUID (added_notes_insn) < DF_INSN_LUID (ret))
4801 ret = added_notes_insn;
4802
4803 return ret;
4804 }
4805
4806 /* Get a marker for undoing to the current state. */
4807
4808 static void *
4809 get_undo_marker (void)
4810 {
4811 return undobuf.undos;
4812 }
4813
4814 /* Undo the modifications up to the marker. */
4815
4816 static void
4817 undo_to_marker (void *marker)
4818 {
4819 struct undo *undo, *next;
4820
4821 for (undo = undobuf.undos; undo != marker; undo = next)
4822 {
4823 gcc_assert (undo);
4824
4825 next = undo->next;
4826 switch (undo->kind)
4827 {
4828 case UNDO_RTX:
4829 *undo->where.r = undo->old_contents.r;
4830 break;
4831 case UNDO_INT:
4832 *undo->where.i = undo->old_contents.i;
4833 break;
4834 case UNDO_MODE:
4835 adjust_reg_mode (*undo->where.r, undo->old_contents.m);
4836 break;
4837 case UNDO_LINKS:
4838 *undo->where.l = undo->old_contents.l;
4839 break;
4840 default:
4841 gcc_unreachable ();
4842 }
4843
4844 undo->next = undobuf.frees;
4845 undobuf.frees = undo;
4846 }
4847
4848 undobuf.undos = (struct undo *) marker;
4849 }
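/* A minimal usage sketch (the caller, LOC and NEW_RTX are
hypothetical): a tentative simplification can be tried and rolled
back with

void *marker = get_undo_marker ();
SUBST (*loc, new_rtx);
if (!acceptable)
undo_to_marker (marker);

leaving any undos recorded before the marker intact.  */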
4850
4851 /* Undo all the modifications recorded in undobuf. */
4852
4853 static void
4854 undo_all (void)
4855 {
4856 undo_to_marker (0);
4857 }
4858
4859 /* We've committed to accepting the changes we made. Move all
4860 of the undos to the free list. */
4861
4862 static void
4863 undo_commit (void)
4864 {
4865 struct undo *undo, *next;
4866
4867 for (undo = undobuf.undos; undo; undo = next)
4868 {
4869 next = undo->next;
4870 undo->next = undobuf.frees;
4871 undobuf.frees = undo;
4872 }
4873 undobuf.undos = 0;
4874 }
4875
4876 /* Find the innermost point within the rtx at LOC, possibly LOC itself,
4877 where we have an arithmetic expression and return that point. LOC will
4878 be inside INSN.
4879
4880 try_combine will call this function to see if an insn can be split into
4881 two insns. */
4882
4883 static rtx *
4884 find_split_point (rtx *loc, rtx_insn *insn, bool set_src)
4885 {
4886 rtx x = *loc;
4887 enum rtx_code code = GET_CODE (x);
4888 rtx *split;
4889 unsigned HOST_WIDE_INT len = 0;
4890 HOST_WIDE_INT pos = 0;
4891 int unsignedp = 0;
4892 rtx inner = NULL_RTX;
4893 scalar_int_mode mode, inner_mode;
4894
4895 /* First special-case some codes. */
4896 switch (code)
4897 {
4898 case SUBREG:
4899 #ifdef INSN_SCHEDULING
4900 /* If we are making a paradoxical SUBREG invalid, it becomes a split
4901 point. */
4902 if (MEM_P (SUBREG_REG (x)))
4903 return loc;
4904 #endif
4905 return find_split_point (&SUBREG_REG (x), insn, false);
4906
4907 case MEM:
4908 /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it
4909 using LO_SUM and HIGH. */
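/* For instance, (mem (symbol_ref "foo")) becomes
   (mem (lo_sum (high (symbol_ref "foo")) (symbol_ref "foo"))),
   and the returned split point is the HIGH subexpression, so the
   high part of the address can be computed by a separate insn.  */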
4910 if (HAVE_lo_sum && (GET_CODE (XEXP (x, 0)) == CONST
4911 || GET_CODE (XEXP (x, 0)) == SYMBOL_REF))
4912 {
4913 machine_mode address_mode = get_address_mode (x);
4914
4915 SUBST (XEXP (x, 0),
4916 gen_rtx_LO_SUM (address_mode,
4917 gen_rtx_HIGH (address_mode, XEXP (x, 0)),
4918 XEXP (x, 0)));
4919 return &XEXP (XEXP (x, 0), 0);
4920 }
4921
4922 /* If we have a PLUS whose second operand is a constant and the
4923 address is not valid, perhaps we can split it up using
4924 the machine-specific way to split large constants. We use
4925 the first pseudo-reg (one of the virtual regs) as a placeholder;
4926 it will not remain in the result. */
4927 if (GET_CODE (XEXP (x, 0)) == PLUS
4928 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
4929 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4930 MEM_ADDR_SPACE (x)))
4931 {
4932 rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
4933 rtx_insn *seq = combine_split_insns (gen_rtx_SET (reg, XEXP (x, 0)),
4934 subst_insn);
4935
4936 /* This should have produced two insns, each of which sets our
4937 placeholder. If the source of the second is a valid address,
4938 we can put both sources together and make a split point
4939 in the middle. */
4940
4941 if (seq
4942 && NEXT_INSN (seq) != NULL_RTX
4943 && NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
4944 && NONJUMP_INSN_P (seq)
4945 && GET_CODE (PATTERN (seq)) == SET
4946 && SET_DEST (PATTERN (seq)) == reg
4947 && ! reg_mentioned_p (reg,
4948 SET_SRC (PATTERN (seq)))
4949 && NONJUMP_INSN_P (NEXT_INSN (seq))
4950 && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET
4951 && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg
4952 && memory_address_addr_space_p
4953 (GET_MODE (x), SET_SRC (PATTERN (NEXT_INSN (seq))),
4954 MEM_ADDR_SPACE (x)))
4955 {
4956 rtx src1 = SET_SRC (PATTERN (seq));
4957 rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq)));
4958
4959 /* Replace the placeholder in SRC2 with SRC1. If we can
4960 find where in SRC2 it was placed, that can become our
4961 split point and we can replace this address with SRC2.
4962 Just try two obvious places. */
4963
4964 src2 = replace_rtx (src2, reg, src1);
4965 split = 0;
4966 if (XEXP (src2, 0) == src1)
4967 split = &XEXP (src2, 0);
4968 else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
4969 && XEXP (XEXP (src2, 0), 0) == src1)
4970 split = &XEXP (XEXP (src2, 0), 0);
4971
4972 if (split)
4973 {
4974 SUBST (XEXP (x, 0), src2);
4975 return split;
4976 }
4977 }
4978
4979 /* If that didn't work, perhaps the first operand is complex and
4980 needs to be computed separately, so make a split point there.
4981 This will occur on machines that just support REG + CONST
4982 and have a constant moved through some previous computation. */
4983
4984 else if (!OBJECT_P (XEXP (XEXP (x, 0), 0))
4985 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4986 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4987 return &XEXP (XEXP (x, 0), 0);
4988 }
4989
4990 /* If we have a PLUS whose first operand is complex, try computing it
4991 separately by making a split there. */
4992 if (GET_CODE (XEXP (x, 0)) == PLUS
4993 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4994 MEM_ADDR_SPACE (x))
4995 && ! OBJECT_P (XEXP (XEXP (x, 0), 0))
4996 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4997 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4998 return &XEXP (XEXP (x, 0), 0);
4999 break;
5000
5001 case SET:
5002 /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a
5003 ZERO_EXTRACT, the most likely reason why this doesn't match is that
5004 we need to put the operand into a register. So split at that
5005 point. */
5006
5007 if (SET_DEST (x) == cc0_rtx
5008 && GET_CODE (SET_SRC (x)) != COMPARE
5009 && GET_CODE (SET_SRC (x)) != ZERO_EXTRACT
5010 && !OBJECT_P (SET_SRC (x))
5011 && ! (GET_CODE (SET_SRC (x)) == SUBREG
5012 && OBJECT_P (SUBREG_REG (SET_SRC (x)))))
5013 return &SET_SRC (x);
5014
5015 /* See if we can split SET_SRC as it stands. */
5016 split = find_split_point (&SET_SRC (x), insn, true);
5017 if (split && split != &SET_SRC (x))
5018 return split;
5019
5020 /* See if we can split SET_DEST as it stands. */
5021 split = find_split_point (&SET_DEST (x), insn, false);
5022 if (split && split != &SET_DEST (x))
5023 return split;
5024
5025 /* See if this is a bitfield assignment with everything constant. If
5026 so, this is an IOR of an AND, so split it into that. */
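/* A worked example (assuming SImode and !BITS_BIG_ENDIAN): storing
   the constant 5 into a 3-bit field at bit 4 of R,
     (set (zero_extract R (const_int 3) (const_int 4)) (const_int 5)),
   has mask = 7, ~(7 << 4) = -113 and (5 & 7) << 4 = 80, so it
   becomes
     (set R (ior (and R (const_int -113)) (const_int 80))).  */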
5027 if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
5028 && is_a <scalar_int_mode> (GET_MODE (XEXP (SET_DEST (x), 0)),
5029 &inner_mode)
5030 && HWI_COMPUTABLE_MODE_P (inner_mode)
5031 && CONST_INT_P (XEXP (SET_DEST (x), 1))
5032 && CONST_INT_P (XEXP (SET_DEST (x), 2))
5033 && CONST_INT_P (SET_SRC (x))
5034 && ((INTVAL (XEXP (SET_DEST (x), 1))
5035 + INTVAL (XEXP (SET_DEST (x), 2)))
5036 <= GET_MODE_PRECISION (inner_mode))
5037 && ! side_effects_p (XEXP (SET_DEST (x), 0)))
5038 {
5039 HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
5040 unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1));
5041 rtx dest = XEXP (SET_DEST (x), 0);
5042 unsigned HOST_WIDE_INT mask = (HOST_WIDE_INT_1U << len) - 1;
5043 unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x)) & mask;
5044 rtx or_mask;
5045
5046 if (BITS_BIG_ENDIAN)
5047 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
5048
5049 or_mask = gen_int_mode (src << pos, inner_mode);
5050 if (src == mask)
5051 SUBST (SET_SRC (x),
5052 simplify_gen_binary (IOR, inner_mode, dest, or_mask));
5053 else
5054 {
5055 rtx negmask = gen_int_mode (~(mask << pos), inner_mode);
5056 SUBST (SET_SRC (x),
5057 simplify_gen_binary (IOR, inner_mode,
5058 simplify_gen_binary (AND, inner_mode,
5059 dest, negmask),
5060 or_mask));
5061 }
5062
5063 SUBST (SET_DEST (x), dest);
5064
5065 split = find_split_point (&SET_SRC (x), insn, true);
5066 if (split && split != &SET_SRC (x))
5067 return split;
5068 }
5069
5070 /* Otherwise, see if this is an operation that we can split into two.
5071 If so, try to split that. */
5072 code = GET_CODE (SET_SRC (x));
5073
5074 switch (code)
5075 {
5076 case AND:
5077 /* If we are AND'ing with a large constant that is only a single
5078 bit and the result is only being used in a context where we
5079 need to know if it is zero or nonzero, replace it with a bit
5080 extraction. This will avoid the large constant, which might
5081 have taken more than one insn to make. If the constant were
5082 not a valid argument to the AND but took only one insn to make,
5083 this is no worse, but if it took more than one insn, it will
5084 be better. */
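/* As an illustration: given
     (set (reg T) (and (reg R) (const_int 4096)))
   whose single use is (ne (reg T) (const_int 0)), the AND can be
   replaced by a one-bit extraction along the lines of
     (zero_extract (reg R) (const_int 1) (const_int 12)),
   avoiding the constant 4096 entirely.  */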
5085
5086 if (CONST_INT_P (XEXP (SET_SRC (x), 1))
5087 && REG_P (XEXP (SET_SRC (x), 0))
5088 && (pos = exact_log2 (UINTVAL (XEXP (SET_SRC (x), 1)))) >= 7
5089 && REG_P (SET_DEST (x))
5090 && (split = find_single_use (SET_DEST (x), insn, NULL)) != 0
5091 && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
5092 && XEXP (*split, 0) == SET_DEST (x)
5093 && XEXP (*split, 1) == const0_rtx)
5094 {
5095 rtx extraction = make_extraction (GET_MODE (SET_DEST (x)),
5096 XEXP (SET_SRC (x), 0),
5097 pos, NULL_RTX, 1, 1, 0, 0);
5098 if (extraction != 0)
5099 {
5100 SUBST (SET_SRC (x), extraction);
5101 return find_split_point (loc, insn, false);
5102 }
5103 }
5104 break;
5105
5106 case NE:
5107 /* If STORE_FLAG_VALUE is -1, this is (NE X 0) and only one bit of X
5108 is known to be on, this can be converted into a NEG of a shift. */
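/* Illustration: if only bit 3 of X can be nonzero, then with
   STORE_FLAG_VALUE == -1, (ne X (const_int 0)) becomes
   (neg (lshiftrt X (const_int 3))): the shift yields 0 or 1
   and the negation yields 0 or -1.  */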
5109 if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
5110 && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
5111 && ((pos = exact_log2 (nonzero_bits (XEXP (SET_SRC (x), 0),
5112 GET_MODE (XEXP (SET_SRC (x),
5113 0))))) >= 1))
5114 {
5115 machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));
5116 rtx pos_rtx = gen_int_shift_amount (mode, pos);
5117 SUBST (SET_SRC (x),
5118 gen_rtx_NEG (mode,
5119 gen_rtx_LSHIFTRT (mode,
5120 XEXP (SET_SRC (x), 0),
5121 pos_rtx)));
5122
5123 split = find_split_point (&SET_SRC (x), insn, true);
5124 if (split && split != &SET_SRC (x))
5125 return split;
5126 }
5127 break;
5128
5129 case SIGN_EXTEND:
5130 inner = XEXP (SET_SRC (x), 0);
5131
5132 /* We can't optimize if either mode is a partial integer
5133 mode as we don't know how many bits are significant
5134 in those modes. */
5135 if (!is_int_mode (GET_MODE (inner), &inner_mode)
5136 || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT)
5137 break;
5138
5139 pos = 0;
5140 len = GET_MODE_PRECISION (inner_mode);
5141 unsignedp = 0;
5142 break;
5143
5144 case SIGN_EXTRACT:
5145 case ZERO_EXTRACT:
5146 if (is_a <scalar_int_mode> (GET_MODE (XEXP (SET_SRC (x), 0)),
5147 &inner_mode)
5148 && CONST_INT_P (XEXP (SET_SRC (x), 1))
5149 && CONST_INT_P (XEXP (SET_SRC (x), 2)))
5150 {
5151 inner = XEXP (SET_SRC (x), 0);
5152 len = INTVAL (XEXP (SET_SRC (x), 1));
5153 pos = INTVAL (XEXP (SET_SRC (x), 2));
5154
5155 if (BITS_BIG_ENDIAN)
5156 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
5157 unsignedp = (code == ZERO_EXTRACT);
5158 }
5159 break;
5160
5161 default:
5162 break;
5163 }
5164
5165 if (len
5166 && known_subrange_p (pos, len,
5167 0, GET_MODE_PRECISION (GET_MODE (inner)))
5168 && is_a <scalar_int_mode> (GET_MODE (SET_SRC (x)), &mode))
5169 {
5170 /* For unsigned, we have a choice of a shift followed by an
5171 AND or two shifts. Use two shifts for field sizes where the
5172 constant might be too large. We assume here that we can
5173 always at least get 8-bit constants in an AND insn, which is
5174 true for every current RISC. */
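/* Concretely (illustrative SImode case, !BITS_BIG_ENDIAN): an
   unsigned 4-bit field at bit 8 of INNER becomes
     (and (lshiftrt INNER (const_int 8)) (const_int 15)),
   while a signed 4-bit field at bit 8 uses the two-shift form
     (ashiftrt (ashift INNER (const_int 20)) (const_int 28)).  */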
5175
5176 if (unsignedp && len <= 8)
5177 {
5178 unsigned HOST_WIDE_INT mask
5179 = (HOST_WIDE_INT_1U << len) - 1;
5180 rtx pos_rtx = gen_int_shift_amount (mode, pos);
5181 SUBST (SET_SRC (x),
5182 gen_rtx_AND (mode,
5183 gen_rtx_LSHIFTRT
5184 (mode, gen_lowpart (mode, inner), pos_rtx),
5185 gen_int_mode (mask, mode)));
5186
5187 split = find_split_point (&SET_SRC (x), insn, true);
5188 if (split && split != &SET_SRC (x))
5189 return split;
5190 }
5191 else
5192 {
5193 int left_bits = GET_MODE_PRECISION (mode) - len - pos;
5194 int right_bits = GET_MODE_PRECISION (mode) - len;
5195 SUBST (SET_SRC (x),
5196 gen_rtx_fmt_ee
5197 (unsignedp ? LSHIFTRT : ASHIFTRT, mode,
5198 gen_rtx_ASHIFT (mode,
5199 gen_lowpart (mode, inner),
5200 gen_int_shift_amount (mode, left_bits)),
5201 gen_int_shift_amount (mode, right_bits)));
5202
5203 split = find_split_point (&SET_SRC (x), insn, true);
5204 if (split && split != &SET_SRC (x))
5205 return split;
5206 }
5207 }
5208
5209 /* See if this is a simple operation with a constant as the second
5210 operand. It might be that this constant is out of range and hence
5211 could be used as a split point. */
5212 if (BINARY_P (SET_SRC (x))
5213 && CONSTANT_P (XEXP (SET_SRC (x), 1))
5214 && (OBJECT_P (XEXP (SET_SRC (x), 0))
5215 || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG
5216 && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0))))))
5217 return &XEXP (SET_SRC (x), 1);
5218
5219 /* Finally, see if this is a simple operation with its first operand
5220 not in a register. The operation might require this operand in a
5221 register, so return it as a split point. We can always do this
5222 because if the first operand were another operation, we would have
5223 already found it as a split point. */
5224 if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x)))
5225 && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode))
5226 return &XEXP (SET_SRC (x), 0);
5227
5228 return 0;
5229
5230 case AND:
5231 case IOR:
5232 /* We write NOR as (and (not A) (not B)), but if we don't have a NOR,
5233 it is better to write this as (not (ior A B)) so we can split it.
5234 Similarly for IOR. */
5235 if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
5236 {
5237 SUBST (*loc,
5238 gen_rtx_NOT (GET_MODE (x),
5239 gen_rtx_fmt_ee (code == IOR ? AND : IOR,
5240 GET_MODE (x),
5241 XEXP (XEXP (x, 0), 0),
5242 XEXP (XEXP (x, 1), 0))));
5243 return find_split_point (loc, insn, set_src);
5244 }
5245
5246 /* Many RISC machines have a large set of logical insns. If the
5247 second operand is a NOT, put it first so we will try to split the
5248 other operand first. */
5249 if (GET_CODE (XEXP (x, 1)) == NOT)
5250 {
5251 rtx tem = XEXP (x, 0);
5252 SUBST (XEXP (x, 0), XEXP (x, 1));
5253 SUBST (XEXP (x, 1), tem);
5254 }
5255 break;
5256
5257 case PLUS:
5258 case MINUS:
5259 /* Canonicalization can produce (minus A (mult B C)), where C is a
5260 constant. It may be better to try splitting (plus (mult B -C) A)
5261 instead if this isn't a multiply by a power of two. */
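/* E.g. (minus A (mult B (const_int 7))) is rewritten as
   (plus (mult B (const_int -7)) A) before looking for a split,
   since a multiply by 7 is not a simple shift.  */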
5262 if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT
5263 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
5264 && !pow2p_hwi (INTVAL (XEXP (XEXP (x, 1), 1))))
5265 {
5266 machine_mode mode = GET_MODE (x);
5267 unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1));
5268 HOST_WIDE_INT other_int = trunc_int_for_mode (-this_int, mode);
5269 SUBST (*loc, gen_rtx_PLUS (mode,
5270 gen_rtx_MULT (mode,
5271 XEXP (XEXP (x, 1), 0),
5272 gen_int_mode (other_int,
5273 mode)),
5274 XEXP (x, 0)));
5275 return find_split_point (loc, insn, set_src);
5276 }
5277
5278 /* Split at a multiply-accumulate instruction. However if this is
5279 the SET_SRC, we likely do not have such an instruction and it's
5280 worthless to try this split. */
5281 if (!set_src
5282 && (GET_CODE (XEXP (x, 0)) == MULT
5283 || (GET_CODE (XEXP (x, 0)) == ASHIFT
5284 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
5285 return loc;
5286
5287 default:
5288 break;
5289 }
5290
5291 /* Otherwise, select our actions depending on our rtx class. */
5292 switch (GET_RTX_CLASS (code))
5293 {
5294 case RTX_BITFIELD_OPS: /* This is ZERO_EXTRACT and SIGN_EXTRACT. */
5295 case RTX_TERNARY:
5296 split = find_split_point (&XEXP (x, 2), insn, false);
5297 if (split)
5298 return split;
5299 /* fall through */
5300 case RTX_BIN_ARITH:
5301 case RTX_COMM_ARITH:
5302 case RTX_COMPARE:
5303 case RTX_COMM_COMPARE:
5304 split = find_split_point (&XEXP (x, 1), insn, false);
5305 if (split)
5306 return split;
5307 /* fall through */
5308 case RTX_UNARY:
5309 /* Some machines have (and (shift ...) ...) insns. If X is not
5310 an AND, but XEXP (X, 0) is, use it as our split point. */
5311 if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND)
5312 return &XEXP (x, 0);
5313
5314 split = find_split_point (&XEXP (x, 0), insn, false);
5315 if (split)
5316 return split;
5317 return loc;
5318
5319 default:
5320 /* Otherwise, we don't have a split point. */
5321 return 0;
5322 }
5323 }
5324
5325 /* Throughout X, replace FROM with TO, and return the result.
5326 The result is TO if X is FROM;
5327 otherwise the result is X, but its contents may have been modified.
5328 If they were modified, a record was made in undobuf so that
5329 undo_all will (among other things) return X to its original state.
5330
5331 If the number of changes necessary is too great to record for undoing,
5332 the excess changes are not made, so the result is invalid.
5333 The changes already made can still be undone.
5334 undobuf.num_undo is incremented for such changes, so by testing that
5335 the caller can tell whether the result is valid.
5336
5337 `n_occurrences' is incremented each time FROM is replaced.
5338
5339 IN_DEST is nonzero if we are processing the SET_DEST of a SET.
5340
5341 IN_COND is nonzero if we are at the top level of a condition.
5342
5343 UNIQUE_COPY is nonzero if each substitution must be unique. We do this
5344 by copying if `n_occurrences' is nonzero. */
5345
5346 static rtx
5347 subst (rtx x, rtx from, rtx to, int in_dest, int in_cond, int unique_copy)
5348 {
5349 enum rtx_code code = GET_CODE (x);
5350 machine_mode op0_mode = VOIDmode;
5351 const char *fmt;
5352 int len, i;
5353 rtx new_rtx;
5354
5355 /* Two expressions are equal if they are identical copies of a shared
5356 RTX or if they are both registers with the same register number
5357 and mode. */
5358
5359 #define COMBINE_RTX_EQUAL_P(X,Y) \
5360 ((X) == (Y) \
5361 || (REG_P (X) && REG_P (Y) \
5362 && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))
5363
5364 /* Do not substitute into clobbers of regs -- this will never result in
5365 valid RTL. */
5366 if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
5367 return x;
5368
5369 if (! in_dest && COMBINE_RTX_EQUAL_P (x, from))
5370 {
5371 n_occurrences++;
5372 return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to);
5373 }
5374
5375 /* If X and FROM are the same register but different modes, they
5376 will not have been seen as equal above. However, the log links code
5377 will make a LOG_LINKS entry for that case. If we do nothing, we
5378 will try to rerecognize our original insn and, when it succeeds,
5379 we will delete the feeding insn, which is incorrect.
5380
5381 So force this insn not to match in this (rare) case. */
5382 if (! in_dest && code == REG && REG_P (from)
5383 && reg_overlap_mentioned_p (x, from))
5384 return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
5385
5386 /* If this is an object, we are done unless it is a MEM or LO_SUM, both
5387 of which may contain things that can be combined. */
5388 if (code != MEM && code != LO_SUM && OBJECT_P (x))
5389 return x;
5390
5391 /* It is possible to have a subexpression appear twice in the insn.
5392 Suppose that FROM is a register that appears within TO.
5393 Then, after that subexpression has been scanned once by `subst',
5394 the second time it is scanned, TO may be found. If we were
5395 to scan TO here, we would find FROM within it and create a
5396 self-referential rtl structure, which is completely wrong. */
5397 if (COMBINE_RTX_EQUAL_P (x, to))
5398 return to;
5399
5400 /* Parallel asm_operands need special attention because all of the
5401 inputs are shared across the arms. Furthermore, unsharing the
5402 rtl results in recognition failures. Failure to handle this case
5403 specially can result in circular rtl.
5404
5405 Solve this by doing a normal pass across the first entry of the
5406 parallel, and only processing the SET_DESTs of the subsequent
5407 entries. Ug. */
5408
5409 if (code == PARALLEL
5410 && GET_CODE (XVECEXP (x, 0, 0)) == SET
5411 && GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS)
5412 {
5413 new_rtx = subst (XVECEXP (x, 0, 0), from, to, 0, 0, unique_copy);
5414
5415 /* If this substitution failed, this whole thing fails. */
5416 if (GET_CODE (new_rtx) == CLOBBER
5417 && XEXP (new_rtx, 0) == const0_rtx)
5418 return new_rtx;
5419
5420 SUBST (XVECEXP (x, 0, 0), new_rtx);
5421
5422 for (i = XVECLEN (x, 0) - 1; i >= 1; i--)
5423 {
5424 rtx dest = SET_DEST (XVECEXP (x, 0, i));
5425
5426 if (!REG_P (dest)
5427 && GET_CODE (dest) != CC0
5428 && GET_CODE (dest) != PC)
5429 {
5430 new_rtx = subst (dest, from, to, 0, 0, unique_copy);
5431
5432 /* If this substitution failed, this whole thing fails. */
5433 if (GET_CODE (new_rtx) == CLOBBER
5434 && XEXP (new_rtx, 0) == const0_rtx)
5435 return new_rtx;
5436
5437 SUBST (SET_DEST (XVECEXP (x, 0, i)), new_rtx);
5438 }
5439 }
5440 }
5441 else
5442 {
5443 len = GET_RTX_LENGTH (code);
5444 fmt = GET_RTX_FORMAT (code);
5445
5446 /* We don't need to process a SET_DEST that is a register, CC0,
5447 or PC, so set up to skip this common case. All other cases
5448 where we want to suppress replacing something inside a
5449 SET_SRC are handled via the IN_DEST operand. */
5450 if (code == SET
5451 && (REG_P (SET_DEST (x))
5452 || GET_CODE (SET_DEST (x)) == CC0
5453 || GET_CODE (SET_DEST (x)) == PC))
5454 fmt = "ie";
5455
5456 /* Trying to simplify the operands of a widening MULT is not likely
5457 to create RTL matching a machine insn. */
5458 if (code == MULT
5459 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
5460 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
5461 && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
5462 || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
5463 && REG_P (XEXP (XEXP (x, 0), 0))
5464 && REG_P (XEXP (XEXP (x, 1), 0))
5465 && from == to)
5466 return x;
5467
5468
5469 /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a
5470 constant. */
5471 if (fmt[0] == 'e')
5472 op0_mode = GET_MODE (XEXP (x, 0));
5473
5474 for (i = 0; i < len; i++)
5475 {
5476 if (fmt[i] == 'E')
5477 {
5478 int j;
5479 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5480 {
5481 if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from))
5482 {
5483 new_rtx = (unique_copy && n_occurrences
5484 ? copy_rtx (to) : to);
5485 n_occurrences++;
5486 }
5487 else
5488 {
5489 new_rtx = subst (XVECEXP (x, i, j), from, to, 0, 0,
5490 unique_copy);
5491
5492 /* If this substitution failed, this whole thing
5493 fails. */
5494 if (GET_CODE (new_rtx) == CLOBBER
5495 && XEXP (new_rtx, 0) == const0_rtx)
5496 return new_rtx;
5497 }
5498
5499 SUBST (XVECEXP (x, i, j), new_rtx);
5500 }
5501 }
5502 else if (fmt[i] == 'e')
5503 {
5504 /* If this is a register being set, ignore it. */
5505 new_rtx = XEXP (x, i);
5506 if (in_dest
5507 && i == 0
5508 && (((code == SUBREG || code == ZERO_EXTRACT)
5509 && REG_P (new_rtx))
5510 || code == STRICT_LOW_PART))
5511 ;
5512
5513 else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
5514 {
5515 /* In general, don't install a subreg involving two
5516 modes that are not tieable. It can worsen register
5517 allocation, and can even make invalid reload
5518 insns, since the reg inside may need to be copied
5519 from in the outside mode, and that may be invalid
5520 if it is an fp reg copied in integer mode.
5521
5522 We allow two exceptions to this: It is valid if
5523 it is inside another SUBREG and the mode of that
5524 SUBREG and the mode of the inside of TO is
5525 tieable and it is valid if X is a SET that copies
5526 FROM to CC0. */
5527
5528 if (GET_CODE (to) == SUBREG
5529 && !targetm.modes_tieable_p (GET_MODE (to),
5530 GET_MODE (SUBREG_REG (to)))
5531 && ! (code == SUBREG
5532 && (targetm.modes_tieable_p
5533 (GET_MODE (x), GET_MODE (SUBREG_REG (to)))))
5534 && (!HAVE_cc0
5535 || (! (code == SET
5536 && i == 1
5537 && XEXP (x, 0) == cc0_rtx))))
5538 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5539
5540 if (code == SUBREG
5541 && REG_P (to)
5542 && REGNO (to) < FIRST_PSEUDO_REGISTER
5543 && simplify_subreg_regno (REGNO (to), GET_MODE (to),
5544 SUBREG_BYTE (x),
5545 GET_MODE (x)) < 0)
5546 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5547
5548 new_rtx = (unique_copy && n_occurrences ? copy_rtx (to) : to);
5549 n_occurrences++;
5550 }
5551 else
5552 /* If we are in a SET_DEST, suppress most cases unless we
5553 have gone inside a MEM, in which case we want to
5554 simplify the address. We assume here that things that
5555 are actually part of the destination have their inner
5556 parts in the first expression. This is true for SUBREG,
5557 STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
5558 things aside from REG and MEM that should appear in a
5559 SET_DEST. */
5560 new_rtx = subst (XEXP (x, i), from, to,
5561 (((in_dest
5562 && (code == SUBREG || code == STRICT_LOW_PART
5563 || code == ZERO_EXTRACT))
5564 || code == SET)
5565 && i == 0),
5566 code == IF_THEN_ELSE && i == 0,
5567 unique_copy);
5568
5569 /* If we found that we will have to reject this combination,
5570 indicate that by returning the CLOBBER ourselves, rather than
5571 an expression containing it. This will speed things up as
5572 well as prevent accidents where two CLOBBERs are considered
5573 to be equal, thus producing an incorrect simplification. */
5574
5575 if (GET_CODE (new_rtx) == CLOBBER && XEXP (new_rtx, 0) == const0_rtx)
5576 return new_rtx;
5577
5578 if (GET_CODE (x) == SUBREG && CONST_SCALAR_INT_P (new_rtx))
5579 {
5580 machine_mode mode = GET_MODE (x);
5581
5582 x = simplify_subreg (GET_MODE (x), new_rtx,
5583 GET_MODE (SUBREG_REG (x)),
5584 SUBREG_BYTE (x));
5585 if (! x)
5586 x = gen_rtx_CLOBBER (mode, const0_rtx);
5587 }
5588 else if (CONST_SCALAR_INT_P (new_rtx)
5589 && (GET_CODE (x) == ZERO_EXTEND
5590 || GET_CODE (x) == FLOAT
5591 || GET_CODE (x) == UNSIGNED_FLOAT))
5592 {
5593 x = simplify_unary_operation (GET_CODE (x), GET_MODE (x),
5594 new_rtx,
5595 GET_MODE (XEXP (x, 0)));
5596 if (!x)
5597 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5598 }
5599 else
5600 SUBST (XEXP (x, i), new_rtx);
5601 }
5602 }
5603 }
5604
5605 /* Check whether we are loading something from the constant pool via float
5606 extension; in that case we would undo the compress_float_constant
5607 optimization and degrade the constant load to an immediate value. */
5608 if (GET_CODE (x) == FLOAT_EXTEND
5609 && MEM_P (XEXP (x, 0))
5610 && MEM_READONLY_P (XEXP (x, 0)))
5611 {
5612 rtx tmp = avoid_constant_pool_reference (x);
5613 if (x != tmp)
5614 return x;
5615 }
5616
5617 /* Try to simplify X. If the simplification changed the code, it is likely
5618 that further simplification will help, so loop, but limit the number
5619 of repetitions that will be performed. */
5620
5621 for (i = 0; i < 4; i++)
5622 {
5623 /* If X is sufficiently simple, don't bother trying to do anything
5624 with it. */
5625 if (code != CONST_INT && code != REG && code != CLOBBER)
5626 x = combine_simplify_rtx (x, op0_mode, in_dest, in_cond);
5627
5628 if (GET_CODE (x) == code)
5629 break;
5630
5631 code = GET_CODE (x);
5632
5633 /* We no longer know the original mode of operand 0 since we
5634 have changed the form of X. */
5635 op0_mode = VOIDmode;
5636 }
5637
5638 return x;
5639 }
5640
5641 /* If X is a commutative operation whose operands are not in the canonical
5642 order, use substitutions to swap them. */
5643
5644 static void
5645 maybe_swap_commutative_operands (rtx x)
5646 {
5647 if (COMMUTATIVE_ARITH_P (x)
5648 && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5649 {
5650 rtx temp = XEXP (x, 0);
5651 SUBST (XEXP (x, 0), XEXP (x, 1));
5652 SUBST (XEXP (x, 1), temp);
5653 }
5654 }
5655
5656 /* Simplify X, a piece of RTL. We just operate on the expression at the
5657 outer level; call `subst' to simplify recursively. Return the new
5658 expression.
5659
5660 OP0_MODE is the original mode of XEXP (x, 0). IN_DEST is nonzero
5661 if we are inside a SET_DEST. IN_COND is nonzero if we are at the top level
5662 of a condition. */
5663
5664 static rtx
5665 combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest,
5666 int in_cond)
5667 {
5668 enum rtx_code code = GET_CODE (x);
5669 machine_mode mode = GET_MODE (x);
5670 scalar_int_mode int_mode;
5671 rtx temp;
5672 int i;
5673
5674 /* If this is a commutative operation, put a constant last and a complex
5675 expression first. We don't need to do this for comparisons here. */
5676 maybe_swap_commutative_operands (x);
5677
5678 /* Try to fold this expression in case we have constants that weren't
5679 present before. */
5680 temp = 0;
5681 switch (GET_RTX_CLASS (code))
5682 {
5683 case RTX_UNARY:
5684 if (op0_mode == VOIDmode)
5685 op0_mode = GET_MODE (XEXP (x, 0));
5686 temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
5687 break;
5688 case RTX_COMPARE:
5689 case RTX_COMM_COMPARE:
5690 {
5691 machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
5692 if (cmp_mode == VOIDmode)
5693 {
5694 cmp_mode = GET_MODE (XEXP (x, 1));
5695 if (cmp_mode == VOIDmode)
5696 cmp_mode = op0_mode;
5697 }
5698 temp = simplify_relational_operation (code, mode, cmp_mode,
5699 XEXP (x, 0), XEXP (x, 1));
5700 }
5701 break;
5702 case RTX_COMM_ARITH:
5703 case RTX_BIN_ARITH:
5704 temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5705 break;
5706 case RTX_BITFIELD_OPS:
5707 case RTX_TERNARY:
5708 temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0),
5709 XEXP (x, 1), XEXP (x, 2));
5710 break;
5711 default:
5712 break;
5713 }
5714
5715 if (temp)
5716 {
5717 x = temp;
5718 code = GET_CODE (temp);
5719 op0_mode = VOIDmode;
5720 mode = GET_MODE (temp);
5721 }
5722
5723 /* If this is a simple operation applied to an IF_THEN_ELSE, try
5724 applying it to the arms of the IF_THEN_ELSE. This often simplifies
5725 things. Check for cases where both arms are testing the same
5726 condition.
5727
5728 Don't do anything if all operands are very simple. */
5729
5730 if ((BINARY_P (x)
5731 && ((!OBJECT_P (XEXP (x, 0))
5732 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5733 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))
5734 || (!OBJECT_P (XEXP (x, 1))
5735 && ! (GET_CODE (XEXP (x, 1)) == SUBREG
5736 && OBJECT_P (SUBREG_REG (XEXP (x, 1)))))))
5737 || (UNARY_P (x)
5738 && (!OBJECT_P (XEXP (x, 0))
5739 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5740 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))))
5741 {
5742 rtx cond, true_rtx, false_rtx;
5743
5744 cond = if_then_else_cond (x, &true_rtx, &false_rtx);
5745 if (cond != 0
5746 /* If everything is a comparison, what we have is highly unlikely
5747 to be simpler, so don't use it. */
5748 && ! (COMPARISON_P (x)
5749 && (COMPARISON_P (true_rtx) || COMPARISON_P (false_rtx)))
5750 /* Similarly, if we end up with one of the expressions the same
5751 as the original, it is certainly not simpler. */
5752 && ! rtx_equal_p (x, true_rtx)
5753 && ! rtx_equal_p (x, false_rtx))
5754 {
5755 rtx cop1 = const0_rtx;
5756 enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1);
5757
5758 if (cond_code == NE && COMPARISON_P (cond))
5759 return x;
5760
5761 /* Simplify the alternative arms; this may collapse the true and
5762 false arms to store-flag values. Be careful to use copy_rtx
5763 here since true_rtx or false_rtx might share RTL with x as a
5764 result of the if_then_else_cond call above. */
5765 true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5766 false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5767
5768 /* If true_rtx and false_rtx are not general_operands, an if_then_else
5769 is unlikely to be simpler. */
5770 if (general_operand (true_rtx, VOIDmode)
5771 && general_operand (false_rtx, VOIDmode))
5772 {
5773 enum rtx_code reversed;
5774
5775 /* Restarting if we generate a store-flag expression will cause
5776 us to loop. Just drop through in this case. */
5777
5778 /* If the result values are STORE_FLAG_VALUE and zero, we can
5779 just make the comparison operation. */
5780 if (true_rtx == const_true_rtx && false_rtx == const0_rtx)
5781 x = simplify_gen_relational (cond_code, mode, VOIDmode,
5782 cond, cop1);
5783 else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
5784 && ((reversed = reversed_comparison_code_parts
5785 (cond_code, cond, cop1, NULL))
5786 != UNKNOWN))
5787 x = simplify_gen_relational (reversed, mode, VOIDmode,
5788 cond, cop1);
5789
5790 /* Likewise, we can make the negate of a comparison operation
5791 if the result values are - STORE_FLAG_VALUE and zero. */
5792 else if (CONST_INT_P (true_rtx)
5793 && INTVAL (true_rtx) == - STORE_FLAG_VALUE
5794 && false_rtx == const0_rtx)
5795 x = simplify_gen_unary (NEG, mode,
5796 simplify_gen_relational (cond_code,
5797 mode, VOIDmode,
5798 cond, cop1),
5799 mode);
5800 else if (CONST_INT_P (false_rtx)
5801 && INTVAL (false_rtx) == - STORE_FLAG_VALUE
5802 && true_rtx == const0_rtx
5803 && ((reversed = reversed_comparison_code_parts
5804 (cond_code, cond, cop1, NULL))
5805 != UNKNOWN))
5806 x = simplify_gen_unary (NEG, mode,
5807 simplify_gen_relational (reversed,
5808 mode, VOIDmode,
5809 cond, cop1),
5810 mode);
5811 else
5812 return gen_rtx_IF_THEN_ELSE (mode,
5813 simplify_gen_relational (cond_code,
5814 mode,
5815 VOIDmode,
5816 cond,
5817 cop1),
5818 true_rtx, false_rtx);
5819
5820 code = GET_CODE (x);
5821 op0_mode = VOIDmode;
5822 }
5823 }
5824 }
5825
5826 /* First see if we can apply the inverse distributive law. */
5827 if (code == PLUS || code == MINUS
5828 || code == AND || code == IOR || code == XOR)
5829 {
5830 x = apply_distributive_law (x);
5831 code = GET_CODE (x);
5832 op0_mode = VOIDmode;
5833 }
5834
5835 /* If CODE is an associative operation not otherwise handled, see if we
5836 can associate some operands. This can win if they are constants or
5837 if they are logically related (i.e. (a & b) & a). */
5838 if ((code == PLUS || code == MINUS || code == MULT || code == DIV
5839 || code == AND || code == IOR || code == XOR
5840 || code == SMAX || code == SMIN || code == UMAX || code == UMIN)
5841 && ((INTEGRAL_MODE_P (mode) && code != DIV)
5842 || (flag_associative_math && FLOAT_MODE_P (mode))))
5843 {
5844 if (GET_CODE (XEXP (x, 0)) == code)
5845 {
5846 rtx other = XEXP (XEXP (x, 0), 0);
5847 rtx inner_op0 = XEXP (XEXP (x, 0), 1);
5848 rtx inner_op1 = XEXP (x, 1);
5849 rtx inner;
5850
5851 /* Make sure we pass the constant operand if any as the second
5852 one if this is a commutative operation. */
5853 if (CONSTANT_P (inner_op0) && COMMUTATIVE_ARITH_P (x))
5854 std::swap (inner_op0, inner_op1);
5855 inner = simplify_binary_operation (code == MINUS ? PLUS
5856 : code == DIV ? MULT
5857 : code,
5858 mode, inner_op0, inner_op1);
5859
5860 /* For commutative operations, try the other pair if that one
5861 didn't simplify. */
5862 if (inner == 0 && COMMUTATIVE_ARITH_P (x))
5863 {
5864 other = XEXP (XEXP (x, 0), 1);
5865 inner = simplify_binary_operation (code, mode,
5866 XEXP (XEXP (x, 0), 0),
5867 XEXP (x, 1));
5868 }
5869
5870 if (inner)
5871 return simplify_gen_binary (code, mode, other, inner);
5872 }
5873 }
5874
5875 /* A little bit of algebraic simplification here. */
5876 switch (code)
5877 {
5878 case MEM:
5879 /* Ensure that our address has any ASHIFTs converted to MULT in case
5880 address-recognizing predicates are called later. */
5881 temp = make_compound_operation (XEXP (x, 0), MEM);
5882 SUBST (XEXP (x, 0), temp);
5883 break;
5884
5885 case SUBREG:
5886 if (op0_mode == VOIDmode)
5887 op0_mode = GET_MODE (SUBREG_REG (x));
5888
5889 /* See if this can be moved to simplify_subreg. */
5890 if (CONSTANT_P (SUBREG_REG (x))
5891 && known_eq (subreg_lowpart_offset (mode, op0_mode), SUBREG_BYTE (x))
5892 /* Don't call gen_lowpart if the inner mode
5893 is VOIDmode and we cannot simplify it, as SUBREG without
5894 inner mode is invalid. */
5895 && (GET_MODE (SUBREG_REG (x)) != VOIDmode
5896 || gen_lowpart_common (mode, SUBREG_REG (x))))
5897 return gen_lowpart (mode, SUBREG_REG (x));
5898
5899 if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC)
5900 break;
5901 {
5902 rtx temp;
5903 temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,
5904 SUBREG_BYTE (x));
5905 if (temp)
5906 return temp;
5907
5908 /* If op is known to have all lower bits zero, the result is zero. */
5909 scalar_int_mode int_mode, int_op0_mode;
5910 if (!in_dest
5911 && is_a <scalar_int_mode> (mode, &int_mode)
5912 && is_a <scalar_int_mode> (op0_mode, &int_op0_mode)
5913 && (GET_MODE_PRECISION (int_mode)
5914 < GET_MODE_PRECISION (int_op0_mode))
5915 && known_eq (subreg_lowpart_offset (int_mode, int_op0_mode),
5916 SUBREG_BYTE (x))
5917 && HWI_COMPUTABLE_MODE_P (int_op0_mode)
5918 && ((nonzero_bits (SUBREG_REG (x), int_op0_mode)
5919 & GET_MODE_MASK (int_mode)) == 0)
5920 && !side_effects_p (SUBREG_REG (x)))
5921 return CONST0_RTX (int_mode);
5922 }
5923
5924 /* Don't change the mode of the MEM if that would change the meaning
5925 of the address. */
5926 if (MEM_P (SUBREG_REG (x))
5927 && (MEM_VOLATILE_P (SUBREG_REG (x))
5928 || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0),
5929 MEM_ADDR_SPACE (SUBREG_REG (x)))))
5930 return gen_rtx_CLOBBER (mode, const0_rtx);
5931
5932 /* Note that we cannot do any narrowing for non-constants since
5933 we might have been counting on using the fact that some bits were
5934 zero. We now do this in the SET. */
5935
5936 break;
5937
5938 case NEG:
5939 temp = expand_compound_operation (XEXP (x, 0));
5940
5941 /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
5942 replaced by (lshiftrt X C). This will convert
5943 (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y). */
5944
5945 if (GET_CODE (temp) == ASHIFTRT
5946 && CONST_INT_P (XEXP (temp, 1))
5947 && INTVAL (XEXP (temp, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
5948 return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
5949 INTVAL (XEXP (temp, 1)));
5950
5951 /* If X has only a single bit that might be nonzero, say, bit I, convert
5952 (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of
5953 MODE minus 1. This will convert (neg (zero_extract X 1 Y)) to
5954 (sign_extract X 1 Y). But only do this if TEMP isn't a register
5955 or a SUBREG of one since we'd be making the expression more
5956 complex if it was just a register. */
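/* Illustration (SImode): if only bit 0 of TEMP can be nonzero,
   (neg TEMP) becomes (ashiftrt (ashift TEMP (const_int 31))
   (const_int 31)), which maps 0 to 0 and 1 to -1, exactly as
   negation does.  */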
5957
5958 if (!REG_P (temp)
5959 && ! (GET_CODE (temp) == SUBREG
5960 && REG_P (SUBREG_REG (temp)))
5961 && is_a <scalar_int_mode> (mode, &int_mode)
5962 && (i = exact_log2 (nonzero_bits (temp, int_mode))) >= 0)
5963 {
5964 rtx temp1 = simplify_shift_const
5965 (NULL_RTX, ASHIFTRT, int_mode,
5966 simplify_shift_const (NULL_RTX, ASHIFT, int_mode, temp,
5967 GET_MODE_PRECISION (int_mode) - 1 - i),
5968 GET_MODE_PRECISION (int_mode) - 1 - i);
5969
5970 /* If all we did was surround TEMP with the two shifts, we
5971 haven't improved anything, so don't use it. Otherwise,
5972 we are better off with TEMP1. */
5973 if (GET_CODE (temp1) != ASHIFTRT
5974 || GET_CODE (XEXP (temp1, 0)) != ASHIFT
5975 || XEXP (XEXP (temp1, 0), 0) != temp)
5976 return temp1;
5977 }
5978 break;
5979
5980 case TRUNCATE:
5981 /* We can't handle truncation to a partial integer mode here
5982 because we don't know the real bitsize of the partial
5983 integer mode. */
5984 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
5985 break;
5986
5987 if (HWI_COMPUTABLE_MODE_P (mode))
5988 SUBST (XEXP (x, 0),
5989 force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
5990 GET_MODE_MASK (mode), 0));
5991
5992 /* We can truncate a constant value and return it. */
5993 if (CONST_INT_P (XEXP (x, 0)))
5994 return gen_int_mode (INTVAL (XEXP (x, 0)), mode);
5995
5996 /* Similarly to what we do in simplify-rtx.c, a truncate of a register
5997 whose value is a comparison can be replaced with a subreg if
5998 STORE_FLAG_VALUE permits. */
5999 if (HWI_COMPUTABLE_MODE_P (mode)
6000 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
6001 && (temp = get_last_value (XEXP (x, 0)))
6002 && COMPARISON_P (temp))
6003 return gen_lowpart (mode, XEXP (x, 0));
6004 break;
6005
6006 case CONST:
6007 /* (const (const X)) can become (const X). Do it this way rather than
6008 returning the inner CONST since CONST can be shared with a
6009 REG_EQUAL note. */
6010 if (GET_CODE (XEXP (x, 0)) == CONST)
6011 SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
6012 break;
6013
6014 case LO_SUM:
6015 /* Convert (lo_sum (high FOO) FOO) to FOO. This is necessary so we
6016 can add in an offset. find_split_point will split this address up
6017 again if it doesn't match. */
6018 if (HAVE_lo_sum && GET_CODE (XEXP (x, 0)) == HIGH
6019 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6020 return XEXP (x, 1);
6021 break;
6022
6023 case PLUS:
6024 /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
6025 when c is (const_int (pow2 + 1) / 2) is a sign extension of a
6026 bit-field and can be replaced by either a sign_extend or a
6027 sign_extract. The `and' may be a zero_extend and the two
6028 <c>, -<c> constants may be reversed. */
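/* Worked example with a 3-bit field (i == 2):
   (plus (xor (and X (const_int 7)) (const_int 4)) (const_int -4))
   maps field values 0..3 to 0..3 and 4..7 to -4..-1, i.e. it
   sign-extends the field, so in SImode it becomes
   (ashiftrt (ashift X (const_int 29)) (const_int 29)).  */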
6029 if (GET_CODE (XEXP (x, 0)) == XOR
6030 && is_a <scalar_int_mode> (mode, &int_mode)
6031 && CONST_INT_P (XEXP (x, 1))
6032 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
6033 && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1))
6034 && ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
6035 || (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
6036 && HWI_COMPUTABLE_MODE_P (int_mode)
6037 && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
6038 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
6039 && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
6040 == (HOST_WIDE_INT_1U << (i + 1)) - 1))
6041 || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
6042 && known_eq ((GET_MODE_PRECISION
6043 (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))),
6044 (unsigned int) i + 1))))
6045 return simplify_shift_const
6046 (NULL_RTX, ASHIFTRT, int_mode,
6047 simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6048 XEXP (XEXP (XEXP (x, 0), 0), 0),
6049 GET_MODE_PRECISION (int_mode) - (i + 1)),
6050 GET_MODE_PRECISION (int_mode) - (i + 1));
6051
6052 /* If only the low-order bit of X is possibly nonzero, (plus x -1)
6053 can become (ashiftrt (ashift (xor x 1) C) C) where C is
6054 the bitsize of the mode - 1. This allows simplification of
6055 "a = (b & 8) == 0;" */
6056 if (XEXP (x, 1) == constm1_rtx
6057 && !REG_P (XEXP (x, 0))
6058 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
6059 && REG_P (SUBREG_REG (XEXP (x, 0))))
6060 && is_a <scalar_int_mode> (mode, &int_mode)
6061 && nonzero_bits (XEXP (x, 0), int_mode) == 1)
6062 return simplify_shift_const
6063 (NULL_RTX, ASHIFTRT, int_mode,
6064 simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6065 gen_rtx_XOR (int_mode, XEXP (x, 0),
6066 const1_rtx),
6067 GET_MODE_PRECISION (int_mode) - 1),
6068 GET_MODE_PRECISION (int_mode) - 1);
6069
6070 /* If we are adding two things that have no bits in common, convert
6071 the addition into an IOR. This will often be further simplified,
6072 for example in cases like ((a & 1) + (a & 2)), which can
6073 become a & 3. */
6074
6075 if (HWI_COMPUTABLE_MODE_P (mode)
6076 && (nonzero_bits (XEXP (x, 0), mode)
6077 & nonzero_bits (XEXP (x, 1), mode)) == 0)
6078 {
6079 /* Try to simplify the expression further. */
6080 rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
6081 temp = combine_simplify_rtx (tor, VOIDmode, in_dest, 0);
6082
6083 /* If we could, great. If not, do not go ahead with the IOR
6084 replacement, since PLUS appears in many special purpose
6085 address arithmetic instructions. */
6086 if (GET_CODE (temp) != CLOBBER
6087 && (GET_CODE (temp) != IOR
6088 || ((XEXP (temp, 0) != XEXP (x, 0)
6089 || XEXP (temp, 1) != XEXP (x, 1))
6090 && (XEXP (temp, 0) != XEXP (x, 1)
6091 || XEXP (temp, 1) != XEXP (x, 0)))))
6092 return temp;
6093 }
6094
6095 /* Canonicalize x + x into x << 1. */
6096 if (GET_MODE_CLASS (mode) == MODE_INT
6097 && rtx_equal_p (XEXP (x, 0), XEXP (x, 1))
6098 && !side_effects_p (XEXP (x, 0)))
6099 return simplify_gen_binary (ASHIFT, mode, XEXP (x, 0), const1_rtx);
6100
6101 break;
6102
6103 case MINUS:
6104 /* (minus <foo> (and <foo> (const_int -pow2))) becomes
6105 (and <foo> (const_int pow2-1)) */
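/* E.g. (minus A (and A (const_int -8))) becomes
   (and A (const_int 7)): subtracting A rounded down to a
   multiple of 8 leaves just the low three bits.  */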
6106 if (is_a <scalar_int_mode> (mode, &int_mode)
6107 && GET_CODE (XEXP (x, 1)) == AND
6108 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
6109 && pow2p_hwi (-UINTVAL (XEXP (XEXP (x, 1), 1)))
6110 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
6111 return simplify_and_const_int (NULL_RTX, int_mode, XEXP (x, 0),
6112 -INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
6113 break;
6114
6115 case MULT:
6116 /* If we have (mult (plus A B) C), apply the distributive law and then
6117 the inverse distributive law to see if things simplify. This
6118 occurs mostly in addresses, often when unrolling loops. */
6119
6120 if (GET_CODE (XEXP (x, 0)) == PLUS)
6121 {
6122 rtx result = distribute_and_simplify_rtx (x, 0);
6123 if (result)
6124 return result;
6125 }
6126
6127 /* Try simplifying a*(b/c) as (a*b)/c. */
6128 if (FLOAT_MODE_P (mode) && flag_associative_math
6129 && GET_CODE (XEXP (x, 0)) == DIV)
6130 {
6131 rtx tem = simplify_binary_operation (MULT, mode,
6132 XEXP (XEXP (x, 0), 0),
6133 XEXP (x, 1));
6134 if (tem)
6135 return simplify_gen_binary (DIV, mode, tem, XEXP (XEXP (x, 0), 1));
6136 }
6137 break;
6138
6139 case UDIV:
6140 /* If this is a divide by a power of two, treat it as a shift if
6141 its first operand is a shift. */
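/* E.g. (udiv (lshiftrt X (const_int 2)) (const_int 4)) is treated
   as (lshiftrt (lshiftrt X (const_int 2)) (const_int 2)), which
   simplify_shift_const can then merge into
   (lshiftrt X (const_int 4)).  */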
6142 if (is_a <scalar_int_mode> (mode, &int_mode)
6143 && CONST_INT_P (XEXP (x, 1))
6144 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
6145 && (GET_CODE (XEXP (x, 0)) == ASHIFT
6146 || GET_CODE (XEXP (x, 0)) == LSHIFTRT
6147 || GET_CODE (XEXP (x, 0)) == ASHIFTRT
6148 || GET_CODE (XEXP (x, 0)) == ROTATE
6149 || GET_CODE (XEXP (x, 0)) == ROTATERT))
6150 return simplify_shift_const (NULL_RTX, LSHIFTRT, int_mode,
6151 XEXP (x, 0), i);
6152 break;
6153
6154 case EQ: case NE:
6155 case GT: case GTU: case GE: case GEU:
6156 case LT: case LTU: case LE: case LEU:
6157 case UNEQ: case LTGT:
6158 case UNGT: case UNGE:
6159 case UNLT: case UNLE:
6160 case UNORDERED: case ORDERED:
6161 /* If the first operand is a condition code, we can't do anything
6162 with it. */
6163 if (GET_CODE (XEXP (x, 0)) == COMPARE
6164 || (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC
6165 && ! CC0_P (XEXP (x, 0))))
6166 {
6167 rtx op0 = XEXP (x, 0);
6168 rtx op1 = XEXP (x, 1);
6169 enum rtx_code new_code;
6170
6171 if (GET_CODE (op0) == COMPARE)
6172 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
6173
6174 /* Simplify our comparison, if possible. */
6175 new_code = simplify_comparison (code, &op0, &op1);
6176
6177 /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X
6178 if only the low-order bit is possibly nonzero in X (such as when
6179 X is a ZERO_EXTRACT of one bit). Similarly, we can convert EQ to
6180 (xor X 1) or (minus 1 X); we use the former. Finally, if X is
6181 known to be either 0 or -1, NE becomes a NEG and EQ becomes
6182 (plus X 1).
6183
6184 Remove any ZERO_EXTRACT we made when thinking this was a
6185 comparison. It may now be simpler to use, e.g., an AND. If a
6186 ZERO_EXTRACT is indeed appropriate, it will be placed back by
6187 the call to make_compound_operation in the SET case.
6188
6189 Don't apply these optimizations if the caller would
6190 prefer a comparison rather than a value.
6191 E.g., for the condition in an IF_THEN_ELSE most targets need
6192 an explicit comparison. */
6193
6194 if (in_cond)
6195 ;
6196
6197 else if (STORE_FLAG_VALUE == 1
6198 && new_code == NE
6199 && is_int_mode (mode, &int_mode)
6200 && op1 == const0_rtx
6201 && int_mode == GET_MODE (op0)
6202 && nonzero_bits (op0, int_mode) == 1)
6203 return gen_lowpart (int_mode,
6204 expand_compound_operation (op0));
6205
6206 else if (STORE_FLAG_VALUE == 1
6207 && new_code == NE
6208 && is_int_mode (mode, &int_mode)
6209 && op1 == const0_rtx
6210 && int_mode == GET_MODE (op0)
6211 && (num_sign_bit_copies (op0, int_mode)
6212 == GET_MODE_PRECISION (int_mode)))
6213 {
6214 op0 = expand_compound_operation (op0);
6215 return simplify_gen_unary (NEG, int_mode,
6216 gen_lowpart (int_mode, op0),
6217 int_mode);
6218 }
6219
6220 else if (STORE_FLAG_VALUE == 1
6221 && new_code == EQ
6222 && is_int_mode (mode, &int_mode)
6223 && op1 == const0_rtx
6224 && int_mode == GET_MODE (op0)
6225 && nonzero_bits (op0, int_mode) == 1)
6226 {
6227 op0 = expand_compound_operation (op0);
6228 return simplify_gen_binary (XOR, int_mode,
6229 gen_lowpart (int_mode, op0),
6230 const1_rtx);
6231 }
6232
6233 else if (STORE_FLAG_VALUE == 1
6234 && new_code == EQ
6235 && is_int_mode (mode, &int_mode)
6236 && op1 == const0_rtx
6237 && int_mode == GET_MODE (op0)
6238 && (num_sign_bit_copies (op0, int_mode)
6239 == GET_MODE_PRECISION (int_mode)))
6240 {
6241 op0 = expand_compound_operation (op0);
6242 return plus_constant (int_mode, gen_lowpart (int_mode, op0), 1);
6243 }
6244
6245 /* If STORE_FLAG_VALUE is -1, we have cases similar to
6246 those above. */
6247 if (in_cond)
6248 ;
6249
6250 else if (STORE_FLAG_VALUE == -1
6251 && new_code == NE
6252 && is_int_mode (mode, &int_mode)
6253 && op1 == const0_rtx
6254 && int_mode == GET_MODE (op0)
6255 && (num_sign_bit_copies (op0, int_mode)
6256 == GET_MODE_PRECISION (int_mode)))
6257 return gen_lowpart (int_mode, expand_compound_operation (op0));
6258
6259 else if (STORE_FLAG_VALUE == -1
6260 && new_code == NE
6261 && is_int_mode (mode, &int_mode)
6262 && op1 == const0_rtx
6263 && int_mode == GET_MODE (op0)
6264 && nonzero_bits (op0, int_mode) == 1)
6265 {
6266 op0 = expand_compound_operation (op0);
6267 return simplify_gen_unary (NEG, int_mode,
6268 gen_lowpart (int_mode, op0),
6269 int_mode);
6270 }
6271
6272 else if (STORE_FLAG_VALUE == -1
6273 && new_code == EQ
6274 && is_int_mode (mode, &int_mode)
6275 && op1 == const0_rtx
6276 && int_mode == GET_MODE (op0)
6277 && (num_sign_bit_copies (op0, int_mode)
6278 == GET_MODE_PRECISION (int_mode)))
6279 {
6280 op0 = expand_compound_operation (op0);
6281 return simplify_gen_unary (NOT, int_mode,
6282 gen_lowpart (int_mode, op0),
6283 int_mode);
6284 }
6285
6286 /* If X is 0/1, (eq X 0) is X-1. */
6287 else if (STORE_FLAG_VALUE == -1
6288 && new_code == EQ
6289 && is_int_mode (mode, &int_mode)
6290 && op1 == const0_rtx
6291 && int_mode == GET_MODE (op0)
6292 && nonzero_bits (op0, int_mode) == 1)
6293 {
6294 op0 = expand_compound_operation (op0);
6295 return plus_constant (int_mode, gen_lowpart (int_mode, op0), -1);
6296 }
6297
6298 /* If STORE_FLAG_VALUE says to just test the sign bit and X has just
6299 one bit that might be nonzero, we can convert (ne x 0) to
6300 (ashift x c) where C puts the bit in the sign bit. Remove any
6301 AND with STORE_FLAG_VALUE when we are done, since we are only
6302 going to test the sign bit. */
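/* Illustration (SImode, STORE_FLAG_VALUE == 0x80000000): if only
   bit 5 of X can be nonzero, (ne X (const_int 0)) becomes
   (ashift X (const_int 26)), moving that bit into the sign
   position.  */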
6303 if (new_code == NE
6304 && is_int_mode (mode, &int_mode)
6305 && HWI_COMPUTABLE_MODE_P (int_mode)
6306 && val_signbit_p (int_mode, STORE_FLAG_VALUE)
6307 && op1 == const0_rtx
6308 && int_mode == GET_MODE (op0)
6309 && (i = exact_log2 (nonzero_bits (op0, int_mode))) >= 0)
6310 {
6311 x = simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6312 expand_compound_operation (op0),
6313 GET_MODE_PRECISION (int_mode) - 1 - i);
6314 if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
6315 return XEXP (x, 0);
6316 else
6317 return x;
6318 }
6319
6320 /* If the code changed, return a whole new comparison.
6321 We also need to avoid using SUBST in cases where
6322 simplify_comparison has widened a comparison with a CONST_INT,
6323 since in that case the wider CONST_INT may fail the sanity
6324 checks in do_SUBST. */
6325 if (new_code != code
6326 || (CONST_INT_P (op1)
6327 && GET_MODE (op0) != GET_MODE (XEXP (x, 0))
6328 && GET_MODE (op0) != GET_MODE (XEXP (x, 1))))
6329 return gen_rtx_fmt_ee (new_code, mode, op0, op1);
6330
6331 /* Otherwise, keep this operation, but maybe change its operands.
6332 This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR). */
6333 SUBST (XEXP (x, 0), op0);
6334 SUBST (XEXP (x, 1), op1);
6335 }
6336 break;
6337
6338 case IF_THEN_ELSE:
6339 return simplify_if_then_else (x);
6340
6341 case ZERO_EXTRACT:
6342 case SIGN_EXTRACT:
6343 case ZERO_EXTEND:
6344 case SIGN_EXTEND:
6345 /* If we are processing SET_DEST, we are done. */
6346 if (in_dest)
6347 return x;
6348
6349 return expand_compound_operation (x);
6350
6351 case SET:
6352 return simplify_set (x);
6353
6354 case AND:
6355 case IOR:
6356 return simplify_logical (x);
6357
6358 case ASHIFT:
6359 case LSHIFTRT:
6360 case ASHIFTRT:
6361 case ROTATE:
6362 case ROTATERT:
6363 /* If this is a shift by a constant amount, simplify it. */
6364 if (CONST_INT_P (XEXP (x, 1)))
6365 return simplify_shift_const (x, code, mode, XEXP (x, 0),
6366 INTVAL (XEXP (x, 1)));
6367
6368 else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
6369 SUBST (XEXP (x, 1),
6370 force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
6371 (HOST_WIDE_INT_1U
6372 << exact_log2 (GET_MODE_UNIT_BITSIZE
6373 (GET_MODE (x))))
6374 - 1,
6375 0));
6376 break;
6377
6378 default:
6379 break;
6380 }
6381
6382 return x;
6383 }
6384
6385 /* Simplify X, an IF_THEN_ELSE expression. Return the new expression. */
6386
6387 static rtx
6388 simplify_if_then_else (rtx x)
6389 {
6390 machine_mode mode = GET_MODE (x);
6391 rtx cond = XEXP (x, 0);
6392 rtx true_rtx = XEXP (x, 1);
6393 rtx false_rtx = XEXP (x, 2);
6394 enum rtx_code true_code = GET_CODE (cond);
6395 int comparison_p = COMPARISON_P (cond);
6396 rtx temp;
6397 int i;
6398 enum rtx_code false_code;
6399 rtx reversed;
6400 scalar_int_mode int_mode, inner_mode;
6401
6402 /* Simplify storing of the truth value. */
6403 if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
6404 return simplify_gen_relational (true_code, mode, VOIDmode,
6405 XEXP (cond, 0), XEXP (cond, 1));
6406
6407 /* Also when the truth value has to be reversed. */
6408 if (comparison_p
6409 && true_rtx == const0_rtx && false_rtx == const_true_rtx
6410 && (reversed = reversed_comparison (cond, mode)))
6411 return reversed;
6412
6413 /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
6414 in it is being compared against certain values. Get the true and false
6415 comparisons and see if that says anything about the value of each arm. */
6416
6417 if (comparison_p
6418 && ((false_code = reversed_comparison_code (cond, NULL))
6419 != UNKNOWN)
6420 && REG_P (XEXP (cond, 0)))
6421 {
6422 HOST_WIDE_INT nzb;
6423 rtx from = XEXP (cond, 0);
6424 rtx true_val = XEXP (cond, 1);
6425 rtx false_val = true_val;
6426 int swapped = 0;
6427
6428 /* If FALSE_CODE is EQ, swap the codes and arms. */
6429
6430 if (false_code == EQ)
6431 {
6432 swapped = 1, true_code = EQ, false_code = NE;
6433 std::swap (true_rtx, false_rtx);
6434 }
6435
6436 scalar_int_mode from_mode;
6437 if (is_a <scalar_int_mode> (GET_MODE (from), &from_mode))
6438 {
6439 /* If we are comparing against zero and the expression being
6440 tested has only a single bit that might be nonzero, that is
6441 its value when it is not equal to zero. Similarly if it is
6442 known to be -1 or 0. */
6443 if (true_code == EQ
6444 && true_val == const0_rtx
6445 && pow2p_hwi (nzb = nonzero_bits (from, from_mode)))
6446 {
6447 false_code = EQ;
6448 false_val = gen_int_mode (nzb, from_mode);
6449 }
6450 else if (true_code == EQ
6451 && true_val == const0_rtx
6452 && (num_sign_bit_copies (from, from_mode)
6453 == GET_MODE_PRECISION (from_mode)))
6454 {
6455 false_code = EQ;
6456 false_val = constm1_rtx;
6457 }
6458 }
6459
6460 /* Now simplify an arm if we know the value of the register in the
6461 branch and it is used in the arm. Be careful due to the potential
6462 of locally-shared RTL. */
6463
6464 if (reg_mentioned_p (from, true_rtx))
6465 true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code,
6466 from, true_val),
6467 pc_rtx, pc_rtx, 0, 0, 0);
6468 if (reg_mentioned_p (from, false_rtx))
6469 false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
6470 from, false_val),
6471 pc_rtx, pc_rtx, 0, 0, 0);
6472
6473 SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx);
6474 SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx);
6475
6476 true_rtx = XEXP (x, 1);
6477 false_rtx = XEXP (x, 2);
6478 true_code = GET_CODE (cond);
6479 }
6480
6481 /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
6482 reversed, do so to avoid needing two sets of patterns for
6483 subtract-and-branch insns. Similarly if we have a constant in the true
6484 arm, the false arm is the same as the first operand of the comparison, or
6485 the false arm is more complicated than the true arm. */
6486
6487 if (comparison_p
6488 && reversed_comparison_code (cond, NULL) != UNKNOWN
6489 && (true_rtx == pc_rtx
6490 || (CONSTANT_P (true_rtx)
6491 && !CONST_INT_P (false_rtx) && false_rtx != pc_rtx)
6492 || true_rtx == const0_rtx
6493 || (OBJECT_P (true_rtx) && !OBJECT_P (false_rtx))
6494 || (GET_CODE (true_rtx) == SUBREG && OBJECT_P (SUBREG_REG (true_rtx))
6495 && !OBJECT_P (false_rtx))
6496 || reg_mentioned_p (true_rtx, false_rtx)
6497 || rtx_equal_p (false_rtx, XEXP (cond, 0))))
6498 {
6499 true_code = reversed_comparison_code (cond, NULL);
6500 SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond)));
6501 SUBST (XEXP (x, 1), false_rtx);
6502 SUBST (XEXP (x, 2), true_rtx);
6503
6504 std::swap (true_rtx, false_rtx);
6505 cond = XEXP (x, 0);
6506
6507 /* It is possible that the conditional has been simplified out. */
6508 true_code = GET_CODE (cond);
6509 comparison_p = COMPARISON_P (cond);
6510 }
6511
6512 /* If the two arms are identical, we don't need the comparison. */
6513
6514 if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond))
6515 return true_rtx;
6516
6517 /* Convert a == b ? b : a to "a". */
6518 if (true_code == EQ && ! side_effects_p (cond)
6519 && !HONOR_NANS (mode)
6520 && rtx_equal_p (XEXP (cond, 0), false_rtx)
6521 && rtx_equal_p (XEXP (cond, 1), true_rtx))
6522 return false_rtx;
6523 else if (true_code == NE && ! side_effects_p (cond)
6524 && !HONOR_NANS (mode)
6525 && rtx_equal_p (XEXP (cond, 0), true_rtx)
6526 && rtx_equal_p (XEXP (cond, 1), false_rtx))
6527 return true_rtx;
6528
6529 /* Look for cases where we have (abs x) or (neg (abs X)). */
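   /* E.g. (if_then_else (ge X (const_int 0)) X (neg X)) becomes (abs X),
      and (if_then_else (lt X (const_int 0)) X (neg X)) becomes
      (neg (abs X)).  */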
6530
6531 if (GET_MODE_CLASS (mode) == MODE_INT
6532 && comparison_p
6533 && XEXP (cond, 1) == const0_rtx
6534 && GET_CODE (false_rtx) == NEG
6535 && rtx_equal_p (true_rtx, XEXP (false_rtx, 0))
6536 && rtx_equal_p (true_rtx, XEXP (cond, 0))
6537 && ! side_effects_p (true_rtx))
6538 switch (true_code)
6539 {
6540 case GT:
6541 case GE:
6542 return simplify_gen_unary (ABS, mode, true_rtx, mode);
6543 case LT:
6544 case LE:
6545 return
6546 simplify_gen_unary (NEG, mode,
6547 simplify_gen_unary (ABS, mode, true_rtx, mode),
6548 mode);
6549 default:
6550 break;
6551 }
6552
6553 /* Look for MIN or MAX. */
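   /* E.g. (if_then_else (gt A B) A B) becomes (smax A B), and its
      unsigned counterpart (if_then_else (ltu A B) A B) becomes
      (umin A B).  */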
6554
6555 if ((! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
6556 && comparison_p
6557 && rtx_equal_p (XEXP (cond, 0), true_rtx)
6558 && rtx_equal_p (XEXP (cond, 1), false_rtx)
6559 && ! side_effects_p (cond))
6560 switch (true_code)
6561 {
6562 case GE:
6563 case GT:
6564 return simplify_gen_binary (SMAX, mode, true_rtx, false_rtx);
6565 case LE:
6566 case LT:
6567 return simplify_gen_binary (SMIN, mode, true_rtx, false_rtx);
6568 case GEU:
6569 case GTU:
6570 return simplify_gen_binary (UMAX, mode, true_rtx, false_rtx);
6571 case LEU:
6572 case LTU:
6573 return simplify_gen_binary (UMIN, mode, true_rtx, false_rtx);
6574 default:
6575 break;
6576 }
6577
6578 /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
6579 second operand is zero, this can be done as (OP Z (mult COND C2)) where
6580 C2 = C1 * STORE_FLAG_VALUE. Similarly if OP has an outer ZERO_EXTEND or
6581 SIGN_EXTEND as long as Z is already extended (so we don't destroy it).
6582 We can do this kind of thing in some cases when STORE_FLAG_VALUE is
6583 neither 1 nor -1, but it isn't worth checking for. */
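   /* For example, with STORE_FLAG_VALUE being 1, this rewrites
      (if_then_else (ne A 0) (plus Z (const_int 4)) Z) as
      (plus Z (mult (ne A 0) (const_int 4))); the multiplication
      contributes 4 when the condition holds and 0 otherwise.  */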
6584
6585 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
6586 && comparison_p
6587 && is_int_mode (mode, &int_mode)
6588 && ! side_effects_p (x))
6589 {
6590 rtx t = make_compound_operation (true_rtx, SET);
6591 rtx f = make_compound_operation (false_rtx, SET);
6592 rtx cond_op0 = XEXP (cond, 0);
6593 rtx cond_op1 = XEXP (cond, 1);
6594 enum rtx_code op = UNKNOWN, extend_op = UNKNOWN;
6595 scalar_int_mode m = int_mode;
6596 rtx z = 0, c1 = NULL_RTX;
6597
6598 if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
6599 || GET_CODE (t) == IOR || GET_CODE (t) == XOR
6600 || GET_CODE (t) == ASHIFT
6601 || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT)
6602 && rtx_equal_p (XEXP (t, 0), f))
6603 c1 = XEXP (t, 1), op = GET_CODE (t), z = f;
6604
6605 /* If an identity-zero op is commutative, check whether there
6606 would be a match if we swapped the operands. */
6607 else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR
6608 || GET_CODE (t) == XOR)
6609 && rtx_equal_p (XEXP (t, 1), f))
6610 c1 = XEXP (t, 0), op = GET_CODE (t), z = f;
6611 else if (GET_CODE (t) == SIGN_EXTEND
6612 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6613 && (GET_CODE (XEXP (t, 0)) == PLUS
6614 || GET_CODE (XEXP (t, 0)) == MINUS
6615 || GET_CODE (XEXP (t, 0)) == IOR
6616 || GET_CODE (XEXP (t, 0)) == XOR
6617 || GET_CODE (XEXP (t, 0)) == ASHIFT
6618 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6619 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6620 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6621 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6622 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6623 && (num_sign_bit_copies (f, GET_MODE (f))
6624 > (unsigned int)
6625 (GET_MODE_PRECISION (int_mode)
6626 - GET_MODE_PRECISION (inner_mode))))
6627 {
6628 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6629 extend_op = SIGN_EXTEND;
6630 m = inner_mode;
6631 }
6632 else if (GET_CODE (t) == SIGN_EXTEND
6633 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6634 && (GET_CODE (XEXP (t, 0)) == PLUS
6635 || GET_CODE (XEXP (t, 0)) == IOR
6636 || GET_CODE (XEXP (t, 0)) == XOR)
6637 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6638 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6639 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6640 && (num_sign_bit_copies (f, GET_MODE (f))
6641 > (unsigned int)
6642 (GET_MODE_PRECISION (int_mode)
6643 - GET_MODE_PRECISION (inner_mode))))
6644 {
6645 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6646 extend_op = SIGN_EXTEND;
6647 m = inner_mode;
6648 }
6649 else if (GET_CODE (t) == ZERO_EXTEND
6650 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6651 && (GET_CODE (XEXP (t, 0)) == PLUS
6652 || GET_CODE (XEXP (t, 0)) == MINUS
6653 || GET_CODE (XEXP (t, 0)) == IOR
6654 || GET_CODE (XEXP (t, 0)) == XOR
6655 || GET_CODE (XEXP (t, 0)) == ASHIFT
6656 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6657 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6658 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6659 && HWI_COMPUTABLE_MODE_P (int_mode)
6660 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6661 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6662 && ((nonzero_bits (f, GET_MODE (f))
6663 & ~GET_MODE_MASK (inner_mode))
6664 == 0))
6665 {
6666 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6667 extend_op = ZERO_EXTEND;
6668 m = inner_mode;
6669 }
6670 else if (GET_CODE (t) == ZERO_EXTEND
6671 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6672 && (GET_CODE (XEXP (t, 0)) == PLUS
6673 || GET_CODE (XEXP (t, 0)) == IOR
6674 || GET_CODE (XEXP (t, 0)) == XOR)
6675 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6676 && HWI_COMPUTABLE_MODE_P (int_mode)
6677 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6678 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6679 && ((nonzero_bits (f, GET_MODE (f))
6680 & ~GET_MODE_MASK (inner_mode))
6681 == 0))
6682 {
6683 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6684 extend_op = ZERO_EXTEND;
6685 m = inner_mode;
6686 }
6687
6688 if (z)
6689 {
6690 machine_mode cm = m;
6691 if ((op == ASHIFT || op == LSHIFTRT || op == ASHIFTRT)
6692 && GET_MODE (c1) != VOIDmode)
6693 cm = GET_MODE (c1);
6694 temp = subst (simplify_gen_relational (true_code, cm, VOIDmode,
6695 cond_op0, cond_op1),
6696 pc_rtx, pc_rtx, 0, 0, 0);
6697 temp = simplify_gen_binary (MULT, cm, temp,
6698 simplify_gen_binary (MULT, cm, c1,
6699 const_true_rtx));
6700 temp = subst (temp, pc_rtx, pc_rtx, 0, 0, 0);
6701 temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);
6702
6703 if (extend_op != UNKNOWN)
6704 temp = simplify_gen_unary (extend_op, int_mode, temp, m);
6705
6706 return temp;
6707 }
6708 }
6709
6710 /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or
6711 1 and C1 is a single bit or A is known to be 0 or -1 and C1 is the
6712 negation of a single bit, we can convert this operation to a shift. We
6713 can actually do this more generally, but it doesn't seem worth it. */
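   /* For instance, if A is known to be 0 or 1,
      (if_then_else (ne A 0) (const_int 8) (const_int 0)) becomes
      (ashift A (const_int 3)).  */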
6714
6715 if (true_code == NE
6716 && is_a <scalar_int_mode> (mode, &int_mode)
6717 && XEXP (cond, 1) == const0_rtx
6718 && false_rtx == const0_rtx
6719 && CONST_INT_P (true_rtx)
6720 && ((nonzero_bits (XEXP (cond, 0), int_mode) == 1
6721 && (i = exact_log2 (UINTVAL (true_rtx))) >= 0)
6722 || ((num_sign_bit_copies (XEXP (cond, 0), int_mode)
6723 == GET_MODE_PRECISION (int_mode))
6724 && (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
6725 return
6726 simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6727 gen_lowpart (int_mode, XEXP (cond, 0)), i);
6728
6729 /* (IF_THEN_ELSE (NE A 0) C1 0) is A or a zero-extend of A if the only
6730 non-zero bit in A is C1. */
6731 if (true_code == NE && XEXP (cond, 1) == const0_rtx
6732 && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
6733 && is_a <scalar_int_mode> (mode, &int_mode)
6734 && is_a <scalar_int_mode> (GET_MODE (XEXP (cond, 0)), &inner_mode)
6735 && (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))
6736 == nonzero_bits (XEXP (cond, 0), inner_mode)
6737 && (i = exact_log2 (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))) >= 0)
6738 {
6739 rtx val = XEXP (cond, 0);
6740 if (inner_mode == int_mode)
6741 return val;
6742 else if (GET_MODE_PRECISION (inner_mode) < GET_MODE_PRECISION (int_mode))
6743 return simplify_gen_unary (ZERO_EXTEND, int_mode, val, inner_mode);
6744 }
6745
6746 return x;
6747 }
6748
6749 /* Simplify X, a SET expression. Return the new expression. */
6750
6751 static rtx
6752 simplify_set (rtx x)
6753 {
6754 rtx src = SET_SRC (x);
6755 rtx dest = SET_DEST (x);
6756 machine_mode mode
6757 = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest);
6758 rtx_insn *other_insn;
6759 rtx *cc_use;
6760 scalar_int_mode int_mode;
6761
6762 /* (set (pc) (return)) gets written as (return). */
6763 if (GET_CODE (dest) == PC && ANY_RETURN_P (src))
6764 return src;
6765
6766 /* Now that we know for sure which bits of SRC we are using, see if we can
6767 simplify the expression for the object knowing that we only need the
6768 low-order bits. */
6769
6770 if (GET_MODE_CLASS (mode) == MODE_INT && HWI_COMPUTABLE_MODE_P (mode))
6771 {
6772 src = force_to_mode (src, mode, HOST_WIDE_INT_M1U, 0);
6773 SUBST (SET_SRC (x), src);
6774 }
6775
6776 /* If we are setting CC0 or if the source is a COMPARE, look for the use of
6777 the comparison result and try to simplify it unless we already have used
6778 undobuf.other_insn. */
6779 if ((GET_MODE_CLASS (mode) == MODE_CC
6780 || GET_CODE (src) == COMPARE
6781 || CC0_P (dest))
6782 && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
6783 && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn)
6784 && COMPARISON_P (*cc_use)
6785 && rtx_equal_p (XEXP (*cc_use, 0), dest))
6786 {
6787 enum rtx_code old_code = GET_CODE (*cc_use);
6788 enum rtx_code new_code;
6789 rtx op0, op1, tmp;
6790 int other_changed = 0;
6791 rtx inner_compare = NULL_RTX;
6792 machine_mode compare_mode = GET_MODE (dest);
6793
6794 if (GET_CODE (src) == COMPARE)
6795 {
6796 op0 = XEXP (src, 0), op1 = XEXP (src, 1);
6797 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
6798 {
6799 inner_compare = op0;
6800 op0 = XEXP (inner_compare, 0), op1 = XEXP (inner_compare, 1);
6801 }
6802 }
6803 else
6804 op0 = src, op1 = CONST0_RTX (GET_MODE (src));
6805
6806 tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
6807 op0, op1);
6808 if (!tmp)
6809 new_code = old_code;
6810 else if (!CONSTANT_P (tmp))
6811 {
6812 new_code = GET_CODE (tmp);
6813 op0 = XEXP (tmp, 0);
6814 op1 = XEXP (tmp, 1);
6815 }
6816 else
6817 {
6818 rtx pat = PATTERN (other_insn);
6819 undobuf.other_insn = other_insn;
6820 SUBST (*cc_use, tmp);
6821
6822 /* Attempt to simplify CC user. */
6823 if (GET_CODE (pat) == SET)
6824 {
6825 rtx new_rtx = simplify_rtx (SET_SRC (pat));
6826 if (new_rtx != NULL_RTX)
6827 SUBST (SET_SRC (pat), new_rtx);
6828 }
6829
6830 /* Convert X into a no-op move. */
6831 SUBST (SET_DEST (x), pc_rtx);
6832 SUBST (SET_SRC (x), pc_rtx);
6833 return x;
6834 }
6835
6836 /* Simplify our comparison, if possible. */
6837 new_code = simplify_comparison (new_code, &op0, &op1);
6838
6839 #ifdef SELECT_CC_MODE
6840 /* If this machine has CC modes other than CCmode, check to see if we
6841 need to use a different CC mode here. */
6842 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
6843 compare_mode = GET_MODE (op0);
6844 else if (inner_compare
6845 && GET_MODE_CLASS (GET_MODE (inner_compare)) == MODE_CC
6846 && new_code == old_code
6847 && op0 == XEXP (inner_compare, 0)
6848 && op1 == XEXP (inner_compare, 1))
6849 compare_mode = GET_MODE (inner_compare);
6850 else
6851 compare_mode = SELECT_CC_MODE (new_code, op0, op1);
6852
6853 /* If the mode changed, we have to change SET_DEST, the mode in the
6854 compare, and the mode in the place SET_DEST is used. If SET_DEST is
6855 a hard register, just build new versions with the proper mode. If it
6856 is a pseudo, we lose unless it is the only time we set the pseudo, in
6857 which case we can safely change its mode. */
6858 if (!HAVE_cc0 && compare_mode != GET_MODE (dest))
6859 {
6860 if (can_change_dest_mode (dest, 0, compare_mode))
6861 {
6862 unsigned int regno = REGNO (dest);
6863 rtx new_dest;
6864
6865 if (regno < FIRST_PSEUDO_REGISTER)
6866 new_dest = gen_rtx_REG (compare_mode, regno);
6867 else
6868 {
6869 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
6870 new_dest = regno_reg_rtx[regno];
6871 }
6872
6873 SUBST (SET_DEST (x), new_dest);
6874 SUBST (XEXP (*cc_use, 0), new_dest);
6875 other_changed = 1;
6876
6877 dest = new_dest;
6878 }
6879 }
6880 #endif /* SELECT_CC_MODE */
6881
6882 /* If the code changed, we have to build a new comparison in
6883 undobuf.other_insn. */
6884 if (new_code != old_code)
6885 {
6886 int other_changed_previously = other_changed;
6887 unsigned HOST_WIDE_INT mask;
6888 rtx old_cc_use = *cc_use;
6889
6890 SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use),
6891 dest, const0_rtx));
6892 other_changed = 1;
6893
6894 /* If the only change we made was to change an EQ into an NE or
6895 vice versa, OP0 has only one bit that might be nonzero, and OP1
6896 is zero, check if changing the user of the condition code will
6897 produce a valid insn. If it won't, we can keep the original code
6898 in that insn by surrounding our operation with an XOR. */
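          /* Concretely: if OP0 can only be 0 or 8, then
             (ne (xor OP0 (const_int 8)) (const_int 0)) is equivalent to
             (eq OP0 (const_int 0)), so wrapping OP0 in the XOR lets the
             user keep its original code.  */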
6899
6900 if (((old_code == NE && new_code == EQ)
6901 || (old_code == EQ && new_code == NE))
6902 && ! other_changed_previously && op1 == const0_rtx
6903 && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
6904 && pow2p_hwi (mask = nonzero_bits (op0, GET_MODE (op0))))
6905 {
6906 rtx pat = PATTERN (other_insn), note = 0;
6907
6908 if ((recog_for_combine (&pat, other_insn, &note) < 0
6909 && ! check_asm_operands (pat)))
6910 {
6911 *cc_use = old_cc_use;
6912 other_changed = 0;
6913
6914 op0 = simplify_gen_binary (XOR, GET_MODE (op0), op0,
6915 gen_int_mode (mask,
6916 GET_MODE (op0)));
6917 }
6918 }
6919 }
6920
6921 if (other_changed)
6922 undobuf.other_insn = other_insn;
6923
6924 /* Don't generate a compare of a CC with 0, just use that CC. */
6925 if (GET_MODE (op0) == compare_mode && op1 == const0_rtx)
6926 {
6927 SUBST (SET_SRC (x), op0);
6928 src = SET_SRC (x);
6929 }
6930 /* Otherwise, if we didn't previously have the same COMPARE we
6931 want, create it from scratch. */
6932 else if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode
6933 || XEXP (src, 0) != op0 || XEXP (src, 1) != op1)
6934 {
6935 SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
6936 src = SET_SRC (x);
6937 }
6938 }
6939 else
6940 {
6941 /* Get SET_SRC in a form where we have placed back any
6942 compound expressions. Then do the checks below. */
6943 src = make_compound_operation (src, SET);
6944 SUBST (SET_SRC (x), src);
6945 }
6946
6947 /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
6948 and X being a REG or (subreg (reg)), we may be able to convert this to
6949 (set (subreg:m2 x) (op)).
6950
6951 We can always do this if M1 is narrower than M2 because that means that
6952 we only care about the low bits of the result.
6953
6954 However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
6955 perform a narrower operation than requested since the high-order bits will
6956 be undefined. On machines where it is defined, this transformation is safe
6957 as long as M1 and M2 have the same number of words. */
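   /* For example, (set x:QI (subreg:QI (plus:SI a b) 0)) can become
      (set (subreg:SI x 0) (plus:SI a b)): QImode is narrower than
      SImode, so only the low bits of the addition are needed.  */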
6958
6959 if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
6960 && !OBJECT_P (SUBREG_REG (src))
6961 && (known_equal_after_align_up
6962 (GET_MODE_SIZE (GET_MODE (src)),
6963 GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))),
6964 UNITS_PER_WORD))
6965 && (WORD_REGISTER_OPERATIONS || !paradoxical_subreg_p (src))
6966 && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
6967 && !REG_CAN_CHANGE_MODE_P (REGNO (dest),
6968 GET_MODE (SUBREG_REG (src)),
6969 GET_MODE (src)))
6970 && (REG_P (dest)
6971 || (GET_CODE (dest) == SUBREG
6972 && REG_P (SUBREG_REG (dest)))))
6973 {
6974 SUBST (SET_DEST (x),
6975 gen_lowpart (GET_MODE (SUBREG_REG (src)),
6976 dest));
6977 SUBST (SET_SRC (x), SUBREG_REG (src));
6978
6979 src = SET_SRC (x), dest = SET_DEST (x);
6980 }
6981
6982 /* If we have (set (cc0) (subreg ...)), we try to remove the subreg
6983 in SRC. */
6984 if (dest == cc0_rtx
6985 && partial_subreg_p (src)
6986 && subreg_lowpart_p (src))
6987 {
6988 rtx inner = SUBREG_REG (src);
6989 machine_mode inner_mode = GET_MODE (inner);
6990
6991 /* Here we make sure that we don't have a sign bit on. */
6992 if (val_signbit_known_clear_p (GET_MODE (src),
6993 nonzero_bits (inner, inner_mode)))
6994 {
6995 SUBST (SET_SRC (x), inner);
6996 src = SET_SRC (x);
6997 }
6998 }
6999
7000 /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
7001 would require a paradoxical subreg. Replace the subreg with a
7002 zero_extend to avoid the reload that would otherwise be required.
7003 Don't do this unless we have a scalar integer mode, otherwise the
7004 transformation is incorrect. */
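   /* For instance, on a target whose load_extend_op for QImode is
      ZERO_EXTEND, (subreg:SI (mem:QI ADDR) 0) as a SET_SRC is
      replaced by (zero_extend:SI (mem:QI ADDR)).  */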
7005
7006 enum rtx_code extend_op;
7007 if (paradoxical_subreg_p (src)
7008 && MEM_P (SUBREG_REG (src))
7009 && SCALAR_INT_MODE_P (GET_MODE (src))
7010 && (extend_op = load_extend_op (GET_MODE (SUBREG_REG (src)))) != UNKNOWN)
7011 {
7012 SUBST (SET_SRC (x),
7013 gen_rtx_fmt_e (extend_op, GET_MODE (src), SUBREG_REG (src)));
7014
7015 src = SET_SRC (x);
7016 }
7017
7018 /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
7019 are comparing an item known to be 0 or -1 against 0, use a logical
7020 operation instead. Check for one of the arms being an IOR of the other
7021 arm with some value. We compute three terms to be IOR'ed together. In
7022 practice, at most two will be nonzero. Then we do the IOR's. */
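   /* In the simplest case, with A known to be 0 or -1, this turns
      (if_then_else (ne A 0) X Y) into
      (ior (and A X) (and (not A) Y)); when one arm is an IOR of the
      other arm, the shared value becomes the third term instead.  */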
7023
7024 if (GET_CODE (dest) != PC
7025 && GET_CODE (src) == IF_THEN_ELSE
7026 && is_int_mode (GET_MODE (src), &int_mode)
7027 && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE)
7028 && XEXP (XEXP (src, 0), 1) == const0_rtx
7029 && int_mode == GET_MODE (XEXP (XEXP (src, 0), 0))
7030 && (!HAVE_conditional_move
7031 || ! can_conditionally_move_p (int_mode))
7032 && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0), int_mode)
7033 == GET_MODE_PRECISION (int_mode))
7034 && ! side_effects_p (src))
7035 {
7036 rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
7037 ? XEXP (src, 1) : XEXP (src, 2));
7038 rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE
7039 ? XEXP (src, 2) : XEXP (src, 1));
7040 rtx term1 = const0_rtx, term2, term3;
7041
7042 if (GET_CODE (true_rtx) == IOR
7043 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
7044 term1 = false_rtx, true_rtx = XEXP (true_rtx, 1), false_rtx = const0_rtx;
7045 else if (GET_CODE (true_rtx) == IOR
7046 && rtx_equal_p (XEXP (true_rtx, 1), false_rtx))
7047 term1 = false_rtx, true_rtx = XEXP (true_rtx, 0), false_rtx = const0_rtx;
7048 else if (GET_CODE (false_rtx) == IOR
7049 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx))
7050 term1 = true_rtx, false_rtx = XEXP (false_rtx, 1), true_rtx = const0_rtx;
7051 else if (GET_CODE (false_rtx) == IOR
7052 && rtx_equal_p (XEXP (false_rtx, 1), true_rtx))
7053 term1 = true_rtx, false_rtx = XEXP (false_rtx, 0), true_rtx = const0_rtx;
7054
7055 term2 = simplify_gen_binary (AND, int_mode,
7056 XEXP (XEXP (src, 0), 0), true_rtx);
7057 term3 = simplify_gen_binary (AND, int_mode,
7058 simplify_gen_unary (NOT, int_mode,
7059 XEXP (XEXP (src, 0), 0),
7060 int_mode),
7061 false_rtx);
7062
7063 SUBST (SET_SRC (x),
7064 simplify_gen_binary (IOR, int_mode,
7065 simplify_gen_binary (IOR, int_mode,
7066 term1, term2),
7067 term3));
7068
7069 src = SET_SRC (x);
7070 }
7071
7072 /* If either SRC or DEST is a CLOBBER of (const_int 0), make this
7073 whole thing fail. */
7074 if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx)
7075 return src;
7076 else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx)
7077 return dest;
7078 else
7079 /* Convert this into a field assignment operation, if possible. */
7080 return make_field_assignment (x);
7081 }
7082
7083 /* Simplify X, an AND, IOR, or XOR operation, and return the simplified
7084 result. */
7085
7086 static rtx
7087 simplify_logical (rtx x)
7088 {
7089 rtx op0 = XEXP (x, 0);
7090 rtx op1 = XEXP (x, 1);
7091 scalar_int_mode mode;
7092
7093 switch (GET_CODE (x))
7094 {
7095 case AND:
7096 /* We can call simplify_and_const_int only if we don't lose
7097 any (sign) bits when converting INTVAL (op1) to
7098 "unsigned HOST_WIDE_INT". */
7099 if (is_a <scalar_int_mode> (GET_MODE (x), &mode)
7100 && CONST_INT_P (op1)
7101 && (HWI_COMPUTABLE_MODE_P (mode)
7102 || INTVAL (op1) > 0))
7103 {
7104 x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
7105 if (GET_CODE (x) != AND)
7106 return x;
7107
7108 op0 = XEXP (x, 0);
7109 op1 = XEXP (x, 1);
7110 }
7111
7112 /* If we have any of (and (ior A B) C) or (and (xor A B) C),
7113 apply the distributive law and then the inverse distributive
7114 law to see if things simplify. */
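      /* E.g. (and (ior A B) C) distributes to
         (ior (and A C) (and B C)), which the inverse law may then
         collapse if either AND simplifies.  */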
7115 if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
7116 {
7117 rtx result = distribute_and_simplify_rtx (x, 0);
7118 if (result)
7119 return result;
7120 }
7121 if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
7122 {
7123 rtx result = distribute_and_simplify_rtx (x, 1);
7124 if (result)
7125 return result;
7126 }
7127 break;
7128
7129 case IOR:
7130 /* If we have (ior (and A B) C), apply the distributive law and then
7131 the inverse distributive law to see if things simplify. */
7132
7133 if (GET_CODE (op0) == AND)
7134 {
7135 rtx result = distribute_and_simplify_rtx (x, 0);
7136 if (result)
7137 return result;
7138 }
7139
7140 if (GET_CODE (op1) == AND)
7141 {
7142 rtx result = distribute_and_simplify_rtx (x, 1);
7143 if (result)
7144 return result;
7145 }
7146 break;
7147
7148 default:
7149 gcc_unreachable ();
7150 }
7151
7152 return x;
7153 }
7154
7155 /* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
7156 operations" because they can be replaced with two more basic operations.
7157 ZERO_EXTEND is also considered "compound" because it can be replaced with
7158 an AND operation, which is simpler, though only one operation.
7159
7160 The function expand_compound_operation is called with an rtx expression
7161 and will convert it to the appropriate shifts and AND operations,
7162 simplifying at each stage.
7163
7164 The function make_compound_operation is called to convert an expression
7165 consisting of shifts and ANDs into the equivalent compound expression.
7166 It is the inverse of this function, loosely speaking. */
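/* For example, assuming 8-bit QImode and 32-bit SImode,
   (zero_extend:SI (subreg:QI X 0)) can be expanded to
   (and:SI X (const_int 255)), while the corresponding sign extension
   expands to a left shift followed by an arithmetic right shift.  */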
7167
7168 static rtx
7169 expand_compound_operation (rtx x)
7170 {
7171 unsigned HOST_WIDE_INT pos = 0, len;
7172 int unsignedp = 0;
7173 unsigned int modewidth;
7174 rtx tem;
7175 scalar_int_mode inner_mode;
7176
7177 switch (GET_CODE (x))
7178 {
7179 case ZERO_EXTEND:
7180 unsignedp = 1;
7181 /* FALLTHRU */
7182 case SIGN_EXTEND:
7183 /* We can't necessarily use a const_int for a multiword mode;
7184 it depends on implicitly extending the value.
7185 Since we don't know the right way to extend it,
7186 we can't tell whether the implicit way is right.
7187
7188 Even for a mode that is no wider than a const_int,
7189 we can't win, because we need to sign extend one of its bits through
7190 the rest of it, and we don't know which bit. */
7191 if (CONST_INT_P (XEXP (x, 0)))
7192 return x;
7193
7194 /* Reject modes that aren't scalar integers because turning vector
7195 or complex modes into shifts causes problems. */
7196 if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
7197 return x;
7198
7199 /* Return if (subreg:MODE FROM 0) is not a safe replacement for
7200 (zero_extend:MODE FROM) or (sign_extend:MODE FROM). It is for any MEM
7201 because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
7202 reloaded. If not for that, MEMs would very rarely be safe.
7203
7204 Reject modes bigger than a word, because we might not be able
7205 to reference a two-register group starting with an arbitrary register
7206 (and currently gen_lowpart might crash for a SUBREG). */
7207
7208 if (GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD)
7209 return x;
7210
7211 len = GET_MODE_PRECISION (inner_mode);
7212 /* If the inner object has VOIDmode (the only way this can happen
7213 is if it is an ASM_OPERANDS), we can't do anything since we don't
7214 know how much masking to do. */
7215 if (len == 0)
7216 return x;
7217
7218 break;
7219
7220 case ZERO_EXTRACT:
7221 unsignedp = 1;
7222
7223 /* fall through */
7224
7225 case SIGN_EXTRACT:
7226 /* If the operand is a CLOBBER, just return it. */
7227 if (GET_CODE (XEXP (x, 0)) == CLOBBER)
7228 return XEXP (x, 0);
7229
7230 if (!CONST_INT_P (XEXP (x, 1))
7231 || !CONST_INT_P (XEXP (x, 2)))
7232 return x;
7233
7234 /* Reject modes that aren't scalar integers because turning vector
7235 or complex modes into shifts causes problems. */
7236 if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
7237 return x;
7238
7239 len = INTVAL (XEXP (x, 1));
7240 pos = INTVAL (XEXP (x, 2));
7241
7242 /* This should stay within the object being extracted, fail otherwise. */
7243 if (len + pos > GET_MODE_PRECISION (inner_mode))
7244 return x;
7245
7246 if (BITS_BIG_ENDIAN)
7247 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
7248
7249 break;
7250
7251 default:
7252 return x;
7253 }
7254
7255 /* We've rejected non-scalar operations by now. */
7256 scalar_int_mode mode = as_a <scalar_int_mode> (GET_MODE (x));
7257
7258 /* Convert sign extension to zero extension, if we know that the high
7259 bit is not set, as this is easier to optimize. It will be converted
7260 back to cheaper alternative in make_extraction. */
7261 if (GET_CODE (x) == SIGN_EXTEND
7262 && HWI_COMPUTABLE_MODE_P (mode)
7263 && ((nonzero_bits (XEXP (x, 0), inner_mode)
7264 & ~(((unsigned HOST_WIDE_INT) GET_MODE_MASK (inner_mode)) >> 1))
7265 == 0))
7266 {
7267 rtx temp = gen_rtx_ZERO_EXTEND (mode, XEXP (x, 0));
7268 rtx temp2 = expand_compound_operation (temp);
7269
7270 /* Make sure this is a profitable operation. */
7271 if (set_src_cost (x, mode, optimize_this_for_speed_p)
7272 > set_src_cost (temp2, mode, optimize_this_for_speed_p))
7273 return temp2;
7274 else if (set_src_cost (x, mode, optimize_this_for_speed_p)
7275 > set_src_cost (temp, mode, optimize_this_for_speed_p))
7276 return temp;
7277 else
7278 return x;
7279 }
7280
7281 /* We can optimize some special cases of ZERO_EXTEND. */
7282 if (GET_CODE (x) == ZERO_EXTEND)
7283 {
7284 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
7285 know that the last value didn't have any inappropriate bits
7286 set. */
7287 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
7288 && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
7289 && HWI_COMPUTABLE_MODE_P (mode)
7290 && (nonzero_bits (XEXP (XEXP (x, 0), 0), mode)
7291 & ~GET_MODE_MASK (inner_mode)) == 0)
7292 return XEXP (XEXP (x, 0), 0);
7293
7294 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
7295 if (GET_CODE (XEXP (x, 0)) == SUBREG
7296 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
7297 && subreg_lowpart_p (XEXP (x, 0))
7298 && HWI_COMPUTABLE_MODE_P (mode)
7299 && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), mode)
7300 & ~GET_MODE_MASK (inner_mode)) == 0)
7301 return SUBREG_REG (XEXP (x, 0));
7302
7303 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
7304 is a comparison and STORE_FLAG_VALUE permits. This is like
7305 the first case, but it works even when MODE is larger
7306 than HOST_WIDE_INT. */
7307 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
7308 && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
7309 && COMPARISON_P (XEXP (XEXP (x, 0), 0))
7310 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
7311 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
7312 return XEXP (XEXP (x, 0), 0);
7313
7314 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
7315 if (GET_CODE (XEXP (x, 0)) == SUBREG
7316 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
7317 && subreg_lowpart_p (XEXP (x, 0))
7318 && COMPARISON_P (SUBREG_REG (XEXP (x, 0)))
7319 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
7320 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
7321 return SUBREG_REG (XEXP (x, 0));
7322
7323 }
7324
7325 /* If we reach here, we want to return a pair of shifts. The inner
7326 shift is a left shift of BITSIZE - POS - LEN bits. The outer
7327 shift is a right shift of BITSIZE - LEN bits. It is arithmetic or
7328 logical depending on the value of UNSIGNEDP.
7329
7330 If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be
7331 converted into an AND of a shift.
7332
7333 We must check for the case where the left shift would have a negative
7334 count. This can happen in a case like (x >> 31) & 255 on machines
7335 that can't shift by a constant. On those machines, we would first
7336 combine the shift with the AND to produce a variable-position
7337 extraction. Then the constant of 31 would be substituted in
7338 to produce such a position. */
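   /* For instance, assuming 32-bit SImode,
      (sign_extract:SI X (const_int 8) (const_int 0)) yields
      (ashift X (const_int 24)) followed by an ASHIFTRT of 24 bits.  */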
7339
7340 modewidth = GET_MODE_PRECISION (mode);
7341 if (modewidth >= pos + len)
7342 {
7343 tem = gen_lowpart (mode, XEXP (x, 0));
7344 if (!tem || GET_CODE (tem) == CLOBBER)
7345 return x;
7346 tem = simplify_shift_const (NULL_RTX, ASHIFT, mode,
7347 tem, modewidth - pos - len);
7348 tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT,
7349 mode, tem, modewidth - len);
7350 }
7351 else if (unsignedp && len < HOST_BITS_PER_WIDE_INT)
7352 {
7353 tem = simplify_shift_const (NULL_RTX, LSHIFTRT, inner_mode,
7354 XEXP (x, 0), pos);
7355 tem = gen_lowpart (mode, tem);
7356 if (!tem || GET_CODE (tem) == CLOBBER)
7357 return x;
7358 tem = simplify_and_const_int (NULL_RTX, mode, tem,
7359 (HOST_WIDE_INT_1U << len) - 1);
7360 }
7361 else
7362 /* Any other cases we can't handle. */
7363 return x;
7364
7365 /* If we couldn't do this for some reason, return the original
7366 expression. */
7367 if (GET_CODE (tem) == CLOBBER)
7368 return x;
7369
7370 return tem;
7371 }
7372
7373 /* X is a SET which contains an assignment of one object into
7374 a part of another (such as a bit-field assignment, STRICT_LOW_PART,
7375 or certain SUBREGS). If possible, convert it into a series of
7376 logical operations.
7377
7378 We half-heartedly support variable positions, but do not at all
7379 support variable lengths. */
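/* The equivalent logical form computed below is, in effect,
   INNER = (INNER & ~(MASK << POS)) | ((SRC & MASK) << POS),
   where MASK is a field of LEN one bits.  */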
7380
7381 static const_rtx
7382 expand_field_assignment (const_rtx x)
7383 {
7384 rtx inner;
7385 rtx pos; /* Always counts from low bit. */
7386 int len, inner_len;
7387 rtx mask, cleared, masked;
7388 scalar_int_mode compute_mode;
7389
7390 /* Loop until we find something we can't simplify. */
7391 while (1)
7392 {
7393 if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
7394 && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
7395 {
7396 rtx x0 = XEXP (SET_DEST (x), 0);
7397 if (!GET_MODE_PRECISION (GET_MODE (x0)).is_constant (&len))
7398 break;
7399 inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
7400 pos = gen_int_mode (subreg_lsb (XEXP (SET_DEST (x), 0)),
7401 MAX_MODE_INT);
7402 }
7403 else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
7404 && CONST_INT_P (XEXP (SET_DEST (x), 1)))
7405 {
7406 inner = XEXP (SET_DEST (x), 0);
7407 if (!GET_MODE_PRECISION (GET_MODE (inner)).is_constant (&inner_len))
7408 break;
7409
7410 len = INTVAL (XEXP (SET_DEST (x), 1));
7411 pos = XEXP (SET_DEST (x), 2);
7412
7413 /* A constant position should stay within the width of INNER. */
7414 if (CONST_INT_P (pos) && INTVAL (pos) + len > inner_len)
7415 break;
7416
7417 if (BITS_BIG_ENDIAN)
7418 {
7419 if (CONST_INT_P (pos))
7420 pos = GEN_INT (inner_len - len - INTVAL (pos));
7421 else if (GET_CODE (pos) == MINUS
7422 && CONST_INT_P (XEXP (pos, 1))
7423 && INTVAL (XEXP (pos, 1)) == inner_len - len)
7424 /* If position is ADJUST - X, new position is X. */
7425 pos = XEXP (pos, 0);
7426 else
7427 pos = simplify_gen_binary (MINUS, GET_MODE (pos),
7428 gen_int_mode (inner_len - len,
7429 GET_MODE (pos)),
7430 pos);
7431 }
7432 }
7433
7434 /* If the destination is a subreg that overwrites the whole of the inner
7435 register, we can move the subreg to the source. */
7436 else if (GET_CODE (SET_DEST (x)) == SUBREG
7437 /* We need SUBREGs to compute nonzero_bits properly. */
7438 && nonzero_sign_valid
7439 && !read_modify_subreg_p (SET_DEST (x)))
7440 {
7441 x = gen_rtx_SET (SUBREG_REG (SET_DEST (x)),
7442 gen_lowpart
7443 (GET_MODE (SUBREG_REG (SET_DEST (x))),
7444 SET_SRC (x)));
7445 continue;
7446 }
7447 else
7448 break;
7449
7450 while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
7451 inner = SUBREG_REG (inner);
7452
7453 /* Don't attempt bitwise arithmetic on non-scalar integer modes. */
7454 if (!is_a <scalar_int_mode> (GET_MODE (inner), &compute_mode))
7455 {
7456 /* Don't do anything for vector or complex integral types. */
7457 if (! FLOAT_MODE_P (GET_MODE (inner)))
7458 break;
7459
7460 /* Try to find an integral mode to pun with. */
7461 if (!int_mode_for_size (GET_MODE_BITSIZE (GET_MODE (inner)), 0)
7462 .exists (&compute_mode))
7463 break;
7464
7465 inner = gen_lowpart (compute_mode, inner);
7466 }
7467
7468 /* Compute a mask of LEN bits, if we can do this on the host machine. */
7469 if (len >= HOST_BITS_PER_WIDE_INT)
7470 break;
7471
7472 /* Don't try to compute in too wide unsupported modes. */
7473 if (!targetm.scalar_mode_supported_p (compute_mode))
7474 break;
7475
7476 /* Now compute the equivalent expression. Make a copy of INNER
7477 for the SET_DEST in case it is a MEM into which we will substitute;
7478 we don't want shared RTL in that case. */
7479 mask = gen_int_mode ((HOST_WIDE_INT_1U << len) - 1,
7480 compute_mode);
7481 cleared = simplify_gen_binary (AND, compute_mode,
7482 simplify_gen_unary (NOT, compute_mode,
7483 simplify_gen_binary (ASHIFT,
7484 compute_mode,
7485 mask, pos),
7486 compute_mode),
7487 inner);
7488 masked = simplify_gen_binary (ASHIFT, compute_mode,
7489 simplify_gen_binary (
7490 AND, compute_mode,
7491 gen_lowpart (compute_mode, SET_SRC (x)),
7492 mask),
7493 pos);
7494
7495 x = gen_rtx_SET (copy_rtx (inner),
7496 simplify_gen_binary (IOR, compute_mode,
7497 cleared, masked));
7498 }
7499
7500 return x;
7501 }
7502
7503 /* Return an RTX for a reference to LEN bits of INNER. If POS_RTX is nonzero,
7504 it is an RTX that represents the (variable) starting position; otherwise,
7505 POS is the (constant) starting bit position. Both are counted from the LSB.
7506
7507 UNSIGNEDP is nonzero for an unsigned reference and zero for a signed one.
7508
7509 IN_DEST is nonzero if this is a reference in the destination of a SET.
7510 This is used when a ZERO_ or SIGN_EXTRACT isn't needed. If nonzero,
7511 a STRICT_LOW_PART will be used, if zero, ZERO_EXTEND or SIGN_EXTEND will
7512 be used.
7513
7514 IN_COMPARE is nonzero if we are in a COMPARE. This means that a
7515 ZERO_EXTRACT should be built even for bits starting at bit 0.
7516
7517 MODE is the desired mode of the result (if IN_DEST == 0).
7518
7519 The result is an RTX for the extraction or NULL_RTX if the target
7520 can't handle it. */
7521
7522 static rtx
7523 make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
7524 rtx pos_rtx, unsigned HOST_WIDE_INT len, int unsignedp,
7525 int in_dest, int in_compare)
7526 {
7527 /* This mode describes the size of the storage area
7528 to fetch the overall value from. Within that, we
7529 ignore the POS lowest bits, etc. */
7530 machine_mode is_mode = GET_MODE (inner);
7531 machine_mode inner_mode;
7532 scalar_int_mode wanted_inner_mode;
7533 scalar_int_mode wanted_inner_reg_mode = word_mode;
7534 scalar_int_mode pos_mode = word_mode;
7535 machine_mode extraction_mode = word_mode;
7536 rtx new_rtx = 0;
7537 rtx orig_pos_rtx = pos_rtx;
7538 HOST_WIDE_INT orig_pos;
7539
7540 if (pos_rtx && CONST_INT_P (pos_rtx))
7541 pos = INTVAL (pos_rtx), pos_rtx = 0;
7542
7543 if (GET_CODE (inner) == SUBREG
7544 && subreg_lowpart_p (inner)
7545 && (paradoxical_subreg_p (inner)
7546 /* If trying or potentially trying to extract
7547 bits outside of is_mode, don't look through
7548 non-paradoxical SUBREGs. See PR82192. */
7549 || (pos_rtx == NULL_RTX
7550 && known_le (pos + len, GET_MODE_PRECISION (is_mode)))))
7551 {
7552 /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
7553 consider just the QI as the memory to extract from.
7554 The subreg adds or removes high bits; its mode is
7555 irrelevant to the meaning of this extraction,
7556 since POS and LEN count from the lsb. */
7557 if (MEM_P (SUBREG_REG (inner)))
7558 is_mode = GET_MODE (SUBREG_REG (inner));
7559 inner = SUBREG_REG (inner);
7560 }
7561 else if (GET_CODE (inner) == ASHIFT
7562 && CONST_INT_P (XEXP (inner, 1))
7563 && pos_rtx == 0 && pos == 0
7564 && len > UINTVAL (XEXP (inner, 1)))
7565 {
7566 /* We're extracting the least significant bits of an rtx
7567 (ashift X (const_int C)), where LEN > C. Extract the
7568 least significant (LEN - C) bits of X, giving an rtx
7569 whose mode is MODE, then shift it left C times. */
7570 new_rtx = make_extraction (mode, XEXP (inner, 0),
7571 0, 0, len - INTVAL (XEXP (inner, 1)),
7572 unsignedp, in_dest, in_compare);
7573 if (new_rtx != 0)
7574 return gen_rtx_ASHIFT (mode, new_rtx, XEXP (inner, 1));
7575 }
7576 else if (GET_CODE (inner) == TRUNCATE
7577 /* If trying or potentially trying to extract
7578 bits outside of is_mode, don't look through
7579 TRUNCATE. See PR82192. */
7580 && pos_rtx == NULL_RTX
7581 && known_le (pos + len, GET_MODE_PRECISION (is_mode)))
7582 inner = XEXP (inner, 0);
7583
7584 inner_mode = GET_MODE (inner);
7585
7586 /* See if this can be done without an extraction. We never can if the
7587 width of the field is not the same as that of some integer mode. For
7588 registers, we can only avoid the extraction if the position is at the
7589 low-order bit and this is either not in the destination or we have the
7590 appropriate STRICT_LOW_PART operation available.
7591
7592 For MEM, we can avoid an extract if the field starts on an appropriate
7593 boundary and we can change the mode of the memory reference. */
7594
7595 scalar_int_mode tmode;
7596 if (int_mode_for_size (len, 1).exists (&tmode)
7597 && ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
7598 && !MEM_P (inner)
7599 && (pos == 0 || REG_P (inner))
7600 && (inner_mode == tmode
7601 || !REG_P (inner)
7602 || TRULY_NOOP_TRUNCATION_MODES_P (tmode, inner_mode)
7603 || reg_truncated_to_mode (tmode, inner))
7604 && (! in_dest
7605 || (REG_P (inner)
7606 && have_insn_for (STRICT_LOW_PART, tmode))))
7607 || (MEM_P (inner) && pos_rtx == 0
7608 && (pos
7609 % (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
7610 : BITS_PER_UNIT)) == 0
7611 /* We can't do this if we are widening INNER_MODE (it
7612 may not be aligned, for one thing). */
7613 && !paradoxical_subreg_p (tmode, inner_mode)
7614 && known_le (pos + len, GET_MODE_PRECISION (is_mode))
7615 && (inner_mode == tmode
7616 || (! mode_dependent_address_p (XEXP (inner, 0),
7617 MEM_ADDR_SPACE (inner))
7618 && ! MEM_VOLATILE_P (inner))))))
7619 {
7620 /* If INNER is a MEM, make a new MEM that encompasses just the desired
7621 field. If the original and current mode are the same, we need not
7622 adjust the offset. Otherwise, we do if bytes big endian.
7623
7624 If INNER is not a MEM, get a piece consisting of just the field
7625 of interest (in this case POS % BITS_PER_WORD must be 0). */
7626
7627 if (MEM_P (inner))
7628 {
7629 poly_int64 offset;
7630
7631 /* POS counts from lsb, but make OFFSET count in memory order. */
7632 if (BYTES_BIG_ENDIAN)
7633 offset = bits_to_bytes_round_down (GET_MODE_PRECISION (is_mode)
7634 - len - pos);
7635 else
7636 offset = pos / BITS_PER_UNIT;
7637
7638 new_rtx = adjust_address_nv (inner, tmode, offset);
7639 }
7640 else if (REG_P (inner))
7641 {
7642 if (tmode != inner_mode)
7643 {
7644 /* We can't call gen_lowpart in a DEST since we
7645 always want a SUBREG (see below) and it would sometimes
7646 return a new hard register. */
7647 if (pos || in_dest)
7648 {
7649 poly_uint64 offset
7650 = subreg_offset_from_lsb (tmode, inner_mode, pos);
7651
7652 /* Avoid creating invalid subregs, for example when
7653 simplifying (x>>32)&255. */
7654 if (!validate_subreg (tmode, inner_mode, inner, offset))
7655 return NULL_RTX;
7656
7657 new_rtx = gen_rtx_SUBREG (tmode, inner, offset);
7658 }
7659 else
7660 new_rtx = gen_lowpart (tmode, inner);
7661 }
7662 else
7663 new_rtx = inner;
7664 }
7665 else
7666 new_rtx = force_to_mode (inner, tmode,
7667 len >= HOST_BITS_PER_WIDE_INT
7668 ? HOST_WIDE_INT_M1U
7669 : (HOST_WIDE_INT_1U << len) - 1, 0);
7670
7671 /* If this extraction is going into the destination of a SET,
7672 make a STRICT_LOW_PART unless we made a MEM. */
7673
7674 if (in_dest)
7675 return (MEM_P (new_rtx) ? new_rtx
7676 : (GET_CODE (new_rtx) != SUBREG
7677 ? gen_rtx_CLOBBER (tmode, const0_rtx)
7678 : gen_rtx_STRICT_LOW_PART (VOIDmode, new_rtx)));
7679
7680 if (mode == tmode)
7681 return new_rtx;
7682
7683 if (CONST_SCALAR_INT_P (new_rtx))
7684 return simplify_unary_operation (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7685 mode, new_rtx, tmode);
7686
7687 /* If we know that no extraneous bits are set, and that the high
7688 bit is not set, convert the extraction to the cheaper of
7689 sign and zero extension, that are equivalent in these cases. */
7690 if (flag_expensive_optimizations
7691 && (HWI_COMPUTABLE_MODE_P (tmode)
7692 && ((nonzero_bits (new_rtx, tmode)
7693 & ~(((unsigned HOST_WIDE_INT)GET_MODE_MASK (tmode)) >> 1))
7694 == 0)))
7695 {
7696 rtx temp = gen_rtx_ZERO_EXTEND (mode, new_rtx);
7697 rtx temp1 = gen_rtx_SIGN_EXTEND (mode, new_rtx);
7698
7699 /* Prefer ZERO_EXTENSION, since it gives more information to
7700 backends. */
7701 if (set_src_cost (temp, mode, optimize_this_for_speed_p)
7702 <= set_src_cost (temp1, mode, optimize_this_for_speed_p))
7703 return temp;
7704 return temp1;
7705 }
7706
7707 /* Otherwise, sign- or zero-extend unless we already are in the
7708 proper mode. */
7709
7710 return (gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7711 mode, new_rtx));
7712 }
7713
7714 /* Unless this is a COMPARE or we have a funny memory reference,
7715 don't do anything with zero-extending field extracts starting at
7716 the low-order bit since they are simple AND operations. */
7717 if (pos_rtx == 0 && pos == 0 && ! in_dest
7718 && ! in_compare && unsignedp)
7719 return 0;
7720
7721 /* If INNER is a MEM, reject this if we would be spanning bytes or
7722 if the position is not a constant and the length is not 1. In all
7723 other cases, we would only be going outside our object in cases when
7724 an original shift would have been undefined. */
7725 if (MEM_P (inner)
7726 && ((pos_rtx == 0 && maybe_gt (pos + len, GET_MODE_PRECISION (is_mode)))
7727 || (pos_rtx != 0 && len != 1)))
7728 return 0;
7729
7730 enum extraction_pattern pattern = (in_dest ? EP_insv
7731 : unsignedp ? EP_extzv : EP_extv);
7732
7733 /* If INNER is not from memory, we want it to have the mode of a register
7734 extraction pattern's structure operand, or word_mode if there is no
7735 such pattern. The same applies to extraction_mode and pos_mode
7736 and their respective operands.
7737
7738 For memory, assume that the desired extraction_mode and pos_mode
7739 are the same as for a register operation, since at present we don't
7740 have named patterns for aligned memory structures. */
7741 struct extraction_insn insn;
7742 unsigned int inner_size;
7743 if (GET_MODE_BITSIZE (inner_mode).is_constant (&inner_size)
7744 && get_best_reg_extraction_insn (&insn, pattern, inner_size, mode))
7745 {
7746 wanted_inner_reg_mode = insn.struct_mode.require ();
7747 pos_mode = insn.pos_mode;
7748 extraction_mode = insn.field_mode;
7749 }
7750
7751 /* Never narrow an object, since that might not be safe. */
7752
7753 if (mode != VOIDmode
7754 && partial_subreg_p (extraction_mode, mode))
7755 extraction_mode = mode;
7756
7757 /* Punt if len is too large for extraction_mode. */
7758 if (maybe_gt (len, GET_MODE_PRECISION (extraction_mode)))
7759 return NULL_RTX;
7760
7761 if (!MEM_P (inner))
7762 wanted_inner_mode = wanted_inner_reg_mode;
7763 else
7764 {
7765 /* Be careful not to go beyond the extracted object and maintain the
7766 natural alignment of the memory. */
7767 wanted_inner_mode = smallest_int_mode_for_size (len);
7768 while (pos % GET_MODE_BITSIZE (wanted_inner_mode) + len
7769 > GET_MODE_BITSIZE (wanted_inner_mode))
7770 wanted_inner_mode = GET_MODE_WIDER_MODE (wanted_inner_mode).require ();
7771 }
7772
7773 orig_pos = pos;
7774
7775 if (BITS_BIG_ENDIAN)
7776 {
7777 /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to
7778 BITS_BIG_ENDIAN style. If position is constant, compute new
7779 position. Otherwise, build subtraction.
7780 Note that POS is relative to the mode of the original argument.
7781 If it's a MEM we need to recompute POS relative to that.
7782 However, if we're extracting from (or inserting into) a register,
7783 we want to recompute POS relative to wanted_inner_mode. */
7784 int width;
7785 if (!MEM_P (inner))
7786 width = GET_MODE_BITSIZE (wanted_inner_mode);
7787 else if (!GET_MODE_BITSIZE (is_mode).is_constant (&width))
7788 return NULL_RTX;
7789
7790 if (pos_rtx == 0)
7791 pos = width - len - pos;
7792 else
7793 pos_rtx
7794 = gen_rtx_MINUS (GET_MODE (pos_rtx),
7795 gen_int_mode (width - len, GET_MODE (pos_rtx)),
7796 pos_rtx);
7797 /* POS may be less than 0 now, but we check for that below.
7798 Note that it can only be less than 0 if !MEM_P (inner). */
7799 }
7800
7801 /* If INNER has a wider mode, and this is a constant extraction, try to
7802 make it smaller and adjust the byte to point to the byte containing
7803 the value. */
7804 if (wanted_inner_mode != VOIDmode
7805 && inner_mode != wanted_inner_mode
7806 && ! pos_rtx
7807 && partial_subreg_p (wanted_inner_mode, is_mode)
7808 && MEM_P (inner)
7809 && ! mode_dependent_address_p (XEXP (inner, 0), MEM_ADDR_SPACE (inner))
7810 && ! MEM_VOLATILE_P (inner))
7811 {
7812 poly_int64 offset = 0;
7813
7814 /* The computations below will be correct if the machine is big
7815 endian in both bits and bytes or little endian in bits and bytes.
7816 If it is mixed, we must adjust. */
7817
7818 /* If bytes are big endian and we had a paradoxical SUBREG, we must
7819 adjust OFFSET to compensate. */
7820 if (BYTES_BIG_ENDIAN
7821 && paradoxical_subreg_p (is_mode, inner_mode))
7822 offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);
7823
7824 /* We can now move to the desired byte. */
7825 offset += (pos / GET_MODE_BITSIZE (wanted_inner_mode))
7826 * GET_MODE_SIZE (wanted_inner_mode);
7827 pos %= GET_MODE_BITSIZE (wanted_inner_mode);
7828
7829 if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
7830 && is_mode != wanted_inner_mode)
7831 offset = (GET_MODE_SIZE (is_mode)
7832 - GET_MODE_SIZE (wanted_inner_mode) - offset);
7833
7834 inner = adjust_address_nv (inner, wanted_inner_mode, offset);
7835 }
7836
7837 /* If INNER is not memory, get it into the proper mode. If we are changing
7838 its mode, POS must be a constant and smaller than the size of the new
7839 mode. */
7840 else if (!MEM_P (inner))
7841 {
7842 /* On the LHS, don't create paradoxical subregs implicitly truncating
7843 the register unless TARGET_TRULY_NOOP_TRUNCATION. */
7844 if (in_dest
7845 && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (inner),
7846 wanted_inner_mode))
7847 return NULL_RTX;
7848
7849 if (GET_MODE (inner) != wanted_inner_mode
7850 && (pos_rtx != 0
7851 || orig_pos + len > GET_MODE_BITSIZE (wanted_inner_mode)))
7852 return NULL_RTX;
7853
7854 if (orig_pos < 0)
7855 return NULL_RTX;
7856
7857 inner = force_to_mode (inner, wanted_inner_mode,
7858 pos_rtx
7859 || len + orig_pos >= HOST_BITS_PER_WIDE_INT
7860 ? HOST_WIDE_INT_M1U
7861 : (((HOST_WIDE_INT_1U << len) - 1)
7862 << orig_pos),
7863 0);
7864 }
7865
7866 /* Adjust mode of POS_RTX, if needed. If we want a wider mode, we
7867 have to zero extend. Otherwise, we can just use a SUBREG.
7868
7869 We dealt with constant rtxes earlier, so pos_rtx cannot
7870 have VOIDmode at this point. */
7871 if (pos_rtx != 0
7872 && (GET_MODE_SIZE (pos_mode)
7873 > GET_MODE_SIZE (as_a <scalar_int_mode> (GET_MODE (pos_rtx)))))
7874 {
7875 rtx temp = simplify_gen_unary (ZERO_EXTEND, pos_mode, pos_rtx,
7876 GET_MODE (pos_rtx));
7877
7878 /* If we know that no extraneous bits are set, and that the high
7879 bit is not set, convert extraction to cheaper one - either
7880 SIGN_EXTENSION or ZERO_EXTENSION, that are equivalent in these
7881 cases. */
7882 if (flag_expensive_optimizations
7883 && (HWI_COMPUTABLE_MODE_P (GET_MODE (pos_rtx))
7884 && ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx))
7885 & ~(((unsigned HOST_WIDE_INT)
7886 GET_MODE_MASK (GET_MODE (pos_rtx)))
7887 >> 1))
7888 == 0)))
7889 {
7890 rtx temp1 = simplify_gen_unary (SIGN_EXTEND, pos_mode, pos_rtx,
7891 GET_MODE (pos_rtx));
7892
7893 /* Prefer ZERO_EXTENSION, since it gives more information to
7894 backends. */
7895 if (set_src_cost (temp1, pos_mode, optimize_this_for_speed_p)
7896 < set_src_cost (temp, pos_mode, optimize_this_for_speed_p))
7897 temp = temp1;
7898 }
7899 pos_rtx = temp;
7900 }
7901
7902 /* Make POS_RTX unless we already have it and it is correct. If we don't
7903 have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
7904 be a CONST_INT. */
7905 if (pos_rtx == 0 && orig_pos_rtx != 0 && INTVAL (orig_pos_rtx) == pos)
7906 pos_rtx = orig_pos_rtx;
7907
7908 else if (pos_rtx == 0)
7909 pos_rtx = GEN_INT (pos);
7910
7911 /* Make the required operation. See if we can use existing rtx. */
7912 new_rtx = gen_rtx_fmt_eee (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT,
7913 extraction_mode, inner, GEN_INT (len), pos_rtx);
7914 if (! in_dest)
7915 new_rtx = gen_lowpart (mode, new_rtx);
7916
7917 return new_rtx;
7918 }
7919
7920 /* See if X (of mode MODE) contains an ASHIFT of COUNT or more bits that
7921 can be commuted with any other operations in X. Return X without
7922 that shift if so. */
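/* For example, with COUNT == 2,
   (plus (ashift X (const_int 2)) (const_int 12)) can be rewritten as
   (plus X (const_int 3)), letting the caller reapply the shift
   outside the addition.  */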
7923
7924 static rtx
7925 extract_left_shift (scalar_int_mode mode, rtx x, int count)
7926 {
7927 enum rtx_code code = GET_CODE (x);
7928 rtx tem;
7929
7930 switch (code)
7931 {
7932 case ASHIFT:
7933 /* This is the shift itself. If it is wide enough, we will return
7934 either the value being shifted if the shift count is equal to
7935 COUNT or a shift for the difference. */
7936 if (CONST_INT_P (XEXP (x, 1))
7937 && INTVAL (XEXP (x, 1)) >= count)
7938 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
7939 INTVAL (XEXP (x, 1)) - count);
7940 break;
7941
7942 case NEG: case NOT:
7943 if ((tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
7944 return simplify_gen_unary (code, mode, tem, mode);
7945
7946 break;
7947
7948 case PLUS: case IOR: case XOR: case AND:
7949 /* If we can safely shift this constant and we find the inner shift,
7950 make a new operation. */
7951 if (CONST_INT_P (XEXP (x, 1))
7952 && (UINTVAL (XEXP (x, 1))
7953 & (((HOST_WIDE_INT_1U << count)) - 1)) == 0
7954 && (tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
7955 {
7956 HOST_WIDE_INT val = INTVAL (XEXP (x, 1)) >> count;
7957 return simplify_gen_binary (code, mode, tem,
7958 gen_int_mode (val, mode));
7959 }
7960 break;
7961
7962 default:
7963 break;
7964 }
7965
7966 return 0;
7967 }
7968
7969 /* Subroutine of make_compound_operation. *X_PTR is the rtx at the current
7970 level of the expression and MODE is its mode. IN_CODE is as for
7971 make_compound_operation. *NEXT_CODE_PTR is the value of IN_CODE
7972 that should be used when recursing on operands of *X_PTR.
7973
7974 There are two possible actions:
7975
7976 - Return null. This tells the caller to recurse on *X_PTR with IN_CODE
7977 equal to *NEXT_CODE_PTR, after which *X_PTR holds the final value.
7978
7979 - Return a new rtx, which the caller returns directly. */
7980
7981 static rtx
7982 make_compound_operation_int (scalar_int_mode mode, rtx *x_ptr,
7983 enum rtx_code in_code,
7984 enum rtx_code *next_code_ptr)
7985 {
7986 rtx x = *x_ptr;
7987 enum rtx_code next_code = *next_code_ptr;
7988 enum rtx_code code = GET_CODE (x);
7989 int mode_width = GET_MODE_PRECISION (mode);
7990 rtx rhs, lhs;
7991 rtx new_rtx = 0;
7992 int i;
7993 rtx tem;
7994 scalar_int_mode inner_mode;
7995 bool equality_comparison = false;
7996
7997 if (in_code == EQ)
7998 {
7999 equality_comparison = true;
8000 in_code = COMPARE;
8001 }
8002
8003 /* Process depending on the code of this operation. If NEW is set
8004 nonzero, it will be returned. */
8005
8006 switch (code)
8007 {
8008 case ASHIFT:
8009 /* Convert shifts by constants into multiplications if inside
8010 an address. */
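      /* E.g. inside a MEM, (ashift X (const_int 2)) becomes
         (mult X (const_int 4)), the canonical form for addresses.  */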
8011 if (in_code == MEM && CONST_INT_P (XEXP (x, 1))
8012 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
8013 && INTVAL (XEXP (x, 1)) >= 0)
8014 {
8015 HOST_WIDE_INT count = INTVAL (XEXP (x, 1));
8016 HOST_WIDE_INT multval = HOST_WIDE_INT_1 << count;
8017
8018 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8019 if (GET_CODE (new_rtx) == NEG)
8020 {
8021 new_rtx = XEXP (new_rtx, 0);
8022 multval = -multval;
8023 }
8024 multval = trunc_int_for_mode (multval, mode);
8025 new_rtx = gen_rtx_MULT (mode, new_rtx, gen_int_mode (multval, mode));
8026 }
8027 break;
8028
8029 case PLUS:
8030 lhs = XEXP (x, 0);
8031 rhs = XEXP (x, 1);
8032 lhs = make_compound_operation (lhs, next_code);
8033 rhs = make_compound_operation (rhs, next_code);
8034 if (GET_CODE (lhs) == MULT && GET_CODE (XEXP (lhs, 0)) == NEG)
8035 {
8036 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (lhs, 0), 0),
8037 XEXP (lhs, 1));
8038 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
8039 }
8040 else if (GET_CODE (lhs) == MULT
8041 && (CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) < 0))
8042 {
8043 tem = simplify_gen_binary (MULT, mode, XEXP (lhs, 0),
8044 simplify_gen_unary (NEG, mode,
8045 XEXP (lhs, 1),
8046 mode));
8047 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
8048 }
8049 else
8050 {
8051 SUBST (XEXP (x, 0), lhs);
8052 SUBST (XEXP (x, 1), rhs);
8053 }
8054 maybe_swap_commutative_operands (x);
8055 return x;
8056
8057 case MINUS:
8058 lhs = XEXP (x, 0);
8059 rhs = XEXP (x, 1);
8060 lhs = make_compound_operation (lhs, next_code);
8061 rhs = make_compound_operation (rhs, next_code);
8062 if (GET_CODE (rhs) == MULT && GET_CODE (XEXP (rhs, 0)) == NEG)
8063 {
8064 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (rhs, 0), 0),
8065 XEXP (rhs, 1));
8066 return simplify_gen_binary (PLUS, mode, tem, lhs);
8067 }
8068 else if (GET_CODE (rhs) == MULT
8069 && (CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) < 0))
8070 {
8071 tem = simplify_gen_binary (MULT, mode, XEXP (rhs, 0),
8072 simplify_gen_unary (NEG, mode,
8073 XEXP (rhs, 1),
8074 mode));
8075 return simplify_gen_binary (PLUS, mode, tem, lhs);
8076 }
8077 else
8078 {
8079 SUBST (XEXP (x, 0), lhs);
8080 SUBST (XEXP (x, 1), rhs);
8081 return x;
8082 }
8083
8084 case AND:
8085 /* If the second operand is not a constant, we can't do anything
8086 with it. */
8087 if (!CONST_INT_P (XEXP (x, 1)))
8088 break;
8089
8090 /* If the constant is a power of two minus one and the first operand
8091 is a logical right shift, make an extraction. */
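      /* E.g. (and (lshiftrt X (const_int 8)) (const_int 255)) becomes
         (zero_extract X (const_int 8) (const_int 8)).  */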
8092 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8093 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8094 {
8095 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
8096 new_rtx = make_extraction (mode, new_rtx, 0, XEXP (XEXP (x, 0), 1),
8097 i, 1, 0, in_code == COMPARE);
8098 }
8099
8100 /* Same as previous, but for (subreg (lshiftrt ...)) in first op. */
8101 else if (GET_CODE (XEXP (x, 0)) == SUBREG
8102 && subreg_lowpart_p (XEXP (x, 0))
8103 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (XEXP (x, 0))),
8104 &inner_mode)
8105 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
8106 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8107 {
8108 rtx inner_x0 = SUBREG_REG (XEXP (x, 0));
8109 new_rtx = make_compound_operation (XEXP (inner_x0, 0), next_code);
8110 new_rtx = make_extraction (inner_mode, new_rtx, 0,
8111 XEXP (inner_x0, 1),
8112 i, 1, 0, in_code == COMPARE);
8113
8114 /* If we narrowed the mode when dropping the subreg, then we lose. */
8115 if (GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (mode))
8116 new_rtx = NULL;
8117
8118 /* If that didn't give anything, see if the AND simplifies on
8119 its own. */
8120 if (!new_rtx && i >= 0)
8121 {
8122 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8123 new_rtx = make_extraction (mode, new_rtx, 0, NULL_RTX, i, 1,
8124 0, in_code == COMPARE);
8125 }
8126 }
8127 /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)). */
8128 else if ((GET_CODE (XEXP (x, 0)) == XOR
8129 || GET_CODE (XEXP (x, 0)) == IOR)
8130 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT
8131 && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT
8132 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8133 {
8134 /* Apply the distributive law, and then try to make extractions. */
8135 new_rtx = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode,
8136 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
8137 XEXP (x, 1)),
8138 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
8139 XEXP (x, 1)));
8140 new_rtx = make_compound_operation (new_rtx, in_code);
8141 }
8142
8143 /* If we have (and (rotate X C) M) and C is at least the number
8144 of bits in M, this is an extraction. */
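      /* E.g. (illustrative): in SImode,
	     (and (rotate (reg:SI 100) (const_int 28)) (const_int 15))
	 keeps the four bits the rotate brought down from positions 4..7,
	 so it becomes
	     (zero_extract:SI (reg:SI 100) (const_int 4) (const_int 4)).  */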
8145
8146 else if (GET_CODE (XEXP (x, 0)) == ROTATE
8147 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8148 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0
8149 && i <= INTVAL (XEXP (XEXP (x, 0), 1)))
8150 {
8151 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
8152 new_rtx = make_extraction (mode, new_rtx,
8153 (GET_MODE_PRECISION (mode)
8154 - INTVAL (XEXP (XEXP (x, 0), 1))),
8155 NULL_RTX, i, 1, 0, in_code == COMPARE);
8156 }
8157
8158 /* On machines without logical shifts, if the operand of the AND is
8159 a logical shift and our mask turns off all the propagated sign
8160 bits, we can replace the logical shift with an arithmetic shift. */
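      /* Worked instance (hypothetical target): in SImode with a shift count
	 of 24, MASK is 0xffffffff >> 24 == 0xff, so an AND constant such as
	 0x7f has no bits outside MASK and the LSHIFTRT can safely become an
	 ASHIFTRT: the AND discards every propagated sign bit anyway.  */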
8161 else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8162 && !have_insn_for (LSHIFTRT, mode)
8163 && have_insn_for (ASHIFTRT, mode)
8164 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8165 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8166 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8167 && mode_width <= HOST_BITS_PER_WIDE_INT)
8168 {
8169 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
8170
8171 mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
8172 if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
8173 SUBST (XEXP (x, 0),
8174 gen_rtx_ASHIFTRT (mode,
8175 make_compound_operation (XEXP (XEXP (x,
8176 0),
8177 0),
8178 next_code),
8179 XEXP (XEXP (x, 0), 1)));
8180 }
8181
8182 /* If the constant is one less than a power of two, this might be
8183 representable by an extraction even if no shift is present.
8184 If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
8185 we are in a COMPARE. */
8186 else if ((i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8187 new_rtx = make_extraction (mode,
8188 make_compound_operation (XEXP (x, 0),
8189 next_code),
8190 0, NULL_RTX, i, 1, 0, in_code == COMPARE);
8191
8192 /* If we are in a comparison and this is an AND with a power of two,
8193 convert this into the appropriate bit extract. */
8194 else if (in_code == COMPARE
8195 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
8196 && (equality_comparison || i < GET_MODE_PRECISION (mode) - 1))
8197 new_rtx = make_extraction (mode,
8198 make_compound_operation (XEXP (x, 0),
8199 next_code),
8200 i, NULL_RTX, 1, 1, 0, 1);
8201
8202 /* If one operand is a paradoxical subreg of a register or memory and
8203 the constant (limited to the smaller mode) has only zero bits where
8204 the sub expression has known zero bits, this can be expressed as
8205 a zero_extend. */
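      /* E.g. (illustrative): with SUB a QImode MEM,
	     (and (subreg:SI (mem:QI ...) 0) (const_int 0xff))
	 keeps every bit the inner QImode value can supply, so it is
	 equivalent to (zero_extend:SI (mem:QI ...)).  */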
8206 else if (GET_CODE (XEXP (x, 0)) == SUBREG)
8207 {
8208 rtx sub;
8209
8210 sub = XEXP (XEXP (x, 0), 0);
8211 machine_mode sub_mode = GET_MODE (sub);
8212 int sub_width;
8213 if ((REG_P (sub) || MEM_P (sub))
8214 && GET_MODE_PRECISION (sub_mode).is_constant (&sub_width)
8215 && sub_width < mode_width)
8216 {
8217 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (sub_mode);
8218 unsigned HOST_WIDE_INT mask;
8219
8220 /* The original AND constant with all the known zero bits set. */
8221 mask = UINTVAL (XEXP (x, 1)) | (~nonzero_bits (sub, sub_mode));
8222 if ((mask & mode_mask) == mode_mask)
8223 {
8224 new_rtx = make_compound_operation (sub, next_code);
8225 new_rtx = make_extraction (mode, new_rtx, 0, 0, sub_width,
8226 1, 0, in_code == COMPARE);
8227 }
8228 }
8229 }
8230
8231 break;
8232
8233 case LSHIFTRT:
8234 /* If the sign bit is known to be zero, replace this with an
8235 arithmetic shift. */
8236 if (have_insn_for (ASHIFTRT, mode)
8237 && ! have_insn_for (LSHIFTRT, mode)
8238 && mode_width <= HOST_BITS_PER_WIDE_INT
8239 && (nonzero_bits (XEXP (x, 0), mode) & (1 << (mode_width - 1))) == 0)
8240 {
8241 new_rtx = gen_rtx_ASHIFTRT (mode,
8242 make_compound_operation (XEXP (x, 0),
8243 next_code),
8244 XEXP (x, 1));
8245 break;
8246 }
8247
8248 /* fall through */
8249
8250 case ASHIFTRT:
8251 lhs = XEXP (x, 0);
8252 rhs = XEXP (x, 1);
8253
8254 /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
8255 this is a SIGN_EXTRACT. */
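      /* A classic instance (illustrative): in SImode,
	     (ashiftrt (ashift (reg:SI 100) (const_int 24)) (const_int 24))
	 sign-extends the low byte of the register, so it is rewritten as
	     (sign_extract:SI (reg:SI 100) (const_int 8) (const_int 0)),
	 which make_extraction may in turn express as a sign_extend.  */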
8256 if (CONST_INT_P (rhs)
8257 && GET_CODE (lhs) == ASHIFT
8258 && CONST_INT_P (XEXP (lhs, 1))
8259 && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))
8260 && INTVAL (XEXP (lhs, 1)) >= 0
8261 && INTVAL (rhs) < mode_width)
8262 {
8263 new_rtx = make_compound_operation (XEXP (lhs, 0), next_code);
8264 new_rtx = make_extraction (mode, new_rtx,
8265 INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
8266 NULL_RTX, mode_width - INTVAL (rhs),
8267 code == LSHIFTRT, 0, in_code == COMPARE);
8268 break;
8269 }
8270
8271 /* See if we have operations between an ASHIFTRT and an ASHIFT.
8272 If so, try to merge the shifts into a SIGN_EXTEND. We could
8273 also do this for some cases of SIGN_EXTRACT, but it doesn't
8274 seem worth the effort; the case checked for occurs on Alpha. */
8275
8276 if (!OBJECT_P (lhs)
8277 && ! (GET_CODE (lhs) == SUBREG
8278 && (OBJECT_P (SUBREG_REG (lhs))))
8279 && CONST_INT_P (rhs)
8280 && INTVAL (rhs) >= 0
8281 && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT
8282 && INTVAL (rhs) < mode_width
8283 && (new_rtx = extract_left_shift (mode, lhs, INTVAL (rhs))) != 0)
8284 new_rtx = make_extraction (mode, make_compound_operation (new_rtx,
8285 next_code),
8286 0, NULL_RTX, mode_width - INTVAL (rhs),
8287 code == LSHIFTRT, 0, in_code == COMPARE);
8288
8289 break;
8290
8291 case SUBREG:
8292 /* Call ourselves recursively on the inner expression. If we are
8293 narrowing the object and it has a different RTL code from
8294 what it originally did, do this SUBREG as a force_to_mode. */
8295 {
8296 rtx inner = SUBREG_REG (x), simplified;
8297 enum rtx_code subreg_code = in_code;
8298
8299 /* If the SUBREG masks a logical right shift,
8300 make an extraction. */
8301 if (GET_CODE (inner) == LSHIFTRT
8302 && is_a <scalar_int_mode> (GET_MODE (inner), &inner_mode)
8303 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (inner_mode)
8304 && CONST_INT_P (XEXP (inner, 1))
8305 && UINTVAL (XEXP (inner, 1)) < GET_MODE_PRECISION (inner_mode)
8306 && subreg_lowpart_p (x))
8307 {
8308 new_rtx = make_compound_operation (XEXP (inner, 0), next_code);
8309 int width = GET_MODE_PRECISION (inner_mode)
8310 - INTVAL (XEXP (inner, 1));
8311 if (width > mode_width)
8312 width = mode_width;
8313 new_rtx = make_extraction (mode, new_rtx, 0, XEXP (inner, 1),
8314 width, 1, 0, in_code == COMPARE);
8315 break;
8316 }
8317
8318 /* If in_code is COMPARE, it isn't always safe to pass it through
8319 to the recursive make_compound_operation call. */
8320 if (subreg_code == COMPARE
8321 && (!subreg_lowpart_p (x)
8322 || GET_CODE (inner) == SUBREG
8323 /* (subreg:SI (and:DI (reg:DI) (const_int 0x800000000)) 0)
8324 is (const_int 0), rather than
8325 (subreg:SI (lshiftrt:DI (reg:DI) (const_int 35)) 0).
8326 Similarly (subreg:QI (and:SI (reg:SI) (const_int 0x80)) 0)
8327 for non-equality comparisons against 0 is not equivalent
8328 to (subreg:QI (lshiftrt:SI (reg:SI) (const_int 7)) 0). */
8329 || (GET_CODE (inner) == AND
8330 && CONST_INT_P (XEXP (inner, 1))
8331 && partial_subreg_p (x)
8332 && exact_log2 (UINTVAL (XEXP (inner, 1)))
8333 >= GET_MODE_BITSIZE (mode) - 1)))
8334 subreg_code = SET;
8335
8336 tem = make_compound_operation (inner, subreg_code);
8337
8338 simplified
8339 = simplify_subreg (mode, tem, GET_MODE (inner), SUBREG_BYTE (x));
8340 if (simplified)
8341 tem = simplified;
8342
8343 if (GET_CODE (tem) != GET_CODE (inner)
8344 && partial_subreg_p (x)
8345 && subreg_lowpart_p (x))
8346 {
8347 rtx newer
8348 = force_to_mode (tem, mode, HOST_WIDE_INT_M1U, 0);
8349
8350 /* If we have something other than a SUBREG, we might have
8351 done an expansion, so rerun ourselves. */
8352 if (GET_CODE (newer) != SUBREG)
8353 newer = make_compound_operation (newer, in_code);
8354
8355 /* force_to_mode can expand compounds. If it just re-expanded
8356 the compound, use gen_lowpart to convert to the desired
8357 mode. */
8358 if (rtx_equal_p (newer, x)
8359 /* Likewise if it re-expanded the compound only partially.
8360 This happens for SUBREG of ZERO_EXTRACT if they extract
8361 the same number of bits. */
8362 || (GET_CODE (newer) == SUBREG
8363 && (GET_CODE (SUBREG_REG (newer)) == LSHIFTRT
8364 || GET_CODE (SUBREG_REG (newer)) == ASHIFTRT)
8365 && GET_CODE (inner) == AND
8366 && rtx_equal_p (SUBREG_REG (newer), XEXP (inner, 0))))
8367 return gen_lowpart (GET_MODE (x), tem);
8368
8369 return newer;
8370 }
8371
8372 if (simplified)
8373 return tem;
8374 }
8375 break;
8376
8377 default:
8378 break;
8379 }
8380
8381 if (new_rtx)
8382 *x_ptr = gen_lowpart (mode, new_rtx);
8383 *next_code_ptr = next_code;
8384 return NULL_RTX;
8385 }
8386
8387 /* Look at the expression rooted at X. Look for expressions
8388 equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND.
8389 Form these expressions.
8390
8391 Return the new rtx, usually just X.
8392
8393 Also, for machines like the VAX that don't have logical shift insns,
8394 try to convert logical to arithmetic shift operations in cases where
8395 they are equivalent. This undoes the canonicalizations to logical
8396 shifts done elsewhere.
8397
8398 We try, as much as possible, to re-use rtl expressions to save memory.
8399
8400 IN_CODE says what kind of expression we are processing. Normally, it is
8401 SET. In a memory address it is MEM. When processing the arguments of
8402 a comparison or a COMPARE against zero, it is COMPARE, or EQ if more
8403 precisely it is an equality comparison against zero. */
8404
8405 rtx
8406 make_compound_operation (rtx x, enum rtx_code in_code)
8407 {
8408 enum rtx_code code = GET_CODE (x);
8409 const char *fmt;
8410 int i, j;
8411 enum rtx_code next_code;
8412 rtx new_rtx, tem;
8413
8414 /* Select the code to be used in recursive calls. Once we are inside an
8415 address, we stay there. If we have a comparison, set to COMPARE,
8416 but once inside, go back to our default of SET. */
8417
8418 next_code = (code == MEM ? MEM
8419 : ((code == COMPARE || COMPARISON_P (x))
8420 && XEXP (x, 1) == const0_rtx) ? COMPARE
8421 : in_code == COMPARE || in_code == EQ ? SET : in_code);
8422
8423 scalar_int_mode mode;
8424 if (is_a <scalar_int_mode> (GET_MODE (x), &mode))
8425 {
8426 rtx new_rtx = make_compound_operation_int (mode, &x, in_code,
8427 &next_code);
8428 if (new_rtx)
8429 return new_rtx;
8430 code = GET_CODE (x);
8431 }
8432
8433 /* Now recursively process each operand of this operation. We need to
8434 handle ZERO_EXTEND specially so that we don't lose track of the
8435 inner mode. */
8436 if (code == ZERO_EXTEND)
8437 {
8438 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8439 tem = simplify_const_unary_operation (ZERO_EXTEND, GET_MODE (x),
8440 new_rtx, GET_MODE (XEXP (x, 0)));
8441 if (tem)
8442 return tem;
8443 SUBST (XEXP (x, 0), new_rtx);
8444 return x;
8445 }
8446
8447 fmt = GET_RTX_FORMAT (code);
8448 for (i = 0; i < GET_RTX_LENGTH (code); i++)
8449 if (fmt[i] == 'e')
8450 {
8451 new_rtx = make_compound_operation (XEXP (x, i), next_code);
8452 SUBST (XEXP (x, i), new_rtx);
8453 }
8454 else if (fmt[i] == 'E')
8455 for (j = 0; j < XVECLEN (x, i); j++)
8456 {
8457 new_rtx = make_compound_operation (XVECEXP (x, i, j), next_code);
8458 SUBST (XVECEXP (x, i, j), new_rtx);
8459 }
8460
8461 maybe_swap_commutative_operands (x);
8462 return x;
8463 }
8464
8465 /* Given M, see if it is a value that would select a field of bits
8466 within an item, but not the entire word. Return -1 if not.
8467 Otherwise, return the starting position of the field, where 0 is the
8468 low-order bit.
8469
8470 *PLEN is set to the length of the field. */
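
/* For example (illustrative values): M == 0x78 has its lowest 1 bit at
   position 3, and (0x78 >> 3) + 1 == 0x10 is a power of two, so the mask
   selects a field of length 4 starting at bit 3: *PLEN becomes 4 and 3 is
   returned.  M == 0x50 fails the power-of-two-minus-1 test and yields -1.  */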
8471
8472 static int
8473 get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen)
8474 {
8475 /* Get the bit number of the first 1 bit from the right, -1 if none. */
8476 int pos = m ? ctz_hwi (m) : -1;
8477 int len = 0;
8478
8479 if (pos >= 0)
8480 /* Now shift off the low-order zero bits and see if we have a
8481 power of two minus 1. */
8482 len = exact_log2 ((m >> pos) + 1);
8483
8484 if (len <= 0)
8485 pos = -1;
8486
8487 *plen = len;
8488 return pos;
8489 }
8490
8491 /* If X refers to a register that equals REG in value, replace these
8492 references with REG. */
8493 static rtx
8494 canon_reg_for_combine (rtx x, rtx reg)
8495 {
8496 rtx op0, op1, op2;
8497 const char *fmt;
8498 int i;
8499 bool copied;
8500
8501 enum rtx_code code = GET_CODE (x);
8502 switch (GET_RTX_CLASS (code))
8503 {
8504 case RTX_UNARY:
8505 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8506 if (op0 != XEXP (x, 0))
8507 return simplify_gen_unary (GET_CODE (x), GET_MODE (x), op0,
8508 GET_MODE (reg));
8509 break;
8510
8511 case RTX_BIN_ARITH:
8512 case RTX_COMM_ARITH:
8513 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8514 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8515 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8516 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
8517 break;
8518
8519 case RTX_COMPARE:
8520 case RTX_COMM_COMPARE:
8521 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8522 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8523 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8524 return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
8525 GET_MODE (op0), op0, op1);
8526 break;
8527
8528 case RTX_TERNARY:
8529 case RTX_BITFIELD_OPS:
8530 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8531 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8532 op2 = canon_reg_for_combine (XEXP (x, 2), reg);
8533 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1) || op2 != XEXP (x, 2))
8534 return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
8535 GET_MODE (op0), op0, op1, op2);
8536 /* FALLTHRU */
8537
8538 case RTX_OBJ:
8539 if (REG_P (x))
8540 {
8541 if (rtx_equal_p (get_last_value (reg), x)
8542 || rtx_equal_p (reg, get_last_value (x)))
8543 return reg;
8544 else
8545 break;
8546 }
8547
8548 /* fall through */
8549
8550 default:
8551 fmt = GET_RTX_FORMAT (code);
8552 copied = false;
8553 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8554 if (fmt[i] == 'e')
8555 {
8556 rtx op = canon_reg_for_combine (XEXP (x, i), reg);
8557 if (op != XEXP (x, i))
8558 {
8559 if (!copied)
8560 {
8561 copied = true;
8562 x = copy_rtx (x);
8563 }
8564 XEXP (x, i) = op;
8565 }
8566 }
8567 else if (fmt[i] == 'E')
8568 {
8569 int j;
8570 for (j = 0; j < XVECLEN (x, i); j++)
8571 {
8572 rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
8573 if (op != XVECEXP (x, i, j))
8574 {
8575 if (!copied)
8576 {
8577 copied = true;
8578 x = copy_rtx (x);
8579 }
8580 XVECEXP (x, i, j) = op;
8581 }
8582 }
8583 }
8584
8585 break;
8586 }
8587
8588 return x;
8589 }
8590
8591 /* Return X converted to MODE. If the value is already truncated to
8592 MODE we can just return a subreg even though in the general case we
8593 would need an explicit truncation. */
8594
8595 static rtx
8596 gen_lowpart_or_truncate (machine_mode mode, rtx x)
8597 {
8598 if (!CONST_INT_P (x)
8599 && partial_subreg_p (mode, GET_MODE (x))
8600 && !TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x))
8601 && !(REG_P (x) && reg_truncated_to_mode (mode, x)))
8602 {
8603 /* Bit-cast X into an integer mode. */
8604 if (!SCALAR_INT_MODE_P (GET_MODE (x)))
8605 x = gen_lowpart (int_mode_for_mode (GET_MODE (x)).require (), x);
8606 x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode).require (),
8607 x, GET_MODE (x));
8608 }
8609
8610 return gen_lowpart (mode, x);
8611 }
8612
8613 /* See if X can be simplified knowing that we will only refer to it in
8614 MODE and will only refer to those bits that are nonzero in MASK.
8615 If other bits are being computed or if masking operations are done
8616 that select a superset of the bits in MASK, they can sometimes be
8617 ignored.
8618
8619 Return a possibly simplified expression, but always convert X to
8620 MODE. If X is a CONST_INT, AND the CONST_INT with MASK.
8621
8622 If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
8623 are all off in X. This is used when X will be complemented, by either
8624 NOT, NEG, or XOR. */
8625
8626 static rtx
8627 force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask,
8628 int just_select)
8629 {
8630 enum rtx_code code = GET_CODE (x);
8631 int next_select = just_select || code == XOR || code == NOT || code == NEG;
8632 machine_mode op_mode;
8633 unsigned HOST_WIDE_INT nonzero;
8634
8635 /* If this is a CALL or ASM_OPERANDS, don't do anything. Some of the
8636 code below will do the wrong thing since the mode of such an
8637 expression is VOIDmode.
8638
8639 Also do nothing if X is a CLOBBER; this can happen if X was
8640 the return value from a call to gen_lowpart. */
8641 if (code == CALL || code == ASM_OPERANDS || code == CLOBBER)
8642 return x;
8643
8644 /* We want to perform the operation in its present mode unless we know
8645 that the operation is valid in MODE, in which case we do the operation
8646 in MODE. */
8647 op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
8648 && have_insn_for (code, mode))
8649 ? mode : GET_MODE (x));
8650
8651 /* It is not valid to do a right-shift in a narrower mode
8652 than the one it came in with. */
8653 if ((code == LSHIFTRT || code == ASHIFTRT)
8654 && partial_subreg_p (mode, GET_MODE (x)))
8655 op_mode = GET_MODE (x);
8656
8657 /* Truncate MASK to fit OP_MODE. */
8658 if (op_mode)
8659 mask &= GET_MODE_MASK (op_mode);
8660
8661 /* Determine what bits of X are guaranteed to be (non)zero. */
8662 nonzero = nonzero_bits (x, mode);
8663
8664 /* If none of the bits in X are needed, return a zero. */
8665 if (!just_select && (nonzero & mask) == 0 && !side_effects_p (x))
8666 x = const0_rtx;
8667
8668 /* If X is a CONST_INT, return a new one. Do this here since the
8669 test below will fail. */
8670 if (CONST_INT_P (x))
8671 {
8672 if (SCALAR_INT_MODE_P (mode))
8673 return gen_int_mode (INTVAL (x) & mask, mode);
8674 else
8675 {
8676 x = GEN_INT (INTVAL (x) & mask);
8677 return gen_lowpart_common (mode, x);
8678 }
8679 }
8680
8681 /* If X is narrower than MODE and we want all the bits in X's mode, just
8682 get X in the proper mode. */
8683 if (paradoxical_subreg_p (mode, GET_MODE (x))
8684 && (GET_MODE_MASK (GET_MODE (x)) & ~mask) == 0)
8685 return gen_lowpart (mode, x);
8686
8687 /* We can ignore the effect of a SUBREG if it narrows the mode or
8688 if the constant masks to zero all the bits the mode doesn't have. */
8689 if (GET_CODE (x) == SUBREG
8690 && subreg_lowpart_p (x)
8691 && (partial_subreg_p (x)
8692 || (mask
8693 & GET_MODE_MASK (GET_MODE (x))
8694 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))) == 0))
8695 return force_to_mode (SUBREG_REG (x), mode, mask, next_select);
8696
8697 scalar_int_mode int_mode, xmode;
8698 if (is_a <scalar_int_mode> (mode, &int_mode)
8699 && is_a <scalar_int_mode> (GET_MODE (x), &xmode))
8700 /* OP_MODE is either MODE or XMODE, so it must be a scalar
8701 integer too. */
8702 return force_int_to_mode (x, int_mode, xmode,
8703 as_a <scalar_int_mode> (op_mode),
8704 mask, just_select);
8705
8706 return gen_lowpart_or_truncate (mode, x);
8707 }
8708
8709 /* Subroutine of force_to_mode that handles cases in which both X and
8710 the result are scalar integers. MODE is the mode of the result,
8711 XMODE is the mode of X, and OP_MODE says which of MODE or XMODE
8712 is preferred for simplified versions of X. The other arguments
8713 are as for force_to_mode. */
8714
8715 static rtx
8716 force_int_to_mode (rtx x, scalar_int_mode mode, scalar_int_mode xmode,
8717 scalar_int_mode op_mode, unsigned HOST_WIDE_INT mask,
8718 int just_select)
8719 {
8720 enum rtx_code code = GET_CODE (x);
8721 int next_select = just_select || code == XOR || code == NOT || code == NEG;
8722 unsigned HOST_WIDE_INT fuller_mask;
8723 rtx op0, op1, temp;
8724
8725 /* When we have an arithmetic operation, or a shift whose count we
8726 do not know, we need to assume that all bits up to the highest-order
8727 bit in MASK will be needed. This is how we form such a mask. */
8728 if (mask & (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1)))
8729 fuller_mask = HOST_WIDE_INT_M1U;
8730 else
8731 fuller_mask = ((HOST_WIDE_INT_1U << (floor_log2 (mask) + 1))
8732 - 1);
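  /* E.g. (illustrative): MASK == 0x0f00 has floor_log2 == 11, so
     FULLER_MASK == (HOST_WIDE_INT_1U << 12) - 1 == 0x0fff; every lower
     bit is kept because a carry could propagate out of it.  */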
8733
8734 switch (code)
8735 {
8736 case CLOBBER:
8737 /* If X is a (clobber (const_int)), return it since we know we are
8738 generating something that won't match. */
8739 return x;
8740
8741 case SIGN_EXTEND:
8742 case ZERO_EXTEND:
8743 case ZERO_EXTRACT:
8744 case SIGN_EXTRACT:
8745 x = expand_compound_operation (x);
8746 if (GET_CODE (x) != code)
8747 return force_to_mode (x, mode, mask, next_select);
8748 break;
8749
8750 case TRUNCATE:
8751 /* Similarly for a truncate. */
8752 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8753
8754 case AND:
8755 /* If this is an AND with a constant, convert it into an AND
8756 whose constant is the AND of that constant with MASK. If it
8757 remains an AND of MASK, delete it since it is redundant. */
8758
8759 if (CONST_INT_P (XEXP (x, 1)))
8760 {
8761 x = simplify_and_const_int (x, op_mode, XEXP (x, 0),
8762 mask & INTVAL (XEXP (x, 1)));
8763 xmode = op_mode;
8764
8765 /* If X is still an AND, see if it is an AND with a mask that
8766 is just some low-order bits. If so, and it is MASK, we don't
8767 need it. */
8768
8769 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8770 && (INTVAL (XEXP (x, 1)) & GET_MODE_MASK (xmode)) == mask)
8771 x = XEXP (x, 0);
8772
8773 /* If it remains an AND, try making another AND with the bits
8774 in the mode mask that aren't in MASK turned on. If the
8775 constant in the AND is wide enough, this might make a
8776 cheaper constant. */
8777
8778 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8779 && GET_MODE_MASK (xmode) != mask
8780 && HWI_COMPUTABLE_MODE_P (xmode))
8781 {
8782 unsigned HOST_WIDE_INT cval
8783 = UINTVAL (XEXP (x, 1)) | (GET_MODE_MASK (xmode) & ~mask);
8784 rtx y;
8785
8786 y = simplify_gen_binary (AND, xmode, XEXP (x, 0),
8787 gen_int_mode (cval, xmode));
8788 if (set_src_cost (y, xmode, optimize_this_for_speed_p)
8789 < set_src_cost (x, xmode, optimize_this_for_speed_p))
8790 x = y;
8791 }
8792
8793 break;
8794 }
8795
8796 goto binop;
8797
8798 case PLUS:
8799 /* In (and (plus FOO C1) M), if M is a mask that just turns off
8800 low-order bits (as in an alignment operation) and FOO is already
8801 aligned to that boundary, mask C1 to that boundary as well.
8802 This may eliminate that PLUS and, later, the AND. */
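  /* A hypothetical instance: with MASK == 0xfffffff8 (an 8-byte alignment
     mask), FOO known to have its low three bits clear and C1 == 11, the
     expression is rewritten as (plus FOO (const_int 8)); the discarded
     low bits of C1 cannot carry into any bit that MASK keeps.  */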
8803
8804 {
8805 unsigned int width = GET_MODE_PRECISION (mode);
8806 unsigned HOST_WIDE_INT smask = mask;
8807
8808 /* If MODE is narrower than HOST_WIDE_INT and mask is a negative
8809 number, sign extend it. */
8810
8811 if (width < HOST_BITS_PER_WIDE_INT
8812 && (smask & (HOST_WIDE_INT_1U << (width - 1))) != 0)
8813 smask |= HOST_WIDE_INT_M1U << width;
8814
8815 if (CONST_INT_P (XEXP (x, 1))
8816 && pow2p_hwi (- smask)
8817 && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
8818 && (INTVAL (XEXP (x, 1)) & ~smask) != 0)
8819 return force_to_mode (plus_constant (xmode, XEXP (x, 0),
8820 (INTVAL (XEXP (x, 1)) & smask)),
8821 mode, smask, next_select);
8822 }
8823
8824 /* fall through */
8825
8826 case MULT:
8827 /* Substituting into the operands of a widening MULT is not likely to
8828 create RTL matching a machine insn. */
8829 if (code == MULT
8830 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
8831 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
8832 && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
8833 || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
8834 && REG_P (XEXP (XEXP (x, 0), 0))
8835 && REG_P (XEXP (XEXP (x, 1), 0)))
8836 return gen_lowpart_or_truncate (mode, x);
8837
8838 /* For PLUS, MINUS and MULT, we need any bits less significant than the
8839 most significant bit in MASK since carries from those bits will
8840 affect the bits we are interested in. */
8841 mask = fuller_mask;
8842 goto binop;
8843
8844 case MINUS:
8845 /* If X is (minus C Y) where C's least set bit is larger than any bit
8846 in the mask, then we may replace with (neg Y). */
8847 if (CONST_INT_P (XEXP (x, 0))
8848 && least_bit_hwi (UINTVAL (XEXP (x, 0))) > mask)
8849 {
8850 x = simplify_gen_unary (NEG, xmode, XEXP (x, 1), xmode);
8851 return force_to_mode (x, mode, mask, next_select);
8852 }
8853
8854 /* Similarly, if C contains every bit in the fuller_mask, then we may
8855 replace with (not Y). */
8856 if (CONST_INT_P (XEXP (x, 0))
8857 && ((UINTVAL (XEXP (x, 0)) | fuller_mask) == UINTVAL (XEXP (x, 0))))
8858 {
8859 x = simplify_gen_unary (NOT, xmode, XEXP (x, 1), xmode);
8860 return force_to_mode (x, mode, mask, next_select);
8861 }
8862
8863 mask = fuller_mask;
8864 goto binop;
8865
8866 case IOR:
8867 case XOR:
8868 /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
8869 LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
8870 operation which may be a bitfield extraction. Ensure that the
8871 constant we form is not wider than the mode of X. */
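      /* E.g. (illustrative): with MASK == 0xff,
	     (ior (lshiftrt (reg:SI 100) (const_int 8)) (const_int 15))
	 is rewritten as
	     (lshiftrt (ior (reg:SI 100) (const_int 0xf00)) (const_int 8)),
	 pushing the IOR below the shift so a surrounding AND can match as
	 a bitfield extraction.  */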
8872
8873 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8874 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8875 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8876 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8877 && CONST_INT_P (XEXP (x, 1))
8878 && ((INTVAL (XEXP (XEXP (x, 0), 1))
8879 + floor_log2 (INTVAL (XEXP (x, 1))))
8880 < GET_MODE_PRECISION (xmode))
8881 && (UINTVAL (XEXP (x, 1))
8882 & ~nonzero_bits (XEXP (x, 0), xmode)) == 0)
8883 {
8884 temp = gen_int_mode ((INTVAL (XEXP (x, 1)) & mask)
8885 << INTVAL (XEXP (XEXP (x, 0), 1)),
8886 xmode);
8887 temp = simplify_gen_binary (GET_CODE (x), xmode,
8888 XEXP (XEXP (x, 0), 0), temp);
8889 x = simplify_gen_binary (LSHIFTRT, xmode, temp,
8890 XEXP (XEXP (x, 0), 1));
8891 return force_to_mode (x, mode, mask, next_select);
8892 }
8893
8894 binop:
8895 /* For most binary operations, just propagate into the operation and
8896 change the mode if we have an operation of that mode. */
8897
8898 op0 = force_to_mode (XEXP (x, 0), mode, mask, next_select);
8899 op1 = force_to_mode (XEXP (x, 1), mode, mask, next_select);
8900
8901 /* If we ended up truncating both operands, truncate the result of the
8902 operation instead. */
8903 if (GET_CODE (op0) == TRUNCATE
8904 && GET_CODE (op1) == TRUNCATE)
8905 {
8906 op0 = XEXP (op0, 0);
8907 op1 = XEXP (op1, 0);
8908 }
8909
8910 op0 = gen_lowpart_or_truncate (op_mode, op0);
8911 op1 = gen_lowpart_or_truncate (op_mode, op1);
8912
8913 if (op_mode != xmode || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8914 {
8915 x = simplify_gen_binary (code, op_mode, op0, op1);
8916 xmode = op_mode;
8917 }
8918 break;
8919
8920 case ASHIFT:
8921 /* For left shifts, do the same, but just for the first operand.
8922 However, we cannot do anything with shifts where we cannot
8923 guarantee that the counts are smaller than the size of the mode
8924 because such a count will have a different meaning in a
8925 wider mode. */
8926
8927 if (! (CONST_INT_P (XEXP (x, 1))
8928 && INTVAL (XEXP (x, 1)) >= 0
8929 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (mode))
8930 && ! (GET_MODE (XEXP (x, 1)) != VOIDmode
8931 && (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
8932 < (unsigned HOST_WIDE_INT) GET_MODE_PRECISION (mode))))
8933 break;
8934
8935 /* If the shift count is a constant and we can do arithmetic in
8936 the mode of the shift, refine which bits we need. Otherwise, use the
8937 conservative form of the mask. */
8938 if (CONST_INT_P (XEXP (x, 1))
8939 && INTVAL (XEXP (x, 1)) >= 0
8940 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (op_mode)
8941 && HWI_COMPUTABLE_MODE_P (op_mode))
8942 mask >>= INTVAL (XEXP (x, 1));
8943 else
8944 mask = fuller_mask;
8945
8946 op0 = gen_lowpart_or_truncate (op_mode,
8947 force_to_mode (XEXP (x, 0), mode,
8948 mask, next_select));
8949
8950 if (op_mode != xmode || op0 != XEXP (x, 0))
8951 {
8952 x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1));
8953 xmode = op_mode;
8954 }
8955 break;
8956
8957 case LSHIFTRT:
8958 /* Here we can only do something if the shift count is a constant,
8959 this shift constant is valid for the host, and we can do arithmetic
8960 in OP_MODE. */
8961
8962 if (CONST_INT_P (XEXP (x, 1))
8963 && INTVAL (XEXP (x, 1)) >= 0
8964 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
8965 && HWI_COMPUTABLE_MODE_P (op_mode))
8966 {
8967 rtx inner = XEXP (x, 0);
8968 unsigned HOST_WIDE_INT inner_mask;
8969
8970 /* Select the mask of the bits we need for the shift operand. */
8971 inner_mask = mask << INTVAL (XEXP (x, 1));
8972
8973 /* We can only change the mode of the shift if we can do arithmetic
8974 in the mode of the shift and INNER_MASK is no wider than the
8975 width of X's mode. */
8976 if ((inner_mask & ~GET_MODE_MASK (xmode)) != 0)
8977 op_mode = xmode;
8978
8979 inner = force_to_mode (inner, op_mode, inner_mask, next_select);
8980
8981 if (xmode != op_mode || inner != XEXP (x, 0))
8982 {
8983 x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
8984 xmode = op_mode;
8985 }
8986 }
8987
8988 /* If we have (and (lshiftrt FOO C1) C2) where the combination of the
8989 shift and AND produces only copies of the sign bit (C2 is one less
8990 than a power of two), we can do this with just a shift. */
8991
8992 if (GET_CODE (x) == LSHIFTRT
8993 && CONST_INT_P (XEXP (x, 1))
8994 /* The shift puts one of the sign bit copies in the least significant
8995 bit. */
8996 && ((INTVAL (XEXP (x, 1))
8997 + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
8998 >= GET_MODE_PRECISION (xmode))
8999 && pow2p_hwi (mask + 1)
9000 /* Number of bits left after the shift must be more than the mask
9001 needs. */
9002 && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
9003 <= GET_MODE_PRECISION (xmode))
9004 /* Must be more sign bit copies than the mask needs. */
9005 && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
9006 >= exact_log2 (mask + 1)))
9007 {
9008 int nbits = GET_MODE_PRECISION (xmode) - exact_log2 (mask + 1);
9009 x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0),
9010 gen_int_shift_amount (xmode, nbits));
9011 }
9012 goto shiftrt;
9013
9014 case ASHIFTRT:
9015 /* If we are just looking for the sign bit, we don't need this shift at
9016 all, even if it has a variable count. */
9017 if (val_signbit_p (xmode, mask))
9018 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
9019
9020 /* If this is a shift by a constant, get a mask that contains those bits
9021 that are not copies of the sign bit. We then have two cases: If
9022 MASK only includes those bits, this can be a logical shift, which may
9023 allow simplifications. If MASK is a single-bit field not within
9024 those bits, we are requesting a copy of the sign bit and hence can
9025 shift the sign bit to the appropriate location. */
9026
9027 if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0
9028 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
9029 {
9030 unsigned HOST_WIDE_INT nonzero;
9031 int i;
9032
9033 /* If the considered data is wider than HOST_WIDE_INT, we can't
9034 represent a mask for all its bits in a single scalar.
9035 But we only care about the lower bits, so calculate these. */
9036
9037 if (GET_MODE_PRECISION (xmode) > HOST_BITS_PER_WIDE_INT)
9038 {
9039 nonzero = HOST_WIDE_INT_M1U;
9040
9041 /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
9042 is the number of bits a full-width mask would have set.
9043 We need only shift if these are fewer than nonzero can
9044 hold. If not, we must keep all bits set in nonzero. */
9045
9046 if (GET_MODE_PRECISION (xmode) - INTVAL (XEXP (x, 1))
9047 < HOST_BITS_PER_WIDE_INT)
9048 nonzero >>= INTVAL (XEXP (x, 1))
9049 + HOST_BITS_PER_WIDE_INT
9050 - GET_MODE_PRECISION (xmode);
9051 }
9052 else
9053 {
9054 nonzero = GET_MODE_MASK (xmode);
9055 nonzero >>= INTVAL (XEXP (x, 1));
9056 }
9057
9058 if ((mask & ~nonzero) == 0)
9059 {
9060 x = simplify_shift_const (NULL_RTX, LSHIFTRT, xmode,
9061 XEXP (x, 0), INTVAL (XEXP (x, 1)));
9062 if (GET_CODE (x) != ASHIFTRT)
9063 return force_to_mode (x, mode, mask, next_select);
9064 }
9065
9066 else if ((i = exact_log2 (mask)) >= 0)
9067 {
9068 x = simplify_shift_const
9069 (NULL_RTX, LSHIFTRT, xmode, XEXP (x, 0),
9070 GET_MODE_PRECISION (xmode) - 1 - i);
9071
9072 if (GET_CODE (x) != ASHIFTRT)
9073 return force_to_mode (x, mode, mask, next_select);
9074 }
9075 }
9076
9077 /* If MASK is 1, convert this to an LSHIFTRT. This can be done
9078 even if the shift count isn't a constant. */
9079 if (mask == 1)
9080 x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0), XEXP (x, 1));
9081
9082 shiftrt:
9083
9084 /* If this is a zero- or sign-extension operation that just affects bits
9085 we don't care about, remove it. Be sure the call above returned
9086 something that is still a shift. */
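      /* E.g. (illustrative): in SImode with MASK == 0xff,
	     (lshiftrt (ashift FOO (const_int 24)) (const_int 24))
	 only normalizes bits above bit 7, all of which MASK discards
	 (24 <= 32 - 8), so FOO itself can be used instead.  */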
9087
9088 if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT)
9089 && CONST_INT_P (XEXP (x, 1))
9090 && INTVAL (XEXP (x, 1)) >= 0
9091 && (INTVAL (XEXP (x, 1))
9092 <= GET_MODE_PRECISION (xmode) - (floor_log2 (mask) + 1))
9093 && GET_CODE (XEXP (x, 0)) == ASHIFT
9094 && XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
9095 return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
9096 next_select);
9097
9098 break;
9099
9100 case ROTATE:
9101 case ROTATERT:
9102 /* If the shift count is constant and we can do computations
9103 in the mode of X, compute where the bits we care about are.
9104 Otherwise, we can't do anything. Don't change the mode of
9105 the shift or propagate MODE into the shift, though. */
9106 if (CONST_INT_P (XEXP (x, 1))
9107 && INTVAL (XEXP (x, 1)) >= 0)
9108 {
9109 temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE,
9110 xmode, gen_int_mode (mask, xmode),
9111 XEXP (x, 1));
9112 if (temp && CONST_INT_P (temp))
9113 x = simplify_gen_binary (code, xmode,
9114 force_to_mode (XEXP (x, 0), xmode,
9115 INTVAL (temp), next_select),
9116 XEXP (x, 1));
9117 }
9118 break;
9119
9120 case NEG:
9121 /* If we just want the low-order bit, the NEG isn't needed since it
9122 won't change the low-order bit. */
9123 if (mask == 1)
9124 return force_to_mode (XEXP (x, 0), mode, mask, just_select);
9125
9126 /* We need any bits less significant than the most significant bit in
9127 MASK since carries from those bits will affect the bits we are
9128 interested in. */
9129 mask = fuller_mask;
9130 goto unop;
9131
9132 case NOT:
9133 /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the
9134 same as the XOR case above. Ensure that the constant we form is not
9135 wider than the mode of X. */
9136
9137 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
9138 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
9139 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
9140 && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
9141 < GET_MODE_PRECISION (xmode))
9142 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
9143 {
9144 temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)), xmode);
9145 temp = simplify_gen_binary (XOR, xmode, XEXP (XEXP (x, 0), 0), temp);
9146 x = simplify_gen_binary (LSHIFTRT, xmode,
9147 temp, XEXP (XEXP (x, 0), 1));
9148
9149 return force_to_mode (x, mode, mask, next_select);
9150 }
9151
9152 /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
9153 use the full mask inside the NOT. */
9154 mask = fuller_mask;
9155
9156 unop:
9157 op0 = gen_lowpart_or_truncate (op_mode,
9158 force_to_mode (XEXP (x, 0), mode, mask,
9159 next_select));
9160 if (op_mode != xmode || op0 != XEXP (x, 0))
9161 {
9162 x = simplify_gen_unary (code, op_mode, op0, op_mode);
9163 xmode = op_mode;
9164 }
9165 break;
9166
9167 case NE:
9168 /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
9169 in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero,
9170 which is equal to STORE_FLAG_VALUE. */
9171 if ((mask & ~STORE_FLAG_VALUE) == 0
9172 && XEXP (x, 1) == const0_rtx
9173 && GET_MODE (XEXP (x, 0)) == mode
9174 && pow2p_hwi (nonzero_bits (XEXP (x, 0), mode))
9175 && (nonzero_bits (XEXP (x, 0), mode)
9176 == (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE))
9177 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
9178
9179 break;
9180
9181 case IF_THEN_ELSE:
9182 /* We have no way of knowing if the IF_THEN_ELSE can itself be
9183 written in a narrower mode. We play it safe and do not do so. */
9184
9185 op0 = gen_lowpart_or_truncate (xmode,
9186 force_to_mode (XEXP (x, 1), mode,
9187 mask, next_select));
9188 op1 = gen_lowpart_or_truncate (xmode,
9189 force_to_mode (XEXP (x, 2), mode,
9190 mask, next_select));
9191 if (op0 != XEXP (x, 1) || op1 != XEXP (x, 2))
9192 x = simplify_gen_ternary (IF_THEN_ELSE, xmode,
9193 GET_MODE (XEXP (x, 0)), XEXP (x, 0),
9194 op0, op1);
9195 break;
9196
9197 default:
9198 break;
9199 }
9200
9201 /* Ensure we return a value of the proper mode. */
9202 return gen_lowpart_or_truncate (mode, x);
9203 }
9204
9205 /* Return nonzero if X is an expression that has one of two values depending on
9206 whether some other value is zero or nonzero. In that case, we return the
9207 value that is being tested, *PTRUE is set to the value if the rtx being
9208 returned has a nonzero value, and *PFALSE is set to the other alternative.
9209
9210 If we return zero, we set *PTRUE and *PFALSE to X. */
9211
9212 static rtx
9213 if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse)
9214 {
9215 machine_mode mode = GET_MODE (x);
9216 enum rtx_code code = GET_CODE (x);
9217 rtx cond0, cond1, true0, true1, false0, false1;
9218 unsigned HOST_WIDE_INT nz;
9219 scalar_int_mode int_mode;
9220
9221 /* If we are comparing a value against zero, we are done. */
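  /* E.g. (illustrative): for X == (ne (reg:SI 100) (const_int 0)) we
     return (reg:SI 100), setting *PTRUE to const_true_rtx and *PFALSE
     to const0_rtx.  */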
9222 if ((code == NE || code == EQ)
9223 && XEXP (x, 1) == const0_rtx)
9224 {
9225 *ptrue = (code == NE) ? const_true_rtx : const0_rtx;
9226 *pfalse = (code == NE) ? const0_rtx : const_true_rtx;
9227 return XEXP (x, 0);
9228 }
9229
9230 /* If this is a unary operation whose operand has one of two values, apply
9231 our opcode to compute those values. */
9232 else if (UNARY_P (x)
9233 && (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0)
9234 {
9235 *ptrue = simplify_gen_unary (code, mode, true0, GET_MODE (XEXP (x, 0)));
9236 *pfalse = simplify_gen_unary (code, mode, false0,
9237 GET_MODE (XEXP (x, 0)));
9238 return cond0;
9239 }
9240
9241 /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
9242 make can't possibly match and would suppress other optimizations. */
9243 else if (code == COMPARE)
9244 ;
9245
9246 /* If this is a binary operation, see if either side has only one of two
9247 values. If either one does or if both do and they are conditional on
9248 the same value, compute the new true and false values. */
9249 else if (BINARY_P (x))
9250 {
9251 rtx op0 = XEXP (x, 0);
9252 rtx op1 = XEXP (x, 1);
9253 cond0 = if_then_else_cond (op0, &true0, &false0);
9254 cond1 = if_then_else_cond (op1, &true1, &false1);
9255
9256 if ((cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1))
9257 && (REG_P (op0) || REG_P (op1)))
9258 {
9259 /* Try to enable a simplification by undoing work done by
9260 if_then_else_cond if it converted a REG into something more
9261 complex. */
9262 if (REG_P (op0))
9263 {
9264 cond0 = 0;
9265 true0 = false0 = op0;
9266 }
9267 else
9268 {
9269 cond1 = 0;
9270 true1 = false1 = op1;
9271 }
9272 }
9273
9274 if ((cond0 != 0 || cond1 != 0)
9275 && ! (cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1)))
9276 {
9277 /* If if_then_else_cond returned zero, then true/false are the
9278 same rtl. We must copy one of them to prevent invalid rtl
9279 sharing. */
9280 if (cond0 == 0)
9281 true0 = copy_rtx (true0);
9282 else if (cond1 == 0)
9283 true1 = copy_rtx (true1);
9284
9285 if (COMPARISON_P (x))
9286 {
9287 *ptrue = simplify_gen_relational (code, mode, VOIDmode,
9288 true0, true1);
9289 *pfalse = simplify_gen_relational (code, mode, VOIDmode,
9290 false0, false1);
9291 }
9292 else
9293 {
9294 *ptrue = simplify_gen_binary (code, mode, true0, true1);
9295 *pfalse = simplify_gen_binary (code, mode, false0, false1);
9296 }
9297
9298 return cond0 ? cond0 : cond1;
9299 }
9300
9301 /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
9302 operands is zero when the other is nonzero, and vice-versa,
9303 and STORE_FLAG_VALUE is 1 or -1. */
9304
9305 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
9306 && (code == PLUS || code == IOR || code == XOR || code == MINUS
9307 || code == UMAX)
9308 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
9309 {
9310 rtx op0 = XEXP (XEXP (x, 0), 1);
9311 rtx op1 = XEXP (XEXP (x, 1), 1);
9312
9313 cond0 = XEXP (XEXP (x, 0), 0);
9314 cond1 = XEXP (XEXP (x, 1), 0);
9315
9316 if (COMPARISON_P (cond0)
9317 && COMPARISON_P (cond1)
9318 && SCALAR_INT_MODE_P (mode)
9319 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
9320 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
9321 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
9322 || ((swap_condition (GET_CODE (cond0))
9323 == reversed_comparison_code (cond1, NULL))
9324 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
9325 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
9326 && ! side_effects_p (x))
9327 {
9328 *ptrue = simplify_gen_binary (MULT, mode, op0, const_true_rtx);
9329 *pfalse = simplify_gen_binary (MULT, mode,
9330 (code == MINUS
9331 ? simplify_gen_unary (NEG, mode,
9332 op1, mode)
9333 : op1),
9334 const_true_rtx);
9335 return cond0;
9336 }
9337 }
9338
9339 /* Similarly for MULT, AND and UMIN, except that for these the result
9340 is always zero. */
9341 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
9342 && (code == MULT || code == AND || code == UMIN)
9343 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
9344 {
9345 cond0 = XEXP (XEXP (x, 0), 0);
9346 cond1 = XEXP (XEXP (x, 1), 0);
9347
9348 if (COMPARISON_P (cond0)
9349 && COMPARISON_P (cond1)
9350 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
9351 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
9352 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
9353 || ((swap_condition (GET_CODE (cond0))
9354 == reversed_comparison_code (cond1, NULL))
9355 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
9356 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
9357 && ! side_effects_p (x))
9358 {
9359 *ptrue = *pfalse = const0_rtx;
9360 return cond0;
9361 }
9362 }
9363 }
9364
9365 else if (code == IF_THEN_ELSE)
9366 {
9367 /* If we have IF_THEN_ELSE already, extract the condition and
9368 canonicalize it if it is NE or EQ. */
9369 cond0 = XEXP (x, 0);
9370 *ptrue = XEXP (x, 1), *pfalse = XEXP (x, 2);
9371 if (GET_CODE (cond0) == NE && XEXP (cond0, 1) == const0_rtx)
9372 return XEXP (cond0, 0);
9373 else if (GET_CODE (cond0) == EQ && XEXP (cond0, 1) == const0_rtx)
9374 {
9375 *ptrue = XEXP (x, 2), *pfalse = XEXP (x, 1);
9376 return XEXP (cond0, 0);
9377 }
9378 else
9379 return cond0;
9380 }
9381
9382 /* If X is a SUBREG, we can narrow both the true and false values
9383 of the inner expression, if there is a condition. */
9384 else if (code == SUBREG
9385 && (cond0 = if_then_else_cond (SUBREG_REG (x), &true0,
9386 &false0)) != 0)
9387 {
9388 true0 = simplify_gen_subreg (mode, true0,
9389 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
9390 false0 = simplify_gen_subreg (mode, false0,
9391 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
9392 if (true0 && false0)
9393 {
9394 *ptrue = true0;
9395 *pfalse = false0;
9396 return cond0;
9397 }
9398 }
9399
9400 /* If X is a constant, this isn't special and will cause confusion
9401 if we treat it as such. Likewise if it is equivalent to a constant. */
9402 else if (CONSTANT_P (x)
9403 || ((cond0 = get_last_value (x)) != 0 && CONSTANT_P (cond0)))
9404 ;
9405
9406 /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that
9407 will be least confusing to the rest of the compiler. */
9408 else if (mode == BImode)
9409 {
9410 *ptrue = GEN_INT (STORE_FLAG_VALUE), *pfalse = const0_rtx;
9411 return x;
9412 }
9413
9414 /* If X is known to be either 0 or -1, those are the true and
9415 false values when testing X. */
9416 else if (x == constm1_rtx || x == const0_rtx
9417 || (is_a <scalar_int_mode> (mode, &int_mode)
9418 && (num_sign_bit_copies (x, int_mode)
9419 == GET_MODE_PRECISION (int_mode))))
9420 {
9421 *ptrue = constm1_rtx, *pfalse = const0_rtx;
9422 return x;
9423 }
9424
9425 /* Likewise for 0 or a single bit. */
9426 else if (HWI_COMPUTABLE_MODE_P (mode)
9427 && pow2p_hwi (nz = nonzero_bits (x, mode)))
9428 {
9429 *ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx;
9430 return x;
9431 }
9432
9433 /* Otherwise fail; show no condition with true and false values the same. */
9434 *ptrue = *pfalse = x;
9435 return 0;
9436 }
9437
9438 /* Return the value of expression X given the fact that condition COND
9439 is known to be true when applied to REG as its first operand and VAL
9440 as its second. X is known to not be shared and so can be modified in
9441 place.
9442
9443 We only handle the simplest cases, and specifically those cases that
9444 arise with IF_THEN_ELSE expressions. */
9445
9446 static rtx
9447 known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val)
9448 {
9449 enum rtx_code code = GET_CODE (x);
9450 const char *fmt;
9451 int i, j;
9452
9453 if (side_effects_p (x))
9454 return x;
9455
9456 /* If either operand of the condition is a floating point value,
9457 then we have to avoid collapsing an EQ comparison. */
9458 if (cond == EQ
9459 && rtx_equal_p (x, reg)
9460 && ! FLOAT_MODE_P (GET_MODE (x))
9461 && ! FLOAT_MODE_P (GET_MODE (val)))
9462 return val;
9463
9464 if (cond == UNEQ && rtx_equal_p (x, reg))
9465 return val;
9466
9467 /* If X is (abs REG) and we know something about REG's relationship
9468 with zero, we may be able to simplify this. */
9469
9470 if (code == ABS && rtx_equal_p (XEXP (x, 0), reg) && val == const0_rtx)
9471 switch (cond)
9472 {
9473 case GE: case GT: case EQ:
9474 return XEXP (x, 0);
9475 case LT: case LE:
9476 return simplify_gen_unary (NEG, GET_MODE (XEXP (x, 0)),
9477 XEXP (x, 0),
9478 GET_MODE (XEXP (x, 0)));
9479 default:
9480 break;
9481 }
9482
9483 /* The only other cases we handle are MIN, MAX, and comparisons if the
9484 operands are the same as REG and VAL. */
9485
9486 else if (COMPARISON_P (x) || COMMUTATIVE_ARITH_P (x))
9487 {
9488 if (rtx_equal_p (XEXP (x, 0), val))
9489 {
9490 std::swap (val, reg);
9491 cond = swap_condition (cond);
9492 }
9493
9494 if (rtx_equal_p (XEXP (x, 0), reg) && rtx_equal_p (XEXP (x, 1), val))
9495 {
9496 if (COMPARISON_P (x))
9497 {
9498 if (comparison_dominates_p (cond, code))
9499 return VECTOR_MODE_P (GET_MODE (x)) ? x : const_true_rtx;
9500
9501 code = reversed_comparison_code (x, NULL);
9502 if (code != UNKNOWN
9503 && comparison_dominates_p (cond, code))
9504 return CONST0_RTX (GET_MODE (x));
9505 else
9506 return x;
9507 }
9508 else if (code == SMAX || code == SMIN
9509 || code == UMIN || code == UMAX)
9510 {
9511 int unsignedp = (code == UMIN || code == UMAX);
9512
9513 /* Do not reverse the condition when it is NE or EQ.
9514 This is because we cannot conclude anything about
9515 the value of 'SMAX (x, y)' when x is not equal to y,
9516 but we can when x equals y. */
9517 if ((code == SMAX || code == UMAX)
9518 && ! (cond == EQ || cond == NE))
9519 cond = reverse_condition (cond);
9520
9521 switch (cond)
9522 {
9523 case GE: case GT:
9524 return unsignedp ? x : XEXP (x, 1);
9525 case LE: case LT:
9526 return unsignedp ? x : XEXP (x, 0);
9527 case GEU: case GTU:
9528 return unsignedp ? XEXP (x, 1) : x;
9529 case LEU: case LTU:
9530 return unsignedp ? XEXP (x, 0) : x;
9531 default:
9532 break;
9533 }
9534 }
9535 }
9536 }
9537 else if (code == SUBREG)
9538 {
9539 machine_mode inner_mode = GET_MODE (SUBREG_REG (x));
9540 rtx new_rtx, r = known_cond (SUBREG_REG (x), cond, reg, val);
9541
9542 if (SUBREG_REG (x) != r)
9543 {
9544 /* We must simplify subreg here, before we lose track of the
9545 original inner_mode. */
9546 new_rtx = simplify_subreg (GET_MODE (x), r,
9547 inner_mode, SUBREG_BYTE (x));
9548 if (new_rtx)
9549 return new_rtx;
9550 else
9551 SUBST (SUBREG_REG (x), r);
9552 }
9553
9554 return x;
9555 }
9556 /* We don't have to handle SIGN_EXTEND here, because even in the
9557 case of replacing something with a modeless CONST_INT, a
9558 CONST_INT is already (supposed to be) a valid sign extension for
9559 its narrower mode, which implies it's already properly
9560 sign-extended for the wider mode. Now, for ZERO_EXTEND, the
9561 story is different. */
9562 else if (code == ZERO_EXTEND)
9563 {
9564 machine_mode inner_mode = GET_MODE (XEXP (x, 0));
9565 rtx new_rtx, r = known_cond (XEXP (x, 0), cond, reg, val);
9566
9567 if (XEXP (x, 0) != r)
9568 {
9569 /* We must simplify the zero_extend here, before we lose
9570 track of the original inner_mode. */
9571 new_rtx = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
9572 r, inner_mode);
9573 if (new_rtx)
9574 return new_rtx;
9575 else
9576 SUBST (XEXP (x, 0), r);
9577 }
9578
9579 return x;
9580 }
9581
9582 fmt = GET_RTX_FORMAT (code);
9583 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
9584 {
9585 if (fmt[i] == 'e')
9586 SUBST (XEXP (x, i), known_cond (XEXP (x, i), cond, reg, val));
9587 else if (fmt[i] == 'E')
9588 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9589 SUBST (XVECEXP (x, i, j), known_cond (XVECEXP (x, i, j),
9590 cond, reg, val));
9591 }
9592
9593 return x;
9594 }
9595
9596 /* See if X and Y are equal for the purposes of seeing if we can rewrite an
9597 assignment as a field assignment. */
9598
9599 static int
9600 rtx_equal_for_field_assignment_p (rtx x, rtx y, bool widen_x)
9601 {
9602 if (widen_x && GET_MODE (x) != GET_MODE (y))
9603 {
9604 if (paradoxical_subreg_p (GET_MODE (x), GET_MODE (y)))
9605 return 0;
9606 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
9607 return 0;
9608 x = adjust_address_nv (x, GET_MODE (y),
9609 byte_lowpart_offset (GET_MODE (y),
9610 GET_MODE (x)));
9611 }
9612
9613 if (x == y || rtx_equal_p (x, y))
9614 return 1;
9615
9616 if (x == 0 || y == 0 || GET_MODE (x) != GET_MODE (y))
9617 return 0;
9618
9619 /* Check for a paradoxical SUBREG of a MEM compared with the MEM.
9620 Note that all SUBREGs of MEM are paradoxical; otherwise they
9621 would have been rewritten. */
9622 if (MEM_P (x) && GET_CODE (y) == SUBREG
9623 && MEM_P (SUBREG_REG (y))
9624 && rtx_equal_p (SUBREG_REG (y),
9625 gen_lowpart (GET_MODE (SUBREG_REG (y)), x)))
9626 return 1;
9627
9628 if (MEM_P (y) && GET_CODE (x) == SUBREG
9629 && MEM_P (SUBREG_REG (x))
9630 && rtx_equal_p (SUBREG_REG (x),
9631 gen_lowpart (GET_MODE (SUBREG_REG (x)), y)))
9632 return 1;
9633
9634 /* We used to see if get_last_value of X and Y were the same but that's
9635 not correct. In one direction, we'll cause the assignment to have
9636 the wrong destination and in the other, we'll import a register into this
9637 insn that might already have been dead. So fail if none of the
9638 above cases are true. */
9639 return 0;
9640 }
9641
9642 /* See if X, a SET operation, can be rewritten as a bit-field assignment.
9643 Return that assignment if so.
9644
9645 We only handle the most common cases. */
9646
9647 static rtx
9648 make_field_assignment (rtx x)
9649 {
9650 rtx dest = SET_DEST (x);
9651 rtx src = SET_SRC (x);
9652 rtx assign;
9653 rtx rhs, lhs;
9654 HOST_WIDE_INT c1;
9655 HOST_WIDE_INT pos;
9656 unsigned HOST_WIDE_INT len;
9657 rtx other;
9658
9659 /* All the rules in this function are specific to scalar integers. */
9660 scalar_int_mode mode;
9661 if (!is_a <scalar_int_mode> (GET_MODE (dest), &mode))
9662 return x;
9663
9664 /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
9665 a clear of a one-bit field. We will have changed it to
9666 (and (rotate (const_int -2) POS) DEST), so check for that. Also check
9667 for a SUBREG. */
9668
9669 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE
9670 && CONST_INT_P (XEXP (XEXP (src, 0), 0))
9671 && INTVAL (XEXP (XEXP (src, 0), 0)) == -2
9672 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9673 {
9674 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9675 1, 1, 1, 0);
9676 if (assign != 0)
9677 return gen_rtx_SET (assign, const0_rtx);
9678 return x;
9679 }
9680
9681 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
9682 && subreg_lowpart_p (XEXP (src, 0))
9683 && partial_subreg_p (XEXP (src, 0))
9684 && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
9685 && CONST_INT_P (XEXP (SUBREG_REG (XEXP (src, 0)), 0))
9686 && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
9687 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9688 {
9689 assign = make_extraction (VOIDmode, dest, 0,
9690 XEXP (SUBREG_REG (XEXP (src, 0)), 1),
9691 1, 1, 1, 0);
9692 if (assign != 0)
9693 return gen_rtx_SET (assign, const0_rtx);
9694 return x;
9695 }
9696
9697 /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
9698 one-bit field. */
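  /* E.g. (illustrative): a SET whose source is
	 (ior (ashift (const_int 1) (reg:SI 101)) (reg:SI 100))
     with destination (reg:SI 100) becomes
	 (set (zero_extract:SI (reg:SI 100) (const_int 1) (reg:SI 101))
	      (const_int 1)).  */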
9699 if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT
9700 && XEXP (XEXP (src, 0), 0) == const1_rtx
9701 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9702 {
9703 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9704 1, 1, 1, 0);
9705 if (assign != 0)
9706 return gen_rtx_SET (assign, const1_rtx);
9707 return x;
9708 }
9709
9710 /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the
9711 SRC is an AND with all bits of that field set, then we can discard
9712 the AND. */
9713 if (GET_CODE (dest) == ZERO_EXTRACT
9714 && CONST_INT_P (XEXP (dest, 1))
9715 && GET_CODE (src) == AND
9716 && CONST_INT_P (XEXP (src, 1)))
9717 {
9718 HOST_WIDE_INT width = INTVAL (XEXP (dest, 1));
9719 unsigned HOST_WIDE_INT and_mask = INTVAL (XEXP (src, 1));
9720 unsigned HOST_WIDE_INT ze_mask;
9721
9722 if (width >= HOST_BITS_PER_WIDE_INT)
9723 ze_mask = -1;
9724 else
9725 ze_mask = ((unsigned HOST_WIDE_INT)1 << width) - 1;
9726
9727 /* Complete overlap. We can remove the source AND. */
9728 if ((and_mask & ze_mask) == ze_mask)
9729 return gen_rtx_SET (dest, XEXP (src, 0));
9730
9731 /* Partial overlap. We can reduce the source AND. */
9732 if ((and_mask & ze_mask) != and_mask)
9733 {
9734 src = gen_rtx_AND (mode, XEXP (src, 0),
9735 gen_int_mode (and_mask & ze_mask, mode));
9736 return gen_rtx_SET (dest, src);
9737 }
9738 }
9739
9740 /* The other case we handle is assignments into a constant-position
9741 field. They look like (ior/xor (and DEST C1) OTHER). If C1 represents
9742 a mask that has all one bits except for a group of zero bits and
9743 OTHER is known to have zeros where C1 has ones, this is such an
9744 assignment. Compute the position and length from C1. Shift OTHER
9745 to the appropriate position, force it to the required mode, and
9746 make the extraction. Check for the AND in both operands. */
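  /* Worked example (illustrative): in SImode,
	 (ior (and DEST (const_int 0xffffff0f)) OTHER)
     has ~C1 == 0xf0, giving POS == 4 and LEN == 4; if OTHER is known to
     be zero wherever C1 has ones, the IOR only writes bits 4..7 of DEST
     and can become an assignment to a ZERO_EXTRACT of DEST.  */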
9747
9748 /* One or more SUBREGs might obscure the constant-position field
9749 assignment. The first one we are likely to encounter is an outer
9750 narrowing SUBREG, which we can just strip for the purposes of
9751 identifying the constant-field assignment. */
9752 scalar_int_mode src_mode = mode;
9753 if (GET_CODE (src) == SUBREG
9754 && subreg_lowpart_p (src)
9755 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (src)), &src_mode))
9756 src = SUBREG_REG (src);
9757
9758 if (GET_CODE (src) != IOR && GET_CODE (src) != XOR)
9759 return x;
9760
9761 rhs = expand_compound_operation (XEXP (src, 0));
9762 lhs = expand_compound_operation (XEXP (src, 1));
9763
9764 if (GET_CODE (rhs) == AND
9765 && CONST_INT_P (XEXP (rhs, 1))
9766 && rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest))
9767 c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9768 /* The second SUBREG that might get in the way is a paradoxical
9769 SUBREG around the first operand of the AND. We want to
9770 pretend the operand is as wide as the destination here. We
9771 do this by adjusting the MEM to wider mode for the sole
9772 purpose of the call to rtx_equal_for_field_assignment_p. Also
9773 note this trick only works for MEMs. */
9774 else if (GET_CODE (rhs) == AND
9775 && paradoxical_subreg_p (XEXP (rhs, 0))
9776 && MEM_P (SUBREG_REG (XEXP (rhs, 0)))
9777 && CONST_INT_P (XEXP (rhs, 1))
9778 && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (rhs, 0)),
9779 dest, true))
9780 c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9781 else if (GET_CODE (lhs) == AND
9782 && CONST_INT_P (XEXP (lhs, 1))
9783 && rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest))
9784 c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9785 /* Likewise when a paradoxical SUBREG hides the first operand of
9786 this AND; the same MEM-widening trick as in the RHS case above
9787 applies.  */
9791 else if (GET_CODE (lhs) == AND
9792 && paradoxical_subreg_p (XEXP (lhs, 0))
9793 && MEM_P (SUBREG_REG (XEXP (lhs, 0)))
9794 && CONST_INT_P (XEXP (lhs, 1))
9795 && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (lhs, 0)),
9796 dest, true))
9797 c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9798 else
9799 return x;
9800
9801 pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (mode), &len);
9802 if (pos < 0
9803 || pos + len > GET_MODE_PRECISION (mode)
9804 || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
9805 || (c1 & nonzero_bits (other, mode)) != 0)
9806 return x;
9807
9808 assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0);
9809 if (assign == 0)
9810 return x;
9811
9812 /* The mode to use for the source is the mode of the assignment, or of
9813 what is inside a possible STRICT_LOW_PART. */
9814 machine_mode new_mode = (GET_CODE (assign) == STRICT_LOW_PART
9815 ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));
9816
9817 /* Shift OTHER right POS places and make it the source, restricting it
9818 to the proper length and mode. */
9819
9820 src = canon_reg_for_combine (simplify_shift_const (NULL_RTX, LSHIFTRT,
9821 src_mode, other, pos),
9822 dest);
9823 src = force_to_mode (src, new_mode,
9824 len >= HOST_BITS_PER_WIDE_INT
9825 ? HOST_WIDE_INT_M1U
9826 : (HOST_WIDE_INT_1U << len) - 1,
9827 0);
9828
9829 /* If SRC is masked by an AND that does not make a difference in
9830 the value being stored, strip it. */
9831 if (GET_CODE (assign) == ZERO_EXTRACT
9832 && CONST_INT_P (XEXP (assign, 1))
9833 && INTVAL (XEXP (assign, 1)) < HOST_BITS_PER_WIDE_INT
9834 && GET_CODE (src) == AND
9835 && CONST_INT_P (XEXP (src, 1))
9836 && UINTVAL (XEXP (src, 1))
9837 == (HOST_WIDE_INT_1U << INTVAL (XEXP (assign, 1))) - 1)
9838 src = XEXP (src, 0);
9839
9840 return gen_rtx_SET (assign, src);
9841 }
9842
9843 /* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c)
9844 if so. */
9845
9846 static rtx
9847 apply_distributive_law (rtx x)
9848 {
9849 enum rtx_code code = GET_CODE (x);
9850 enum rtx_code inner_code;
9851 rtx lhs, rhs, other;
9852 rtx tem;
9853
9854 /* Distributivity is not true for floating point as it can change the
9855 value. So we don't do it unless -funsafe-math-optimizations. */
9856 if (FLOAT_MODE_P (GET_MODE (x))
9857 && ! flag_unsafe_math_optimizations)
9858 return x;
9859
9860 /* The outer operation can only be one of the following: */
9861 if (code != IOR && code != AND && code != XOR
9862 && code != PLUS && code != MINUS)
9863 return x;
9864
9865 lhs = XEXP (x, 0);
9866 rhs = XEXP (x, 1);
9867
9868 /* If either operand is a primitive we can't do anything, so get out
9869 fast. */
9870 if (OBJECT_P (lhs) || OBJECT_P (rhs))
9871 return x;
9872
9873 lhs = expand_compound_operation (lhs);
9874 rhs = expand_compound_operation (rhs);
9875 inner_code = GET_CODE (lhs);
9876 if (inner_code != GET_CODE (rhs))
9877 return x;
9878
9879 /* See if the inner and outer operations distribute. */
9880 switch (inner_code)
9881 {
9882 case LSHIFTRT:
9883 case ASHIFTRT:
9884 case AND:
9885 case IOR:
9886 /* These all distribute except over PLUS and MINUS. */
9887 if (code == PLUS || code == MINUS)
9888 return x;
9889 break;
9890
9891 case MULT:
9892 if (code != PLUS && code != MINUS)
9893 return x;
9894 break;
9895
9896 case ASHIFT:
9897 /* This is also a multiply, so it distributes over everything. */
9898 break;
9899
9900 /* This used to handle SUBREG, but that turned out to be counter-
9901 productive, since (subreg (op ...)) usually is not handled by
9902 insn patterns, and this "optimization" therefore transformed
9903 recognizable patterns into unrecognizable ones. Therefore the
9904 SUBREG case was removed from here.
9905
9906 It is possible that distributing SUBREG over arithmetic operations
9907 leads to an intermediate result that can then be optimized further,
9908 e.g. by moving the outer SUBREG to the other side of a SET as done
9909 in simplify_set. This seems to have been the original intent of
9910 handling SUBREGs here.
9911
9912 However, with current GCC this does not appear to actually happen,
9913 at least on major platforms. If some case is found where removing
9914 the SUBREG case here prevents follow-on optimizations, distributing
9915 SUBREGs ought to be re-added at that place, e.g. in simplify_set. */
9916
9917 default:
9918 return x;
9919 }
9920
9921 /* Set LHS and RHS to the inner operands (A and B in the example
9922 above) and set OTHER to the common operand (C in the example).
9923 There is only one way to do this unless the inner operation is
9924 commutative. */
9925 if (COMMUTATIVE_ARITH_P (lhs)
9926 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 0)))
9927 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 1);
9928 else if (COMMUTATIVE_ARITH_P (lhs)
9929 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 1)))
9930 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 0);
9931 else if (COMMUTATIVE_ARITH_P (lhs)
9932 && rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 0)))
9933 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 1);
9934 else if (rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 1)))
9935 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 0);
9936 else
9937 return x;
9938
9939 /* Form the new inner operation, seeing if it simplifies first. */
9940 tem = simplify_gen_binary (code, GET_MODE (x), lhs, rhs);
9941
9942 /* There is one exception to the general way of distributing:
9943 (a | c) ^ (b | c) -> (a ^ b) & ~c */
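/* A quick check of that identity, bit by bit: where c is 1, both
   (a | c) and (b | c) are 1, so their XOR is 0, matching the & ~c;
   where c is 0, both sides reduce to a ^ b.  */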
9944 if (code == XOR && inner_code == IOR)
9945 {
9946 inner_code = AND;
9947 other = simplify_gen_unary (NOT, GET_MODE (x), other, GET_MODE (x));
9948 }
9949
9950 /* We may be able to continue distributing the result, so call
9951 ourselves recursively on the inner operation before forming the
9952 outer operation, which we return. */
9953 return simplify_gen_binary (inner_code, GET_MODE (x),
9954 apply_distributive_law (tem), other);
9955 }
9956
9957 /* See if X is of the form (* (+ A B) C), and if so convert to
9958 (+ (* A C) (* B C)) and try to simplify.
9959
9960 Most of the time, this results in no change. However, if some of
9961 the operands are the same or inverses of each other, simplifications
9962 will result.
9963
9964 For example, (and (ior A B) (not B)) can occur as the result of
9965 expanding a bit field assignment. When we apply the distributive
9966 law to this, we get (ior (and A (not B)) (and B (not B))),
9967 which then simplifies to (and A (not B)).
9968
9969 Note that we do not check the validity of applying the inverse
9970 distributive law; such a check would be pointless, since the few
9971 places that call this routine can verify it themselves.
9972
9973 N is the index of the term that is decomposed (the arithmetic operation,
9974 i.e. (+ A B) in the first example above). !N is the index of the term that
9975 is distributed, i.e. of C in the first example above. */
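/* For example (a hypothetical call): distribute_and_simplify_rtx on
   (and (ior A B) C) with N == 0 decomposes (ior A B), distributes C,
   and forms (ior (and A C) (and B C)); after reapplying
   apply_distributive_law, the result is returned only if it is no
   longer an AND and is cheaper by set_src_cost.  */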
9976 static rtx
9977 distribute_and_simplify_rtx (rtx x, int n)
9978 {
9979 machine_mode mode;
9980 enum rtx_code outer_code, inner_code;
9981 rtx decomposed, distributed, inner_op0, inner_op1, new_op0, new_op1, tmp;
9982
9983 /* Distributivity is not true for floating point as it can change the
9984 value. So we don't do it unless -funsafe-math-optimizations. */
9985 if (FLOAT_MODE_P (GET_MODE (x))
9986 && ! flag_unsafe_math_optimizations)
9987 return NULL_RTX;
9988
9989 decomposed = XEXP (x, n);
9990 if (!ARITHMETIC_P (decomposed))
9991 return NULL_RTX;
9992
9993 mode = GET_MODE (x);
9994 outer_code = GET_CODE (x);
9995 distributed = XEXP (x, !n);
9996
9997 inner_code = GET_CODE (decomposed);
9998 inner_op0 = XEXP (decomposed, 0);
9999 inner_op1 = XEXP (decomposed, 1);
10000
10001 /* Special case (and (xor B C) (not A)), which is equivalent to
10002 (xor (ior A B) (ior A C)) */
10003 if (outer_code == AND && inner_code == XOR && GET_CODE (distributed) == NOT)
10004 {
10005 distributed = XEXP (distributed, 0);
10006 outer_code = IOR;
10007 }
10008
10009 if (n == 0)
10010 {
10011 /* Distribute the second term. */
10012 new_op0 = simplify_gen_binary (outer_code, mode, inner_op0, distributed);
10013 new_op1 = simplify_gen_binary (outer_code, mode, inner_op1, distributed);
10014 }
10015 else
10016 {
10017 /* Distribute the first term. */
10018 new_op0 = simplify_gen_binary (outer_code, mode, distributed, inner_op0);
10019 new_op1 = simplify_gen_binary (outer_code, mode, distributed, inner_op1);
10020 }
10021
10022 tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
10023 new_op0, new_op1));
10024 if (GET_CODE (tmp) != outer_code
10025 && (set_src_cost (tmp, mode, optimize_this_for_speed_p)
10026 < set_src_cost (x, mode, optimize_this_for_speed_p)))
10027 return tmp;
10028
10029 return NULL_RTX;
10030 }
10031
10032 /* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done
10033 in MODE. Return an equivalent form, if different from (and VAROP
10034 (const_int CONSTOP)). Otherwise, return NULL_RTX. */
10035
10036 static rtx
10037 simplify_and_const_int_1 (scalar_int_mode mode, rtx varop,
10038 unsigned HOST_WIDE_INT constop)
10039 {
10040 unsigned HOST_WIDE_INT nonzero;
10041 unsigned HOST_WIDE_INT orig_constop;
10042 rtx orig_varop;
10043 int i;
10044
10045 orig_varop = varop;
10046 orig_constop = constop;
10047 if (GET_CODE (varop) == CLOBBER)
10048 return NULL_RTX;
10049
10050 /* Simplify VAROP knowing that we will be only looking at some of the
10051 bits in it.
10052
10053 Note by passing in CONSTOP, we guarantee that the bits not set in
10054 CONSTOP are not significant and will never be examined. We must
10055 ensure that is the case by explicitly masking out those bits
10056 before returning. */
10057 varop = force_to_mode (varop, mode, constop, 0);
10058
10059 /* If VAROP is a CLOBBER, we will fail so return it. */
10060 if (GET_CODE (varop) == CLOBBER)
10061 return varop;
10062
10063 /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
10064 to VAROP and return the new constant. */
10065 if (CONST_INT_P (varop))
10066 return gen_int_mode (INTVAL (varop) & constop, mode);
10067
10068 /* See what bits may be nonzero in VAROP. Unlike the general case of
10069 a call to nonzero_bits, here we don't care about bits outside
10070 MODE. */
10071
10072 nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);
10073
10074 /* Turn off all bits in the constant that are known to already be zero.
10075 Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
10076 which is tested below. */
10077
10078 constop &= nonzero;
10079
10080 /* If we don't have any bits left, return zero. */
10081 if (constop == 0 && !side_effects_p (varop))
10082 return const0_rtx;
10083
10084 /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
10085 a power of two, we can replace this with an ASHIFT. */
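/* E.g. (hypothetically) with VAROP == (neg X) where X is known to be
   0 or 1 and CONSTOP == 8: (-X) & 8 is 8 when X is 1 and 0 when X is
   0, which is exactly (ashift X 3).  */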
10086 if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
10087 && (i = exact_log2 (constop)) >= 0)
10088 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);
10089
10090 /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
10091 or XOR, then try to apply the distributive law. This may eliminate
10092 operations if either branch can be simplified because of the AND.
10093 It may also make some cases more complex, but those cases probably
10094 won't match a pattern either with or without this. */
10095
10096 if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR)
10097 {
10098 scalar_int_mode varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10099 return
10100 gen_lowpart
10101 (mode,
10102 apply_distributive_law
10103 (simplify_gen_binary (GET_CODE (varop), varop_mode,
10104 simplify_and_const_int (NULL_RTX, varop_mode,
10105 XEXP (varop, 0),
10106 constop),
10107 simplify_and_const_int (NULL_RTX, varop_mode,
10108 XEXP (varop, 1),
10109 constop))));
10110 }
10111
10112 /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
10113 the AND and see if one of the operands simplifies to zero. If so, we
10114 may eliminate it. */
10115
10116 if (GET_CODE (varop) == PLUS
10117 && pow2p_hwi (constop + 1))
10118 {
10119 rtx o0, o1;
10120
10121 o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop);
10122 o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop);
10123 if (o0 == const0_rtx)
10124 return o1;
10125 if (o1 == const0_rtx)
10126 return o0;
10127 }
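/* As a sketch (hypothetical operands): with CONSTOP == 15 and
   VAROP == (plus (ashift X 4) Y), the first operand ANDs to zero
   because (ashift X 4) has no bits below bit 4, so the whole
   expression reduces to (and Y 15).  */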
10128
10129 /* Make a SUBREG if necessary. If we can't make it, fail. */
10130 varop = gen_lowpart (mode, varop);
10131 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
10132 return NULL_RTX;
10133
10134 /* If we are only masking insignificant bits, return VAROP. */
10135 if (constop == nonzero)
10136 return varop;
10137
10138 if (varop == orig_varop && constop == orig_constop)
10139 return NULL_RTX;
10140
10141 /* Otherwise, return an AND. */
10142 return simplify_gen_binary (AND, mode, varop, gen_int_mode (constop, mode));
10143 }
10144
10145
10146 /* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
10147 in MODE.
10148
10149 Return an equivalent form, if different from X. Otherwise, return X. If
10150 X is zero, we are to always construct the equivalent form. */
10151
10152 static rtx
10153 simplify_and_const_int (rtx x, scalar_int_mode mode, rtx varop,
10154 unsigned HOST_WIDE_INT constop)
10155 {
10156 rtx tem = simplify_and_const_int_1 (mode, varop, constop);
10157 if (tem)
10158 return tem;
10159
10160 if (!x)
10161 x = simplify_gen_binary (AND, GET_MODE (varop), varop,
10162 gen_int_mode (constop, mode));
10163 if (GET_MODE (x) != mode)
10164 x = gen_lowpart (mode, x);
10165 return x;
10166 }
10167
10168 /* Given a REG X of mode XMODE, compute which bits in X can be nonzero.
10169 We don't care about bits outside of those defined in MODE.
10170
10171 For most X this is simply GET_MODE_MASK (MODE), but if X is
10172 a shift, AND, or zero_extract, we can do better. */
10173
10174 static rtx
10175 reg_nonzero_bits_for_combine (const_rtx x, scalar_int_mode xmode,
10176 scalar_int_mode mode,
10177 unsigned HOST_WIDE_INT *nonzero)
10178 {
10179 rtx tem;
10180 reg_stat_type *rsp;
10181
10182 /* If X is a register whose nonzero bits value is current, use it.
10183 Otherwise, if X is a register whose value we can find, use that
10184 value. Otherwise, use the previously-computed global nonzero bits
10185 for this register. */
10186
10187 rsp = &reg_stat[REGNO (x)];
10188 if (rsp->last_set_value != 0
10189 && (rsp->last_set_mode == mode
10190 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10191 && GET_MODE_CLASS (rsp->last_set_mode) == MODE_INT
10192 && GET_MODE_CLASS (mode) == MODE_INT))
10193 && ((rsp->last_set_label >= label_tick_ebb_start
10194 && rsp->last_set_label < label_tick)
10195 || (rsp->last_set_label == label_tick
10196 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
10197 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10198 && REGNO (x) < reg_n_sets_max
10199 && REG_N_SETS (REGNO (x)) == 1
10200 && !REGNO_REG_SET_P
10201 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
10202 REGNO (x)))))
10203 {
10204 /* Note that, even if the precision of last_set_mode is lower than that
10205 of mode, record_value_for_reg invoked nonzero_bits on the register
10206 with nonzero_bits_mode (because last_set_mode is necessarily integral
10207 and HWI_COMPUTABLE_MODE_P in this case) so bits in nonzero_bits_mode
10208 are all valid, hence in mode too since nonzero_bits_mode is defined
10209 to the largest HWI_COMPUTABLE_MODE_P mode. */
10210 *nonzero &= rsp->last_set_nonzero_bits;
10211 return NULL;
10212 }
10213
10214 tem = get_last_value (x);
10215 if (tem)
10216 {
10217 if (SHORT_IMMEDIATES_SIGN_EXTEND)
10218 tem = sign_extend_short_imm (tem, xmode, GET_MODE_PRECISION (mode));
10219
10220 return tem;
10221 }
10222
10223 if (nonzero_sign_valid && rsp->nonzero_bits)
10224 {
10225 unsigned HOST_WIDE_INT mask = rsp->nonzero_bits;
10226
10227 if (GET_MODE_PRECISION (xmode) < GET_MODE_PRECISION (mode))
10228 /* We don't know anything about the upper bits. */
10229 mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (xmode);
10230
10231 *nonzero &= mask;
10232 }
10233
10234 return NULL;
10235 }
10236
10237 /* Given a reg X of mode XMODE, return the number of bits at the high-order
10238 end of X that are known to be equal to the sign bit. X will be used
10239 in mode MODE; the returned value will always be between 1 and the
10240 number of bits in MODE. */
10241
10242 static rtx
10243 reg_num_sign_bit_copies_for_combine (const_rtx x, scalar_int_mode xmode,
10244 scalar_int_mode mode,
10245 unsigned int *result)
10246 {
10247 rtx tem;
10248 reg_stat_type *rsp;
10249
10250 rsp = &reg_stat[REGNO (x)];
10251 if (rsp->last_set_value != 0
10252 && rsp->last_set_mode == mode
10253 && ((rsp->last_set_label >= label_tick_ebb_start
10254 && rsp->last_set_label < label_tick)
10255 || (rsp->last_set_label == label_tick
10256 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
10257 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10258 && REGNO (x) < reg_n_sets_max
10259 && REG_N_SETS (REGNO (x)) == 1
10260 && !REGNO_REG_SET_P
10261 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
10262 REGNO (x)))))
10263 {
10264 *result = rsp->last_set_sign_bit_copies;
10265 return NULL;
10266 }
10267
10268 tem = get_last_value (x);
10269 if (tem != 0)
10270 return tem;
10271
10272 if (nonzero_sign_valid && rsp->sign_bit_copies != 0
10273 && GET_MODE_PRECISION (xmode) == GET_MODE_PRECISION (mode))
10274 *result = rsp->sign_bit_copies;
10275
10276 return NULL;
10277 }
10278
10279 /* Return the number of "extended" bits there are in X, when interpreted
10280 as a quantity in MODE whose signedness is indicated by UNSIGNEDP. For
10281 unsigned quantities, this is the number of high-order zero bits.
10282 For signed quantities, this is the number of copies of the sign bit
10283 minus 1. In both cases, this function returns the number of "spare"
10284 bits. For example, if two quantities for which this function returns
10285 at least 1 are added, the addition is known not to overflow.
10286
10287 This function will always return 0 unless called during combine, which
10288 implies that it must be called from a define_split. */
10289
10290 unsigned int
10291 extended_count (const_rtx x, machine_mode mode, int unsignedp)
10292 {
10293 if (nonzero_sign_valid == 0)
10294 return 0;
10295
10296 scalar_int_mode int_mode;
10297 return (unsignedp
10298 ? (is_a <scalar_int_mode> (mode, &int_mode)
10299 && HWI_COMPUTABLE_MODE_P (int_mode)
10300 ? (unsigned int) (GET_MODE_PRECISION (int_mode) - 1
10301 - floor_log2 (nonzero_bits (x, int_mode)))
10302 : 0)
10303 : num_sign_bit_copies (x, mode) - 1);
10304 }
10305
10306 /* This function is called from `simplify_shift_const' to merge two
10307 outer operations. Specifically, we have already found that we need
10308 to perform operation *POP0 with constant *PCONST0 at the outermost
10309 position. We would now like to also perform OP1 with constant CONST1
10310 (with *POP0 being done last).
10311
10312 Return 1 if we can do the operation and update *POP0 and *PCONST0 with
10313 the resulting operation. *PCOMP_P is set to 1 if we would need to
10314 complement the innermost operand, otherwise it is unchanged.
10315
10316 MODE is the mode in which the operation will be done. No bits outside
10317 the width of this mode matter. It is assumed that the width of this mode
10318 is smaller than or equal to HOST_BITS_PER_WIDE_INT.
10319
10320 If *POP0 or OP1 is UNKNOWN, it means no operation is required. Only NEG, PLUS,
10321 IOR, XOR, and AND are supported. We may set *POP0 to SET if the proper
10322 result is simply *PCONST0.
10323
10324 If the resulting operation cannot be expressed as one operation, we
10325 return 0 and do not change *POP0, *PCONST0, and *PCOMP_P. */
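/* A worked example (hypothetical constants): if *POP0 is AND with
   *PCONST0 == 0xff and we merge OP1 == IOR with CONST1 == 0xff, the
   constants match, so the switch below applies (a | b) & b == b and
   *POP0 becomes SET with *PCONST0 == 0xff.  */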
10326
10327 static int
10328 merge_outer_ops (enum rtx_code *pop0, HOST_WIDE_INT *pconst0, enum rtx_code op1, HOST_WIDE_INT const1, machine_mode mode, int *pcomp_p)
10329 {
10330 enum rtx_code op0 = *pop0;
10331 HOST_WIDE_INT const0 = *pconst0;
10332
10333 const0 &= GET_MODE_MASK (mode);
10334 const1 &= GET_MODE_MASK (mode);
10335
10336 /* If OP0 is an AND, clear unimportant bits in CONST1. */
10337 if (op0 == AND)
10338 const1 &= const0;
10339
10340 /* If OP0 or OP1 is UNKNOWN, this is easy. Similarly if they are the same or
10341 if OP0 is SET. */
10342
10343 if (op1 == UNKNOWN || op0 == SET)
10344 return 1;
10345
10346 else if (op0 == UNKNOWN)
10347 op0 = op1, const0 = const1;
10348
10349 else if (op0 == op1)
10350 {
10351 switch (op0)
10352 {
10353 case AND:
10354 const0 &= const1;
10355 break;
10356 case IOR:
10357 const0 |= const1;
10358 break;
10359 case XOR:
10360 const0 ^= const1;
10361 break;
10362 case PLUS:
10363 const0 += const1;
10364 break;
10365 case NEG:
10366 op0 = UNKNOWN;
10367 break;
10368 default:
10369 break;
10370 }
10371 }
10372
10373 /* Otherwise, if either is a PLUS or NEG, we can't do anything. */
10374 else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG)
10375 return 0;
10376
10377 /* If the two constants aren't the same, we can't do anything. The
10378 remaining six cases can all be done. */
10379 else if (const0 != const1)
10380 return 0;
10381
10382 else
10383 switch (op0)
10384 {
10385 case IOR:
10386 if (op1 == AND)
10387 /* (a & b) | b == b */
10388 op0 = SET;
10389 else /* op1 == XOR */
10390 /* (a ^ b) | b == a | b */
10391 {;}
10392 break;
10393
10394 case XOR:
10395 if (op1 == AND)
10396 /* (a & b) ^ b == (~a) & b */
10397 op0 = AND, *pcomp_p = 1;
10398 else /* op1 == IOR */
10399 /* (a | b) ^ b == a & ~b */
10400 op0 = AND, const0 = ~const0;
10401 break;
10402
10403 case AND:
10404 if (op1 == IOR)
10405 /* (a | b) & b == b */
10406 op0 = SET;
10407 else /* op1 == XOR */
10408 /* (a ^ b) & b == (~a) & b */
10409 *pcomp_p = 1;
10410 break;
10411 default:
10412 break;
10413 }
10414
10415 /* Check for NO-OP cases. */
10416 const0 &= GET_MODE_MASK (mode);
10417 if (const0 == 0
10418 && (op0 == IOR || op0 == XOR || op0 == PLUS))
10419 op0 = UNKNOWN;
10420 else if (const0 == 0 && op0 == AND)
10421 op0 = SET;
10422 else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode)
10423 && op0 == AND)
10424 op0 = UNKNOWN;
10425
10426 *pop0 = op0;
10427
10428 /* ??? Slightly redundant with the above mask, but not entirely.
10429 Moving this above means we'd have to sign-extend the mode mask
10430 for the final test. */
10431 if (op0 != UNKNOWN && op0 != NEG)
10432 *pconst0 = trunc_int_for_mode (const0, mode);
10433
10434 return 1;
10435 }
10436
10437 /* A helper to simplify_shift_const_1 to determine the mode we can perform
10438 the shift in. The original shift operation CODE is performed on OP in
10439 ORIG_MODE. Return the wider mode MODE if we can perform the operation
10440 in that mode. Return ORIG_MODE otherwise. We can also assume that the
10441 result of the shift is subject to operation OUTER_CODE with operand
10442 OUTER_CONST. */
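/* For instance (a hypothetical widening): an (lshiftrt:QI X 3) whose
   result feeds an outer AND with constant 0x1f can be done in SImode,
   because low_bitmask_len gives 5 care bits and 8 - 5 >= 3, so every
   bit shifted in from beyond QImode is masked off again.  */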
10443
10444 static scalar_int_mode
10445 try_widen_shift_mode (enum rtx_code code, rtx op, int count,
10446 scalar_int_mode orig_mode, scalar_int_mode mode,
10447 enum rtx_code outer_code, HOST_WIDE_INT outer_const)
10448 {
10449 gcc_assert (GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (orig_mode));
10450
10451 /* In general we can't perform the shift in a wider mode for right shifts and rotates. */
10452 switch (code)
10453 {
10454 case ASHIFTRT:
10455 /* We can still widen if the bits brought in from the left are identical
10456 to the sign bit of ORIG_MODE. */
10457 if (num_sign_bit_copies (op, mode)
10458 > (unsigned) (GET_MODE_PRECISION (mode)
10459 - GET_MODE_PRECISION (orig_mode)))
10460 return mode;
10461 return orig_mode;
10462
10463 case LSHIFTRT:
10464 /* Similarly here but with zero bits. */
10465 if (HWI_COMPUTABLE_MODE_P (mode)
10466 && (nonzero_bits (op, mode) & ~GET_MODE_MASK (orig_mode)) == 0)
10467 return mode;
10468
10469 /* We can also widen if the bits brought in will be masked off. This
10470 operation is performed in ORIG_MODE. */
10471 if (outer_code == AND)
10472 {
10473 int care_bits = low_bitmask_len (orig_mode, outer_const);
10474
10475 if (care_bits >= 0
10476 && GET_MODE_PRECISION (orig_mode) - care_bits >= count)
10477 return mode;
10478 }
10479 /* fall through */
10480
10481 case ROTATE:
10482 return orig_mode;
10483
10484 case ROTATERT:
10485 gcc_unreachable ();
10486
10487 default:
10488 return mode;
10489 }
10490 }
10491
10492 /* Simplify a shift of VAROP by ORIG_COUNT bits. CODE says what kind
10493 of shift. The result of the shift is RESULT_MODE. Return NULL_RTX
10494 if we cannot simplify it. Otherwise, return a simplified value.
10495
10496 The shift is normally computed in the widest mode we find in VAROP, as
10497 long as it isn't a different number of words than RESULT_MODE. Exceptions
10498 are ASHIFTRT and ROTATE, which are always done in their original mode. */
10499
10500 static rtx
10501 simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
10502 rtx varop, int orig_count)
10503 {
10504 enum rtx_code orig_code = code;
10505 rtx orig_varop = varop;
10506 int count, log2;
10507 machine_mode mode = result_mode;
10508 machine_mode shift_mode;
10509 scalar_int_mode tmode, inner_mode, int_mode, int_varop_mode, int_result_mode;
10510 /* We form (outer_op (code varop count) (outer_const)). */
10511 enum rtx_code outer_op = UNKNOWN;
10512 HOST_WIDE_INT outer_const = 0;
10513 int complement_p = 0;
10514 rtx new_rtx, x;
10515
10516 /* Make sure to truncate the "natural" shift count on the way in. We don't
10517 want to do this inside the loop as it makes it more difficult to
10518 combine shifts. */
10519 if (SHIFT_COUNT_TRUNCATED)
10520 orig_count &= GET_MODE_UNIT_BITSIZE (mode) - 1;
10521
10522 /* If we were given an invalid count, don't do anything except exactly
10523 what was requested. */
10524
10525 if (orig_count < 0 || orig_count >= (int) GET_MODE_UNIT_PRECISION (mode))
10526 return NULL_RTX;
10527
10528 count = orig_count;
10529
10530 /* Unless one of the branches of the `if' in this loop does a `continue',
10531 we will `break' the loop after the `if'. */
10532
10533 while (count != 0)
10534 {
10535 /* If we have an operand of (clobber (const_int 0)), fail. */
10536 if (GET_CODE (varop) == CLOBBER)
10537 return NULL_RTX;
10538
10539 /* Convert ROTATERT to ROTATE. */
10540 if (code == ROTATERT)
10541 {
10542 unsigned int bitsize = GET_MODE_UNIT_PRECISION (result_mode);
10543 code = ROTATE;
10544 count = bitsize - count;
10545 }
10546
10547 shift_mode = result_mode;
10548 if (shift_mode != mode)
10549 {
10550 /* We only change the modes of scalar shifts. */
10551 int_mode = as_a <scalar_int_mode> (mode);
10552 int_result_mode = as_a <scalar_int_mode> (result_mode);
10553 shift_mode = try_widen_shift_mode (code, varop, count,
10554 int_result_mode, int_mode,
10555 outer_op, outer_const);
10556 }
10557
10558 scalar_int_mode shift_unit_mode
10559 = as_a <scalar_int_mode> (GET_MODE_INNER (shift_mode));
10560
10561 /* Handle cases where the count is greater than the size of the mode
10562 minus 1. For ASHIFT, use the size minus one as the count (this can
10563 occur when simplifying (lshiftrt (ashiftrt ..))). For rotates,
10564 take the count modulo the size. For other shifts, the result is
10565 zero.
10566
10567 Since these shifts are being produced by the compiler by combining
10568 multiple operations, each of which are defined, we know what the
10569 result is supposed to be. */
10570
10571 if (count > (GET_MODE_PRECISION (shift_unit_mode) - 1))
10572 {
10573 if (code == ASHIFTRT)
10574 count = GET_MODE_PRECISION (shift_unit_mode) - 1;
10575 else if (code == ROTATE || code == ROTATERT)
10576 count %= GET_MODE_PRECISION (shift_unit_mode);
10577 else
10578 {
10579 /* We can't simply return zero because there may be an
10580 outer op. */
10581 varop = const0_rtx;
10582 count = 0;
10583 break;
10584 }
10585 }
10586
10587 /* If we discovered we had to complement VAROP, leave. Making a NOT
10588 here would cause an infinite loop. */
10589 if (complement_p)
10590 break;
10591
10592 if (shift_mode == shift_unit_mode)
10593 {
10594 /* An arithmetic right shift of a quantity known to be -1 or 0
10595 is a no-op. */
10596 if (code == ASHIFTRT
10597 && (num_sign_bit_copies (varop, shift_unit_mode)
10598 == GET_MODE_PRECISION (shift_unit_mode)))
10599 {
10600 count = 0;
10601 break;
10602 }
10603
10604 /* If we are doing an arithmetic right shift and discarding all but
10605 the sign bit copies, this is equivalent to doing a shift by the
10606 bitsize minus one. Convert it into that shift because it will
10607 often allow other simplifications. */
10608
10609 if (code == ASHIFTRT
10610 && (count + num_sign_bit_copies (varop, shift_unit_mode)
10611 >= GET_MODE_PRECISION (shift_unit_mode)))
10612 count = GET_MODE_PRECISION (shift_unit_mode) - 1;
10613
10614 /* We simplify the tests below and elsewhere by converting
10615 ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
10616 `make_compound_operation' will convert it to an ASHIFTRT for
10617 those machines (such as VAX) that don't have an LSHIFTRT. */
10618 if (code == ASHIFTRT
10619 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10620 && val_signbit_known_clear_p (shift_unit_mode,
10621 nonzero_bits (varop,
10622 shift_unit_mode)))
10623 code = LSHIFTRT;
10624
10625 if (((code == LSHIFTRT
10626 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10627 && !(nonzero_bits (varop, shift_unit_mode) >> count))
10628 || (code == ASHIFT
10629 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10630 && !((nonzero_bits (varop, shift_unit_mode) << count)
10631 & GET_MODE_MASK (shift_unit_mode))))
10632 && !side_effects_p (varop))
10633 varop = const0_rtx;
10634 }
10635
10636 switch (GET_CODE (varop))
10637 {
10638 case SIGN_EXTEND:
10639 case ZERO_EXTEND:
10640 case SIGN_EXTRACT:
10641 case ZERO_EXTRACT:
10642 new_rtx = expand_compound_operation (varop);
10643 if (new_rtx != varop)
10644 {
10645 varop = new_rtx;
10646 continue;
10647 }
10648 break;
10649
10650 case MEM:
10651 /* The following rules apply only to scalars. */
10652 if (shift_mode != shift_unit_mode)
10653 break;
10654 int_mode = as_a <scalar_int_mode> (mode);
10655
10656 /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
10657 minus the width of a smaller mode, we can do this with a
10658 SIGN_EXTEND or ZERO_EXTEND from the narrower memory location. */
10659 if ((code == ASHIFTRT || code == LSHIFTRT)
10660 && ! mode_dependent_address_p (XEXP (varop, 0),
10661 MEM_ADDR_SPACE (varop))
10662 && ! MEM_VOLATILE_P (varop)
10663 && (int_mode_for_size (GET_MODE_BITSIZE (int_mode) - count, 1)
10664 .exists (&tmode)))
10665 {
10666 new_rtx = adjust_address_nv (varop, tmode,
10667 BYTES_BIG_ENDIAN ? 0
10668 : count / BITS_PER_UNIT);
10669
10670 varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND
10671 : ZERO_EXTEND, int_mode, new_rtx);
10672 count = 0;
10673 continue;
10674 }
10675 break;
10676
10677 case SUBREG:
10678 /* The following rules apply only to scalars. */
10679 if (shift_mode != shift_unit_mode)
10680 break;
10681 int_mode = as_a <scalar_int_mode> (mode);
10682 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10683
10684 /* If VAROP is a SUBREG, strip it as long as the inner operand has
10685 the same number of words as what we've seen so far. Then store
10686 the widest mode in MODE. */
10687 if (subreg_lowpart_p (varop)
10688 && is_int_mode (GET_MODE (SUBREG_REG (varop)), &inner_mode)
10689 && GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_varop_mode)
10690 && (CEIL (GET_MODE_SIZE (inner_mode), UNITS_PER_WORD)
10691 == CEIL (GET_MODE_SIZE (int_mode), UNITS_PER_WORD))
10692 && GET_MODE_CLASS (int_varop_mode) == MODE_INT)
10693 {
10694 varop = SUBREG_REG (varop);
10695 if (GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_mode))
10696 mode = inner_mode;
10697 continue;
10698 }
10699 break;
10700
10701 case MULT:
10702 /* Some machines use MULT instead of ASHIFT because MULT
10703 is cheaper. But it is still better on those machines to
10704 merge two shifts into one. */
10705 if (CONST_INT_P (XEXP (varop, 1))
10706 && (log2 = exact_log2 (UINTVAL (XEXP (varop, 1)))) >= 0)
10707 {
10708 rtx log2_rtx = gen_int_shift_amount (GET_MODE (varop), log2);
10709 varop = simplify_gen_binary (ASHIFT, GET_MODE (varop),
10710 XEXP (varop, 0), log2_rtx);
10711 continue;
10712 }
10713 break;
10714
10715 case UDIV:
10716 /* Similar, for when divides are cheaper. */
10717 if (CONST_INT_P (XEXP (varop, 1))
10718 && (log2 = exact_log2 (UINTVAL (XEXP (varop, 1)))) >= 0)
10719 {
10720 rtx log2_rtx = gen_int_shift_amount (GET_MODE (varop), log2);
10721 varop = simplify_gen_binary (LSHIFTRT, GET_MODE (varop),
10722 XEXP (varop, 0), log2_rtx);
10723 continue;
10724 }
10725 break;
10726
10727 case ASHIFTRT:
10728 /* If we are extracting just the sign bit of an arithmetic
10729 right shift, that shift is not needed. However, the sign
10730 bit of a wider mode may be different from what would be
10731 interpreted as the sign bit in a narrower mode, so, if
10732 the result is narrower, don't discard the shift. */
10733 if (code == LSHIFTRT
10734 && count == (GET_MODE_UNIT_BITSIZE (result_mode) - 1)
10735 && (GET_MODE_UNIT_BITSIZE (result_mode)
10736 >= GET_MODE_UNIT_BITSIZE (GET_MODE (varop))))
10737 {
10738 varop = XEXP (varop, 0);
10739 continue;
10740 }
10741
10742 /* fall through */
10743
10744 case LSHIFTRT:
10745 case ASHIFT:
10746 case ROTATE:
10747 /* The following rules apply only to scalars. */
10748 if (shift_mode != shift_unit_mode)
10749 break;
10750 int_mode = as_a <scalar_int_mode> (mode);
10751 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10752 int_result_mode = as_a <scalar_int_mode> (result_mode);
10753
10754 /* Here we have two nested shifts. The result is usually the
10755 AND of a new shift with a mask. We compute the result below. */
10756 if (CONST_INT_P (XEXP (varop, 1))
10757 && INTVAL (XEXP (varop, 1)) >= 0
10758 && INTVAL (XEXP (varop, 1)) < GET_MODE_PRECISION (int_varop_mode)
10759 && HWI_COMPUTABLE_MODE_P (int_result_mode)
10760 && HWI_COMPUTABLE_MODE_P (int_mode))
10761 {
10762 enum rtx_code first_code = GET_CODE (varop);
10763 unsigned int first_count = INTVAL (XEXP (varop, 1));
10764 unsigned HOST_WIDE_INT mask;
10765 rtx mask_rtx;
10766
10767 /* We have one common special case. We can't do any merging if
10768 the inner code is an ASHIFTRT of a smaller mode. However, if
10769 we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
10770 with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
10771 we can convert it to
10772 (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
10773 This simplifies certain SIGN_EXTEND operations. */
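/* Concretely (a hypothetical M1 == SImode, M2 == QImode case):
   (ashift:SI (subreg:SI (ashiftrt:QI FOO 2) 0) 24) has C2 == 32 - 8,
   so it becomes
   (ashiftrt:SI (ashift:SI (and:SI (subreg:SI FOO 0) -4) 24) 2),
   where the mask -4 (C3) clears the low-order C1 == 2 bits.  */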
10774 if (code == ASHIFT && first_code == ASHIFTRT
10775 && count == (GET_MODE_PRECISION (int_result_mode)
10776 - GET_MODE_PRECISION (int_varop_mode)))
10777 {
10778 /* C3 has the low-order C1 bits zero. */
10779
10780 mask = GET_MODE_MASK (int_mode)
10781 & ~((HOST_WIDE_INT_1U << first_count) - 1);
10782
10783 varop = simplify_and_const_int (NULL_RTX, int_result_mode,
10784 XEXP (varop, 0), mask);
10785 varop = simplify_shift_const (NULL_RTX, ASHIFT,
10786 int_result_mode, varop, count);
10787 count = first_count;
10788 code = ASHIFTRT;
10789 continue;
10790 }
10791
10792 /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
10793 than C1 high-order bits equal to the sign bit, we can convert
10794 this to either an ASHIFT or an ASHIFTRT depending on the
10795 two counts.
10796
10797 We cannot do this if VAROP's mode is not SHIFT_UNIT_MODE. */
10798
10799 if (code == ASHIFTRT && first_code == ASHIFT
10800 && int_varop_mode == shift_unit_mode
10801 && (num_sign_bit_copies (XEXP (varop, 0), shift_unit_mode)
10802 > first_count))
10803 {
10804 varop = XEXP (varop, 0);
10805 count -= first_count;
10806 if (count < 0)
10807 {
10808 count = -count;
10809 code = ASHIFT;
10810 }
10811
10812 continue;
10813 }
10814
10815 /* There are some cases we can't do. If CODE is ASHIFTRT,
10816 we can only do this if FIRST_CODE is also ASHIFTRT.
10817
10818 We can't do the case when CODE is ROTATE and FIRST_CODE is
10819 ASHIFTRT.
10820
10821 If the mode of this shift is not the mode of the outer shift,
10822 we can't do this if either shift is a right shift or ROTATE.
10823
10824 Finally, we can't do any of these if the mode is too wide
10825 unless the codes are the same.
10826
10827 Handle the case where the shift codes are the same
10828 first. */
10829
10830 if (code == first_code)
10831 {
10832 if (int_varop_mode != int_result_mode
10833 && (code == ASHIFTRT || code == LSHIFTRT
10834 || code == ROTATE))
10835 break;
10836
10837 count += first_count;
10838 varop = XEXP (varop, 0);
10839 continue;
10840 }
10841
10842 if (code == ASHIFTRT
10843 || (code == ROTATE && first_code == ASHIFTRT)
10844 || GET_MODE_PRECISION (int_mode) > HOST_BITS_PER_WIDE_INT
10845 || (int_varop_mode != int_result_mode
10846 && (first_code == ASHIFTRT || first_code == LSHIFTRT
10847 || first_code == ROTATE
10848 || code == ROTATE)))
10849 break;
10850
10851 /* To compute the mask to apply after the shift, shift the
10852 nonzero bits of the inner shift the same way the
10853 outer shift will. */
10854
10855 mask_rtx = gen_int_mode (nonzero_bits (varop, int_varop_mode),
10856 int_result_mode);
10857 rtx count_rtx = gen_int_shift_amount (int_result_mode, count);
10858 mask_rtx
10859 = simplify_const_binary_operation (code, int_result_mode,
10860 mask_rtx, count_rtx);
10861
10862 /* Give up if we can't compute an outer operation to use. */
10863 if (mask_rtx == 0
10864 || !CONST_INT_P (mask_rtx)
10865 || ! merge_outer_ops (&outer_op, &outer_const, AND,
10866 INTVAL (mask_rtx),
10867 int_result_mode, &complement_p))
10868 break;
10869
10870 /* If the shifts are in the same direction, we add the
10871 counts. Otherwise, we subtract them. */
10872 if ((code == ASHIFTRT || code == LSHIFTRT)
10873 == (first_code == ASHIFTRT || first_code == LSHIFTRT))
10874 count += first_count;
10875 else
10876 count -= first_count;
10877
10878 /* If COUNT is positive, the new shift is usually CODE,
10879 except for the two exceptions below, in which case it is
10880 FIRST_CODE. If the count is negative, FIRST_CODE should
10881 always be used. */
10882 if (count > 0
10883 && ((first_code == ROTATE && code == ASHIFT)
10884 || (first_code == ASHIFTRT && code == LSHIFTRT)))
10885 code = first_code;
10886 else if (count < 0)
10887 code = first_code, count = -count;
10888
10889 varop = XEXP (varop, 0);
10890 continue;
10891 }
10892
10893 /* If we have (A << B << C) for any shift, we can convert this to
10894 (A << C << B). This wins if A is a constant. Only try this if
10895 B is not a constant. */
10896
10897 else if (GET_CODE (varop) == code
10898 && CONST_INT_P (XEXP (varop, 0))
10899 && !CONST_INT_P (XEXP (varop, 1)))
10900 {
10901 /* For ((unsigned) (cstULL >> count)) >> cst2 we have to make
10902 sure the result will be masked. See PR70222. */
10903 if (code == LSHIFTRT
10904 && int_mode != int_result_mode
10905 && !merge_outer_ops (&outer_op, &outer_const, AND,
10906 GET_MODE_MASK (int_result_mode)
10907 >> orig_count, int_result_mode,
10908 &complement_p))
10909 break;
10910 /* For ((int) (cstLL >> count)) >> cst2 just give up. Queuing
10911 up outer sign extension (often left and right shift) is
10912 hardly more efficient than the original. See PR70429.
10913 Similarly punt for rotates with different modes.
10914 See PR97386. */
10915 if ((code == ASHIFTRT || code == ROTATE)
10916 && int_mode != int_result_mode)
10917 break;
10918
10919 rtx count_rtx = gen_int_shift_amount (int_result_mode, count);
10920 rtx new_rtx = simplify_const_binary_operation (code, int_mode,
10921 XEXP (varop, 0),
10922 count_rtx);
10923 varop = gen_rtx_fmt_ee (code, int_mode, new_rtx, XEXP (varop, 1));
10924 count = 0;
10925 continue;
10926 }
10927 break;
10928
10929 case NOT:
10930 /* The following rules apply only to scalars. */
10931 if (shift_mode != shift_unit_mode)
10932 break;
10933
10934 /* Make this fit the case below. */
10935 varop = gen_rtx_XOR (mode, XEXP (varop, 0), constm1_rtx);
10936 continue;
10937
10938 case IOR:
10939 case AND:
10940 case XOR:
10941 /* The following rules apply only to scalars. */
10942 if (shift_mode != shift_unit_mode)
10943 break;
10944 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10945 int_result_mode = as_a <scalar_int_mode> (result_mode);
10946
10947 /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
10948 with C the size of VAROP - 1 and the shift is logical if
10949 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
10950 we have an (le X 0) operation. If we have an arithmetic shift
10951 and STORE_FLAG_VALUE is 1 or we have a logical shift with
10952 STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation. */
10953
10954 if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS
10955 && XEXP (XEXP (varop, 0), 1) == constm1_rtx
10956 && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
10957 && (code == LSHIFTRT || code == ASHIFTRT)
10958 && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
10959 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
10960 {
10961 count = 0;
10962 varop = gen_rtx_LE (int_varop_mode, XEXP (varop, 1),
10963 const0_rtx);
10964
10965 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
10966 varop = gen_rtx_NEG (int_varop_mode, varop);
10967
10968 continue;
10969 }
10970
10971 /* If we have (shift (logical)), move the logical to the outside
10972 to allow it to possibly combine with another logical and the
10973 shift to combine with another shift. This also canonicalizes to
10974 what a ZERO_EXTRACT looks like. Also, some machines have
10975 (and (shift)) insns. */
10976
10977 if (CONST_INT_P (XEXP (varop, 1))
10978 /* We can't do this if we have (ashiftrt (xor)) and the
10979 constant has its sign bit set in shift_unit_mode with
10980 shift_unit_mode wider than result_mode. */
10981 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
10982 && int_result_mode != shift_unit_mode
10983 && trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
10984 shift_unit_mode) < 0)
10985 && (new_rtx = simplify_const_binary_operation
10986 (code, int_result_mode,
10987 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
10988 gen_int_shift_amount (int_result_mode, count))) != 0
10989 && CONST_INT_P (new_rtx)
10990 && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
10991 INTVAL (new_rtx), int_result_mode,
10992 &complement_p))
10993 {
10994 varop = XEXP (varop, 0);
10995 continue;
10996 }
10997
10998 /* If we can't do that, try to simplify the shift in each arm of the
10999 logical expression, make a new logical expression, and apply
11000 the inverse distributive law. This also can't be done for
11001 (ashiftrt (xor)) where we've widened the shift and the constant
11002 changes the sign bit. */
11003 if (CONST_INT_P (XEXP (varop, 1))
11004 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
11005 && int_result_mode != shift_unit_mode
11006 && trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
11007 shift_unit_mode) < 0))
11008 {
11009 rtx lhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
11010 XEXP (varop, 0), count);
11011 rtx rhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
11012 XEXP (varop, 1), count);
11013
11014 varop = simplify_gen_binary (GET_CODE (varop), shift_unit_mode,
11015 lhs, rhs);
11016 varop = apply_distributive_law (varop);
11017
11018 count = 0;
11019 continue;
11020 }
11021 break;
11022
11023 case EQ:
11024 /* The following rules apply only to scalars. */
11025 if (shift_mode != shift_unit_mode)
11026 break;
11027 int_result_mode = as_a <scalar_int_mode> (result_mode);
11028
11029 /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
11030 says that the sign bit can be tested, FOO has mode MODE, C is
11031 GET_MODE_PRECISION (MODE) - 1, and only the low-order bit of FOO
11032 may be nonzero. */
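/* Spelled out (hypothetical SImode, STORE_FLAG_VALUE == -1): if only
   bit 0 of FOO can be nonzero, (eq FOO 0) is -1 or 0, a logical shift
   right by 31 turns that into 1 or 0, and that is exactly FOO ^ 1,
   hence the XOR merged into the outer operation.  */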
11033 if (code == LSHIFTRT
11034 && XEXP (varop, 1) == const0_rtx
11035 && GET_MODE (XEXP (varop, 0)) == int_result_mode
11036 && count == (GET_MODE_PRECISION (int_result_mode) - 1)
11037 && HWI_COMPUTABLE_MODE_P (int_result_mode)
11038 && STORE_FLAG_VALUE == -1
11039 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1
11040 && merge_outer_ops (&outer_op, &outer_const, XOR, 1,
11041 int_result_mode, &complement_p))
11042 {
11043 varop = XEXP (varop, 0);
11044 count = 0;
11045 continue;
11046 }
11047 break;
11048
11049 case NEG:
11050 /* The following rules apply only to scalars. */
11051 if (shift_mode != shift_unit_mode)
11052 break;
11053 int_result_mode = as_a <scalar_int_mode> (result_mode);
11054
11055 /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
11056 than the number of bits in the mode is equivalent to A. */
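/* E.g. in SImode with A known to be 0 or 1: (neg A) is 0 or -1, and a
   logical right shift by 31 yields 0 or 1 again, i.e. A itself.  */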
11057 if (code == LSHIFTRT
11058 && count == (GET_MODE_PRECISION (int_result_mode) - 1)
11059 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1)
11060 {
11061 varop = XEXP (varop, 0);
11062 count = 0;
11063 continue;
11064 }
11065
11066 /* NEG commutes with ASHIFT since it is multiplication. Move the
11067 NEG outside to allow shifts to combine. */
11068 if (code == ASHIFT
11069 && merge_outer_ops (&outer_op, &outer_const, NEG, 0,
11070 int_result_mode, &complement_p))
11071 {
11072 varop = XEXP (varop, 0);
11073 continue;
11074 }
11075 break;
11076
11077 case PLUS:
11078 /* The following rules apply only to scalars. */
11079 if (shift_mode != shift_unit_mode)
11080 break;
11081 int_result_mode = as_a <scalar_int_mode> (result_mode);
11082
11083 /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
11084 is one less than the number of bits in the mode is
11085 equivalent to (xor A 1). */
11086 if (code == LSHIFTRT
11087 && count == (GET_MODE_PRECISION (int_result_mode) - 1)
11088 && XEXP (varop, 1) == constm1_rtx
11089 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1
11090 && merge_outer_ops (&outer_op, &outer_const, XOR, 1,
11091 int_result_mode, &complement_p))
11092 {
11093 count = 0;
11094 varop = XEXP (varop, 0);
11095 continue;
11096 }
11097
11098 /* If we have (xshiftrt (plus FOO BAR) C), and the only bits
11099 that might be nonzero in BAR are those being shifted out and those
11100 bits are known zero in FOO, we can replace the PLUS with FOO.
11101 Similarly in the other operand order. This code occurs when
11102 we are computing the size of a variable-size array. */
11103
11104 if ((code == ASHIFTRT || code == LSHIFTRT)
11105 && count < HOST_BITS_PER_WIDE_INT
11106 && nonzero_bits (XEXP (varop, 1), int_result_mode) >> count == 0
11107 && (nonzero_bits (XEXP (varop, 1), int_result_mode)
11108 & nonzero_bits (XEXP (varop, 0), int_result_mode)) == 0)
11109 {
11110 varop = XEXP (varop, 0);
11111 continue;
11112 }
11113 else if ((code == ASHIFTRT || code == LSHIFTRT)
11114 && count < HOST_BITS_PER_WIDE_INT
11115 && HWI_COMPUTABLE_MODE_P (int_result_mode)
11116 && (nonzero_bits (XEXP (varop, 0), int_result_mode)
11117 >> count) == 0
11118 && (nonzero_bits (XEXP (varop, 0), int_result_mode)
11119 & nonzero_bits (XEXP (varop, 1), int_result_mode)) == 0)
11120 {
11121 varop = XEXP (varop, 1);
11122 continue;
11123 }
11124
11125 /* (ashift (plus foo C) N) is (plus (ashift foo N) C'). */
11126 if (code == ASHIFT
11127 && CONST_INT_P (XEXP (varop, 1))
11128 && (new_rtx = simplify_const_binary_operation
11129 (ASHIFT, int_result_mode,
11130 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
11131 gen_int_shift_amount (int_result_mode, count))) != 0
11132 && CONST_INT_P (new_rtx)
11133 && merge_outer_ops (&outer_op, &outer_const, PLUS,
11134 INTVAL (new_rtx), int_result_mode,
11135 &complement_p))
11136 {
11137 varop = XEXP (varop, 0);
11138 continue;
11139 }
11140
11141 /* Check for 'PLUS signbit', which is the canonical form of 'XOR
11142 signbit', and attempt to change the PLUS to an XOR and move it to
11143 the outer operation, as is done above for (shift (logical)) in the
11144 AND/IOR/XOR case; see the logical handling above for the
11145 reasoning. */
11146 if (code == LSHIFTRT
11147 && CONST_INT_P (XEXP (varop, 1))
11148 && mode_signbit_p (int_result_mode, XEXP (varop, 1))
11149 && (new_rtx = simplify_const_binary_operation
11150 (code, int_result_mode,
11151 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
11152 gen_int_shift_amount (int_result_mode, count))) != 0
11153 && CONST_INT_P (new_rtx)
11154 && merge_outer_ops (&outer_op, &outer_const, XOR,
11155 INTVAL (new_rtx), int_result_mode,
11156 &complement_p))
11157 {
11158 varop = XEXP (varop, 0);
11159 continue;
11160 }
11161
11162 break;
11163
11164 case MINUS:
11165 /* The following rules apply only to scalars. */
11166 if (shift_mode != shift_unit_mode)
11167 break;
11168 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
11169
11170 /* If we have (xshiftrt (minus (ashiftrt X C) X) C)
11171 with C the size of VAROP - 1 and the shift is logical if
11172 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
11173 we have a (gt X 0) operation. If the shift is arithmetic with
11174 STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1,
11175 we have a (neg (gt X 0)) operation. */
11176
11177 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
11178 && GET_CODE (XEXP (varop, 0)) == ASHIFTRT
11179 && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
11180 && (code == LSHIFTRT || code == ASHIFTRT)
11181 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
11182 && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
11183 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
11184 {
11185 count = 0;
11186 varop = gen_rtx_GT (int_varop_mode, XEXP (varop, 1),
11187 const0_rtx);
11188
11189 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
11190 varop = gen_rtx_NEG (int_varop_mode, varop);
11191
11192 continue;
11193 }
11194 break;
11195
11196 case TRUNCATE:
11197 /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt))
11198 if the truncate does not affect the value. */
11199 if (code == LSHIFTRT
11200 && GET_CODE (XEXP (varop, 0)) == LSHIFTRT
11201 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
11202 && (INTVAL (XEXP (XEXP (varop, 0), 1))
11203 >= (GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (varop, 0)))
11204 - GET_MODE_UNIT_PRECISION (GET_MODE (varop)))))
11205 {
11206 rtx varop_inner = XEXP (varop, 0);
11207 int new_count = count + INTVAL (XEXP (varop_inner, 1));
11208 rtx new_count_rtx = gen_int_shift_amount (GET_MODE (varop_inner),
11209 new_count);
11210 varop_inner = gen_rtx_LSHIFTRT (GET_MODE (varop_inner),
11211 XEXP (varop_inner, 0),
11212 new_count_rtx);
11213 varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner);
11214 count = 0;
11215 continue;
11216 }
11217 break;
11218
11219 default:
11220 break;
11221 }
11222
11223 break;
11224 }
11225
11226 shift_mode = result_mode;
11227 if (shift_mode != mode)
11228 {
11229 /* We only change the modes of scalar shifts. */
11230 int_mode = as_a <scalar_int_mode> (mode);
11231 int_result_mode = as_a <scalar_int_mode> (result_mode);
11232 shift_mode = try_widen_shift_mode (code, varop, count, int_result_mode,
11233 int_mode, outer_op, outer_const);
11234 }
11235
11236 /* We have now finished analyzing the shift. The result should be
11237 a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places. If
11238 OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied
11239 to the result of the shift. OUTER_CONST is the relevant constant,
11240 but we must turn off all bits turned off in the shift. */
11241
11242 if (outer_op == UNKNOWN
11243 && orig_code == code && orig_count == count
11244 && varop == orig_varop
11245 && shift_mode == GET_MODE (varop))
11246 return NULL_RTX;
11247
11248 /* Make a SUBREG if necessary. If we can't make it, fail. */
11249 varop = gen_lowpart (shift_mode, varop);
11250 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
11251 return NULL_RTX;
11252
11253 /* If we have an outer operation and we just made a shift, it is
11254 possible that we could have simplified the shift were it not
11255 for the outer operation. So try to do the simplification
11256 recursively. */
11257
11258 if (outer_op != UNKNOWN)
11259 x = simplify_shift_const_1 (code, shift_mode, varop, count);
11260 else
11261 x = NULL_RTX;
11262
11263 if (x == NULL_RTX)
11264 x = simplify_gen_binary (code, shift_mode, varop,
11265 gen_int_shift_amount (shift_mode, count));
11266
11267 /* If we were doing an LSHIFTRT in a wider mode than it was originally,
11268 turn off all the bits that the shift would have turned off. */
11269 if (orig_code == LSHIFTRT && result_mode != shift_mode)
11270 /* We only change the modes of scalar shifts. */
11271 x = simplify_and_const_int (NULL_RTX, as_a <scalar_int_mode> (shift_mode),
11272 x, GET_MODE_MASK (result_mode) >> orig_count);
11273
11274 /* Do the remainder of the processing in RESULT_MODE. */
11275 x = gen_lowpart_or_truncate (result_mode, x);
11276
11277 /* If COMPLEMENT_P is set, we have to complement X before doing the outer
11278 operation. */
11279 if (complement_p)
11280 x = simplify_gen_unary (NOT, result_mode, x, result_mode);
11281
11282 if (outer_op != UNKNOWN)
11283 {
11284 int_result_mode = as_a <scalar_int_mode> (result_mode);
11285
11286 if (GET_RTX_CLASS (outer_op) != RTX_UNARY
11287 && GET_MODE_PRECISION (int_result_mode) < HOST_BITS_PER_WIDE_INT)
11288 outer_const = trunc_int_for_mode (outer_const, int_result_mode);
11289
11290 if (outer_op == AND)
11291 x = simplify_and_const_int (NULL_RTX, int_result_mode, x, outer_const);
11292 else if (outer_op == SET)
11293 {
11294 /* This means that we have determined that the result is
11295 equivalent to a constant. This should be rare. */
11296 if (!side_effects_p (x))
11297 x = GEN_INT (outer_const);
11298 }
11299 else if (GET_RTX_CLASS (outer_op) == RTX_UNARY)
11300 x = simplify_gen_unary (outer_op, int_result_mode, x, int_result_mode);
11301 else
11302 x = simplify_gen_binary (outer_op, int_result_mode, x,
11303 GEN_INT (outer_const));
11304 }
11305
11306 return x;
11307 }
11308
11309 /* Simplify a shift of VAROP by COUNT bits. CODE says what kind of shift.
11310 The result of the shift is RESULT_MODE. If we cannot simplify it,
11311 return X or, if it is NULL, synthesize the expression with
11312 simplify_gen_binary. Otherwise, return a simplified value.
11313
11314 The shift is normally computed in the widest mode we find in VAROP, as
11315 long as it isn't a different number of words than RESULT_MODE. Exceptions
11316 are ASHIFTRT and ROTATE, which are always done in their original mode. */
11317
11318 static rtx
11319 simplify_shift_const (rtx x, enum rtx_code code, machine_mode result_mode,
11320 rtx varop, int count)
11321 {
11322 rtx tem = simplify_shift_const_1 (code, result_mode, varop, count);
11323 if (tem)
11324 return tem;
11325
11326 if (!x)
11327 x = simplify_gen_binary (code, GET_MODE (varop), varop,
11328 gen_int_shift_amount (GET_MODE (varop), count));
11329 if (GET_MODE (x) != result_mode)
11330 x = gen_lowpart (result_mode, x);
11331 return x;
11332 }
11333
11334
11335 /* A subroutine of recog_for_combine. See there for arguments and
11336 return value. */
11337
11338 static int
11339 recog_for_combine_1 (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
11340 {
11341 rtx pat = *pnewpat;
11342 rtx pat_without_clobbers;
11343 int insn_code_number;
11344 int num_clobbers_to_add = 0;
11345 int i;
11346 rtx notes = NULL_RTX;
11347 rtx old_notes, old_pat;
11348 int old_icode;
11349
11350 /* If PAT is a PARALLEL, check to see if it contains the CLOBBER
11351 we use to indicate that something didn't match. If we find such a
11352 thing, force rejection. */
11353 if (GET_CODE (pat) == PARALLEL)
11354 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
11355 if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER
11356 && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
11357 return -1;
11358
11359 old_pat = PATTERN (insn);
11360 old_notes = REG_NOTES (insn);
11361 PATTERN (insn) = pat;
11362 REG_NOTES (insn) = NULL_RTX;
11363
11364 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
11365 if (dump_file && (dump_flags & TDF_DETAILS))
11366 {
11367 if (insn_code_number < 0)
11368 fputs ("Failed to match this instruction:\n", dump_file);
11369 else
11370 fputs ("Successfully matched this instruction:\n", dump_file);
11371 print_rtl_single (dump_file, pat);
11372 }
11373
11374   /* If the pattern wasn't recognized, there is the possibility that we
11375      previously had an insn that clobbered some register as a side effect,
11376      but the combined insn doesn't need to do that.  So try once more
11377      without the clobbers unless this represents an ASM insn.  */
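  /* For example, on a hypothetical target whose add pattern clobbers a
     flags register, PAT might be

	(parallel [(set (reg:SI 0) (plus:SI (reg:SI 1) (reg:SI 2)))
		   (clobber (reg:CC 17))])

     If that PARALLEL fails to match but a plain add pattern exists, the
     lone SET kept below can still be recognized.  The register numbers
     here are purely illustrative.  */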
11378
11379 if (insn_code_number < 0 && ! check_asm_operands (pat)
11380 && GET_CODE (pat) == PARALLEL)
11381 {
11382 int pos;
11383
11384 for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++)
11385 if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
11386 {
11387 if (i != pos)
11388 SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i));
11389 pos++;
11390 }
11391
11392 SUBST_INT (XVECLEN (pat, 0), pos);
11393
11394 if (pos == 1)
11395 pat = XVECEXP (pat, 0, 0);
11396
11397 PATTERN (insn) = pat;
11398 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
11399 if (dump_file && (dump_flags & TDF_DETAILS))
11400 {
11401 if (insn_code_number < 0)
11402 fputs ("Failed to match this instruction:\n", dump_file);
11403 else
11404 fputs ("Successfully matched this instruction:\n", dump_file);
11405 print_rtl_single (dump_file, pat);
11406 }
11407 }
11408
11409 pat_without_clobbers = pat;
11410
11411 PATTERN (insn) = old_pat;
11412 REG_NOTES (insn) = old_notes;
11413
11414   /* Recognize all noop sets; these will be killed by a follow-up pass.  */
11415 if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat))
11416 insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0;
11417
11418   /* If we had any clobbers to add, make a new pattern that contains
11419      them.  Then check to make sure that all of them are dead.  */
11420 if (num_clobbers_to_add)
11421 {
11422 rtx newpat = gen_rtx_PARALLEL (VOIDmode,
11423 rtvec_alloc (GET_CODE (pat) == PARALLEL
11424 ? (XVECLEN (pat, 0)
11425 + num_clobbers_to_add)
11426 : num_clobbers_to_add + 1));
11427
11428 if (GET_CODE (pat) == PARALLEL)
11429 for (i = 0; i < XVECLEN (pat, 0); i++)
11430 XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i);
11431 else
11432 XVECEXP (newpat, 0, 0) = pat;
11433
11434 add_clobbers (newpat, insn_code_number);
11435
11436 for (i = XVECLEN (newpat, 0) - num_clobbers_to_add;
11437 i < XVECLEN (newpat, 0); i++)
11438 {
11439 if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0))
11440 && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn))
11441 return -1;
11442 if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH)
11443 {
11444 gcc_assert (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)));
11445 notes = alloc_reg_note (REG_UNUSED,
11446 XEXP (XVECEXP (newpat, 0, i), 0), notes);
11447 }
11448 }
11449 pat = newpat;
11450 }
11451
11452 if (insn_code_number >= 0
11453 && insn_code_number != NOOP_MOVE_INSN_CODE)
11454 {
11455 old_pat = PATTERN (insn);
11456 old_notes = REG_NOTES (insn);
11457 old_icode = INSN_CODE (insn);
11458 PATTERN (insn) = pat;
11459 REG_NOTES (insn) = notes;
11460 INSN_CODE (insn) = insn_code_number;
11461
11462 /* Allow targets to reject combined insn. */
11463 if (!targetm.legitimate_combined_insn (insn))
11464 {
11465 if (dump_file && (dump_flags & TDF_DETAILS))
11466 fputs ("Instruction not appropriate for target.",
11467 dump_file);
11468
11469 /* Callers expect recog_for_combine to strip
11470 clobbers from the pattern on failure. */
11471 pat = pat_without_clobbers;
11472 notes = NULL_RTX;
11473
11474 insn_code_number = -1;
11475 }
11476
11477 PATTERN (insn) = old_pat;
11478 REG_NOTES (insn) = old_notes;
11479 INSN_CODE (insn) = old_icode;
11480 }
11481
11482 *pnewpat = pat;
11483 *pnotes = notes;
11484
11485 return insn_code_number;
11486 }
11487
11488 /* Change every ZERO_EXTRACT and ZERO_EXTEND of a SUBREG that can be
11489 expressed as an AND and maybe an LSHIFTRT, to that formulation.
11490 Return whether anything was so changed. */
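/* For example, on a !BITS_BIG_ENDIAN target,

	(zero_extract:SI (reg:SI R) (const_int 8) (const_int 4))
   becomes
	(and:SI (lshiftrt:SI (reg:SI R) (const_int 4)) (const_int 255))

   and a lowpart zero extension such as
	(zero_extend:SI (subreg:QI (reg:SI R) 0))
   becomes
	(and:SI (reg:SI R) (const_int 255)).  */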
11491
11492 static bool
11493 change_zero_ext (rtx pat)
11494 {
11495 bool changed = false;
11496 rtx *src = &SET_SRC (pat);
11497
11498 subrtx_ptr_iterator::array_type array;
11499 FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
11500 {
11501 rtx x = **iter;
11502 scalar_int_mode mode, inner_mode;
11503 if (!is_a <scalar_int_mode> (GET_MODE (x), &mode))
11504 continue;
11505 int size;
11506
11507 if (GET_CODE (x) == ZERO_EXTRACT
11508 && CONST_INT_P (XEXP (x, 1))
11509 && CONST_INT_P (XEXP (x, 2))
11510 && is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode)
11511 && GET_MODE_PRECISION (inner_mode) <= GET_MODE_PRECISION (mode))
11512 {
11513 size = INTVAL (XEXP (x, 1));
11514
11515 int start = INTVAL (XEXP (x, 2));
11516 if (BITS_BIG_ENDIAN)
11517 start = GET_MODE_PRECISION (inner_mode) - size - start;
11518
11519 if (start != 0)
11520 x = gen_rtx_LSHIFTRT (inner_mode, XEXP (x, 0),
11521 gen_int_shift_amount (inner_mode, start));
11522 else
11523 x = XEXP (x, 0);
11524
11525 if (mode != inner_mode)
11526 {
11527 if (REG_P (x) && HARD_REGISTER_P (x)
11528 && !can_change_dest_mode (x, 0, mode))
11529 continue;
11530
11531 x = gen_lowpart_SUBREG (mode, x);
11532 }
11533 }
11534 else if (GET_CODE (x) == ZERO_EXTEND
11535 && GET_CODE (XEXP (x, 0)) == SUBREG
11536 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (XEXP (x, 0))))
11537 && !paradoxical_subreg_p (XEXP (x, 0))
11538 && subreg_lowpart_p (XEXP (x, 0)))
11539 {
11540 inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
11541 size = GET_MODE_PRECISION (inner_mode);
11542 x = SUBREG_REG (XEXP (x, 0));
11543 if (GET_MODE (x) != mode)
11544 {
11545 if (REG_P (x) && HARD_REGISTER_P (x)
11546 && !can_change_dest_mode (x, 0, mode))
11547 continue;
11548
11549 x = gen_lowpart_SUBREG (mode, x);
11550 }
11551 }
11552 else if (GET_CODE (x) == ZERO_EXTEND
11553 && REG_P (XEXP (x, 0))
11554 && HARD_REGISTER_P (XEXP (x, 0))
11555 && can_change_dest_mode (XEXP (x, 0), 0, mode))
11556 {
11557 inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
11558 size = GET_MODE_PRECISION (inner_mode);
11559 x = gen_rtx_REG (mode, REGNO (XEXP (x, 0)));
11560 }
11561 else
11562 continue;
11563
11564 if (!(GET_CODE (x) == LSHIFTRT
11565 && CONST_INT_P (XEXP (x, 1))
11566 && size + INTVAL (XEXP (x, 1)) == GET_MODE_PRECISION (mode)))
11567 {
11568 wide_int mask = wi::mask (size, false, GET_MODE_PRECISION (mode));
11569 x = gen_rtx_AND (mode, x, immed_wide_int_const (mask, mode));
11570 }
11571
11572 SUBST (**iter, x);
11573 changed = true;
11574 }
11575
11576 if (changed)
11577 FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
11578 maybe_swap_commutative_operands (**iter);
11579
11580 rtx *dst = &SET_DEST (pat);
11581 scalar_int_mode mode;
11582 if (GET_CODE (*dst) == ZERO_EXTRACT
11583 && REG_P (XEXP (*dst, 0))
11584 && is_a <scalar_int_mode> (GET_MODE (XEXP (*dst, 0)), &mode)
11585 && CONST_INT_P (XEXP (*dst, 1))
11586 && CONST_INT_P (XEXP (*dst, 2)))
11587 {
11588 rtx reg = XEXP (*dst, 0);
11589 int width = INTVAL (XEXP (*dst, 1));
11590 int offset = INTVAL (XEXP (*dst, 2));
11591 int reg_width = GET_MODE_PRECISION (mode);
11592 if (BITS_BIG_ENDIAN)
11593 offset = reg_width - width - offset;
11594
11595 rtx x, y, z, w;
11596 wide_int mask = wi::shifted_mask (offset, width, true, reg_width);
11597 wide_int mask2 = wi::shifted_mask (offset, width, false, reg_width);
11598 x = gen_rtx_AND (mode, reg, immed_wide_int_const (mask, mode));
11599 if (offset)
11600 y = gen_rtx_ASHIFT (mode, SET_SRC (pat), GEN_INT (offset));
11601 else
11602 y = SET_SRC (pat);
11603 z = gen_rtx_AND (mode, y, immed_wide_int_const (mask2, mode));
11604 w = gen_rtx_IOR (mode, x, z);
11605 SUBST (SET_DEST (pat), reg);
11606 SUBST (SET_SRC (pat), w);
11607
11608 changed = true;
11609 }
11610
11611 return changed;
11612 }
11613
11614 /* Like recog, but we receive the address of a pointer to a new pattern.
11615 We try to match the rtx that the pointer points to.
11616 If that fails, we may try to modify or replace the pattern,
11617 storing the replacement into the same pointer object.
11618
11619 Modifications include deletion or addition of CLOBBERs. If the
11620 instruction will still not match, we change ZERO_EXTEND and ZERO_EXTRACT
11621 to the equivalent AND and perhaps LSHIFTRT patterns, and try with that
11622 (and undo if that fails).
11623
11624 PNOTES is a pointer to a location where any REG_UNUSED notes added for
11625 the CLOBBERs are placed.
11626
11627 The value is the final insn code from the pattern ultimately matched,
11628 or -1. */
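/* For example, if

	(set (reg:DI 0) (zero_extend:DI (subreg:SI (reg:DI 1) 0)))

   fails to match, change_zero_ext rewrites the source as
   (and:DI (reg:DI 1) (const_int 0xffffffff)) and recognition is
   retried; if that also fails, the change is undone via the undo
   marker.  The register numbers are illustrative only.  */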
11629
11630 static int
11631 recog_for_combine (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
11632 {
11633 rtx pat = *pnewpat;
11634 int insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
11635 if (insn_code_number >= 0 || check_asm_operands (pat))
11636 return insn_code_number;
11637
11638 void *marker = get_undo_marker ();
11639 bool changed = false;
11640
11641 if (GET_CODE (pat) == SET)
11642 changed = change_zero_ext (pat);
11643 else if (GET_CODE (pat) == PARALLEL)
11644 {
11645 int i;
11646 for (i = 0; i < XVECLEN (pat, 0); i++)
11647 {
11648 rtx set = XVECEXP (pat, 0, i);
11649 if (GET_CODE (set) == SET)
11650 changed |= change_zero_ext (set);
11651 }
11652 }
11653
11654 if (changed)
11655 {
11656 insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
11657
11658 if (insn_code_number < 0)
11659 undo_to_marker (marker);
11660 }
11661
11662 return insn_code_number;
11663 }
11664
11665 /* Like gen_lowpart_general but for use by combine. In combine it
11666 is not possible to create any new pseudoregs. However, it is
11667 safe to create invalid memory addresses, because combine will
11668 try to recognize them and all they will do is make the combine
11669 attempt fail.
11670
11671 If for some reason this cannot do its job, an rtx
11672 (clobber (const_int 0)) is returned.
11673 An insn containing that will not be recognized. */
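/* For example, gen_lowpart_for_combine (QImode, (reg:SI R)) normally
   yields (subreg:QI (reg:SI R) 0) on a little-endian target, whereas a
   request that cannot be honored, such as the lowpart of a volatile MEM
   in a different mode, yields (clobber (const_int 0)).  */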
11674
11675 static rtx
11676 gen_lowpart_for_combine (machine_mode omode, rtx x)
11677 {
11678 machine_mode imode = GET_MODE (x);
11679 rtx result;
11680
11681 if (omode == imode)
11682 return x;
11683
11684 /* We can only support MODE being wider than a word if X is a
11685 constant integer or has a mode the same size. */
11686 if (maybe_gt (GET_MODE_SIZE (omode), UNITS_PER_WORD)
11687 && ! (CONST_SCALAR_INT_P (x)
11688 || known_eq (GET_MODE_SIZE (imode), GET_MODE_SIZE (omode))))
11689 goto fail;
11690
11691 /* X might be a paradoxical (subreg (mem)). In that case, gen_lowpart
11692 won't know what to do. So we will strip off the SUBREG here and
11693 process normally. */
11694 if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x)))
11695 {
11696 x = SUBREG_REG (x);
11697
11698       /* For use in case we fall down into the address adjustments
11699 	 further below, we need to update IMODE, the known mode of X,
11700 	 since we just changed X.  */
11701 imode = GET_MODE (x);
11702
11703 if (imode == omode)
11704 return x;
11705 }
11706
11707 result = gen_lowpart_common (omode, x);
11708
11709 if (result)
11710 return result;
11711
11712 if (MEM_P (x))
11713 {
11714 /* Refuse to work on a volatile memory ref or one with a mode-dependent
11715 address. */
11716 if (MEM_VOLATILE_P (x)
11717 || mode_dependent_address_p (XEXP (x, 0), MEM_ADDR_SPACE (x)))
11718 goto fail;
11719
11720 /* If we want to refer to something bigger than the original memref,
11721 generate a paradoxical subreg instead. That will force a reload
11722 of the original memref X. */
11723 if (paradoxical_subreg_p (omode, imode))
11724 return gen_rtx_SUBREG (omode, x, 0);
11725
11726 poly_int64 offset = byte_lowpart_offset (omode, imode);
11727 return adjust_address_nv (x, omode, offset);
11728 }
11729
11730 /* If X is a comparison operator, rewrite it in a new mode. This
11731 probably won't match, but may allow further simplifications. */
11732 else if (COMPARISON_P (x))
11733 return gen_rtx_fmt_ee (GET_CODE (x), omode, XEXP (x, 0), XEXP (x, 1));
11734
11735 /* If we couldn't simplify X any other way, just enclose it in a
11736 SUBREG. Normally, this SUBREG won't match, but some patterns may
11737 include an explicit SUBREG or we may simplify it further in combine. */
11738 else
11739 {
11740 rtx res;
11741
11742 if (imode == VOIDmode)
11743 {
11744 imode = int_mode_for_mode (omode).require ();
11745 x = gen_lowpart_common (imode, x);
11746 if (x == NULL)
11747 goto fail;
11748 }
11749 res = lowpart_subreg (omode, x, imode);
11750 if (res)
11751 return res;
11752 }
11753
11754 fail:
11755 return gen_rtx_CLOBBER (omode, const0_rtx);
11756 }
11757
11758 /* Try to simplify a comparison between OP0 and a constant OP1,
11759 where CODE is the comparison code that will be tested, into a
11760 (CODE OP0 const0_rtx) form.
11761
11762 The result is a possibly different comparison code to use.
11763 *POP1 may be updated. */
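/* For example, if OP0 is (and:SI X (const_int 4)) and bit 2 is its only
   possibly nonzero bit, an EQ comparison against (const_int 4) becomes
   an NE comparison against zero; likewise an LTU comparison of an
   SImode value against (const_int 0x80000000) becomes a GE comparison
   against zero.  */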
11764
11765 static enum rtx_code
11766 simplify_compare_const (enum rtx_code code, machine_mode mode,
11767 rtx op0, rtx *pop1)
11768 {
11769 scalar_int_mode int_mode;
11770 HOST_WIDE_INT const_op = INTVAL (*pop1);
11771
11772 /* Get the constant we are comparing against and turn off all bits
11773 not on in our mode. */
11774 if (mode != VOIDmode)
11775 const_op = trunc_int_for_mode (const_op, mode);
11776
11777 /* If we are comparing against a constant power of two and the value
11778 being compared can only have that single bit nonzero (e.g., it was
11779 `and'ed with that bit), we can replace this with a comparison
11780 with zero. */
11781 if (const_op
11782 && (code == EQ || code == NE || code == GE || code == GEU
11783 || code == LT || code == LTU)
11784 && is_a <scalar_int_mode> (mode, &int_mode)
11785 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11786 && pow2p_hwi (const_op & GET_MODE_MASK (int_mode))
11787 && (nonzero_bits (op0, int_mode)
11788 == (unsigned HOST_WIDE_INT) (const_op & GET_MODE_MASK (int_mode))))
11789 {
11790 code = (code == EQ || code == GE || code == GEU ? NE : EQ);
11791 const_op = 0;
11792 }
11793
11794 /* Similarly, if we are comparing a value known to be either -1 or
11795 0 with -1, change it to the opposite comparison against zero. */
11796 if (const_op == -1
11797 && (code == EQ || code == NE || code == GT || code == LE
11798 || code == GEU || code == LTU)
11799 && is_a <scalar_int_mode> (mode, &int_mode)
11800 && num_sign_bit_copies (op0, int_mode) == GET_MODE_PRECISION (int_mode))
11801 {
11802 code = (code == EQ || code == LE || code == GEU ? NE : EQ);
11803 const_op = 0;
11804 }
11805
11806 /* Do some canonicalizations based on the comparison code. We prefer
11807 comparisons against zero and then prefer equality comparisons.
11808 If we can reduce the size of a constant, we will do that too. */
11809 switch (code)
11810 {
11811 case LT:
11812 /* < C is equivalent to <= (C - 1) */
11813 if (const_op > 0)
11814 {
11815 const_op -= 1;
11816 code = LE;
11817 /* ... fall through to LE case below. */
11818 gcc_fallthrough ();
11819 }
11820 else
11821 break;
11822
11823 case LE:
11824 /* <= C is equivalent to < (C + 1); we do this for C < 0 */
11825 if (const_op < 0)
11826 {
11827 const_op += 1;
11828 code = LT;
11829 }
11830
11831 /* If we are doing a <= 0 comparison on a value known to have
11832 a zero sign bit, we can replace this with == 0. */
11833 else if (const_op == 0
11834 && is_a <scalar_int_mode> (mode, &int_mode)
11835 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11836 && (nonzero_bits (op0, int_mode)
11837 & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11838 == 0)
11839 code = EQ;
11840 break;
11841
11842 case GE:
11843 /* >= C is equivalent to > (C - 1). */
11844 if (const_op > 0)
11845 {
11846 const_op -= 1;
11847 code = GT;
11848 /* ... fall through to GT below. */
11849 gcc_fallthrough ();
11850 }
11851 else
11852 break;
11853
11854 case GT:
11855 /* > C is equivalent to >= (C + 1); we do this for C < 0. */
11856 if (const_op < 0)
11857 {
11858 const_op += 1;
11859 code = GE;
11860 }
11861
11862 /* If we are doing a > 0 comparison on a value known to have
11863 a zero sign bit, we can replace this with != 0. */
11864 else if (const_op == 0
11865 && is_a <scalar_int_mode> (mode, &int_mode)
11866 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11867 && (nonzero_bits (op0, int_mode)
11868 & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11869 == 0)
11870 code = NE;
11871 break;
11872
11873 case LTU:
11874 /* < C is equivalent to <= (C - 1). */
11875 if (const_op > 0)
11876 {
11877 const_op -= 1;
11878 code = LEU;
11879 /* ... fall through ... */
11880 gcc_fallthrough ();
11881 }
11882 /* (unsigned) < 0x80000000 is equivalent to >= 0. */
11883 else if (is_a <scalar_int_mode> (mode, &int_mode)
11884 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11885 && ((unsigned HOST_WIDE_INT) const_op
11886 == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11887 {
11888 const_op = 0;
11889 code = GE;
11890 break;
11891 }
11892 else
11893 break;
11894
11895 case LEU:
11896 /* unsigned <= 0 is equivalent to == 0 */
11897 if (const_op == 0)
11898 code = EQ;
11899 /* (unsigned) <= 0x7fffffff is equivalent to >= 0. */
11900 else if (is_a <scalar_int_mode> (mode, &int_mode)
11901 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11902 && ((unsigned HOST_WIDE_INT) const_op
11903 == ((HOST_WIDE_INT_1U
11904 << (GET_MODE_PRECISION (int_mode) - 1)) - 1)))
11905 {
11906 const_op = 0;
11907 code = GE;
11908 }
11909 break;
11910
11911 case GEU:
11912 /* >= C is equivalent to > (C - 1). */
11913 if (const_op > 1)
11914 {
11915 const_op -= 1;
11916 code = GTU;
11917 /* ... fall through ... */
11918 gcc_fallthrough ();
11919 }
11920
11921 /* (unsigned) >= 0x80000000 is equivalent to < 0. */
11922 else if (is_a <scalar_int_mode> (mode, &int_mode)
11923 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11924 && ((unsigned HOST_WIDE_INT) const_op
11925 == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11926 {
11927 const_op = 0;
11928 code = LT;
11929 break;
11930 }
11931 else
11932 break;
11933
11934 case GTU:
11935 /* unsigned > 0 is equivalent to != 0 */
11936 if (const_op == 0)
11937 code = NE;
11938 /* (unsigned) > 0x7fffffff is equivalent to < 0. */
11939 else if (is_a <scalar_int_mode> (mode, &int_mode)
11940 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11941 && ((unsigned HOST_WIDE_INT) const_op
11942 == (HOST_WIDE_INT_1U
11943 << (GET_MODE_PRECISION (int_mode) - 1)) - 1))
11944 {
11945 const_op = 0;
11946 code = LT;
11947 }
11948 break;
11949
11950 default:
11951 break;
11952 }
11953
11954 *pop1 = GEN_INT (const_op);
11955 return code;
11956 }
11957
11958 /* Simplify a comparison between *POP0 and *POP1 where CODE is the
11959 comparison code that will be tested.
11960
11961 The result is a possibly different comparison code to use. *POP0 and
11962 *POP1 may be updated.
11963
11964 It is possible that we might detect that a comparison is either always
11965 true or always false. However, we do not perform general constant
11966 folding in combine, so this knowledge isn't useful. Such tautologies
11967 should have been detected earlier. Hence we ignore all such cases. */
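/* For example, (ltu (not X) (not Y)) becomes (gtu X Y), since NOT
   reverses the unsigned order, and (eq (neg X) (neg Y)) becomes
   (eq X Y).  */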
11968
11969 static enum rtx_code
11970 simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
11971 {
11972 rtx op0 = *pop0;
11973 rtx op1 = *pop1;
11974 rtx tem, tem1;
11975 int i;
11976 scalar_int_mode mode, inner_mode, tmode;
11977 opt_scalar_int_mode tmode_iter;
11978
11979 /* Try a few ways of applying the same transformation to both operands. */
11980 while (1)
11981 {
11982 /* The test below this one won't handle SIGN_EXTENDs on these machines,
11983 so check specially. */
11984 if (!WORD_REGISTER_OPERATIONS
11985 && code != GTU && code != GEU && code != LTU && code != LEU
11986 && GET_CODE (op0) == ASHIFTRT && GET_CODE (op1) == ASHIFTRT
11987 && GET_CODE (XEXP (op0, 0)) == ASHIFT
11988 && GET_CODE (XEXP (op1, 0)) == ASHIFT
11989 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG
11990 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG
11991 && is_a <scalar_int_mode> (GET_MODE (op0), &mode)
11992 && (is_a <scalar_int_mode>
11993 (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))), &inner_mode))
11994 && inner_mode == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0)))
11995 && CONST_INT_P (XEXP (op0, 1))
11996 && XEXP (op0, 1) == XEXP (op1, 1)
11997 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
11998 && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1)
11999 && (INTVAL (XEXP (op0, 1))
12000 == (GET_MODE_PRECISION (mode)
12001 - GET_MODE_PRECISION (inner_mode))))
12002 {
12003 op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
12004 op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0));
12005 }
12006
12007 /* If both operands are the same constant shift, see if we can ignore the
12008 shift. We can if the shift is a rotate or if the bits shifted out of
12009 this shift are known to be zero for both inputs and if the type of
12010 comparison is compatible with the shift. */
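      /* E.g. (eq (lshiftrt X (const_int 2)) (lshiftrt Y (const_int 2)))
	 can become (eq X Y) provided the two low bits of both X and Y
	 are known to be zero, since neither shift then discards any
	 information.  */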
12011 if (GET_CODE (op0) == GET_CODE (op1)
12012 && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
12013 && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ))
12014 || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT)
12015 && (code != GT && code != LT && code != GE && code != LE))
12016 || (GET_CODE (op0) == ASHIFTRT
12017 && (code != GTU && code != LTU
12018 && code != GEU && code != LEU)))
12019 && CONST_INT_P (XEXP (op0, 1))
12020 && INTVAL (XEXP (op0, 1)) >= 0
12021 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
12022 && XEXP (op0, 1) == XEXP (op1, 1))
12023 {
12024 machine_mode mode = GET_MODE (op0);
12025 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
12026 int shift_count = INTVAL (XEXP (op0, 1));
12027
12028 if (GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFTRT)
12029 mask &= (mask >> shift_count) << shift_count;
12030 else if (GET_CODE (op0) == ASHIFT)
12031 mask = (mask & (mask << shift_count)) >> shift_count;
12032
12033 if ((nonzero_bits (XEXP (op0, 0), mode) & ~mask) == 0
12034 && (nonzero_bits (XEXP (op1, 0), mode) & ~mask) == 0)
12035 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0);
12036 else
12037 break;
12038 }
12039
12040 /* If both operands are AND's of a paradoxical SUBREG by constant, the
12041 SUBREGs are of the same mode, and, in both cases, the AND would
12042 be redundant if the comparison was done in the narrower mode,
12043 do the comparison in the narrower mode (e.g., we are AND'ing with 1
12044 and the operand's possibly nonzero bits are 0xffffff01; in that case
12045 if we only care about QImode, we don't need the AND). This case
12046 occurs if the output mode of an scc insn is not SImode and
12047 STORE_FLAG_VALUE == 1 (e.g., the 386).
12048
12049 Similarly, check for a case where the AND's are ZERO_EXTEND
12050 operations from some narrower mode even though a SUBREG is not
12051 present. */
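      /* Concretely: comparing (and:SI (subreg:SI (reg:QI A) 0) (const_int 1))
	 against (and:SI (subreg:SI (reg:QI B) 0) (const_int 1)) can be
	 narrowed to an unsigned QImode comparison of A with B when the
	 masks already cover every possibly nonzero bit of A and B.  */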
12052
12053 else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND
12054 && CONST_INT_P (XEXP (op0, 1))
12055 && CONST_INT_P (XEXP (op1, 1)))
12056 {
12057 rtx inner_op0 = XEXP (op0, 0);
12058 rtx inner_op1 = XEXP (op1, 0);
12059 HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1));
12060 HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1));
12061 int changed = 0;
12062
12063 if (paradoxical_subreg_p (inner_op0)
12064 && GET_CODE (inner_op1) == SUBREG
12065 && HWI_COMPUTABLE_MODE_P (GET_MODE (SUBREG_REG (inner_op0)))
12066 && (GET_MODE (SUBREG_REG (inner_op0))
12067 == GET_MODE (SUBREG_REG (inner_op1)))
12068 && ((~c0) & nonzero_bits (SUBREG_REG (inner_op0),
12069 GET_MODE (SUBREG_REG (inner_op0)))) == 0
12070 && ((~c1) & nonzero_bits (SUBREG_REG (inner_op1),
12071 GET_MODE (SUBREG_REG (inner_op1)))) == 0)
12072 {
12073 op0 = SUBREG_REG (inner_op0);
12074 op1 = SUBREG_REG (inner_op1);
12075
12076 /* The resulting comparison is always unsigned since we masked
12077 off the original sign bit. */
12078 code = unsigned_condition (code);
12079
12080 changed = 1;
12081 }
12082
12083 else if (c0 == c1)
12084 FOR_EACH_MODE_UNTIL (tmode,
12085 as_a <scalar_int_mode> (GET_MODE (op0)))
12086 if ((unsigned HOST_WIDE_INT) c0 == GET_MODE_MASK (tmode))
12087 {
12088 op0 = gen_lowpart_or_truncate (tmode, inner_op0);
12089 op1 = gen_lowpart_or_truncate (tmode, inner_op1);
12090 code = unsigned_condition (code);
12091 changed = 1;
12092 break;
12093 }
12094
12095 if (! changed)
12096 break;
12097 }
12098
12099 /* If both operands are NOT, we can strip off the outer operation
12100 and adjust the comparison code for swapped operands; similarly for
12101 NEG, except that this must be an equality comparison. */
12102 else if ((GET_CODE (op0) == NOT && GET_CODE (op1) == NOT)
12103 || (GET_CODE (op0) == NEG && GET_CODE (op1) == NEG
12104 && (code == EQ || code == NE)))
12105 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0), code = swap_condition (code);
12106
12107 else
12108 break;
12109 }
12110
12111 /* If the first operand is a constant, swap the operands and adjust the
12112 comparison code appropriately, but don't do this if the second operand
12113 is already a constant integer. */
12114 if (swap_commutative_operands_p (op0, op1))
12115 {
12116 std::swap (op0, op1);
12117 code = swap_condition (code);
12118 }
12119
12120 /* We now enter a loop during which we will try to simplify the comparison.
12121 For the most part, we only are concerned with comparisons with zero,
12122 but some things may really be comparisons with zero but not start
12123 out looking that way. */
12124
12125 while (CONST_INT_P (op1))
12126 {
12127 machine_mode raw_mode = GET_MODE (op0);
12128 scalar_int_mode int_mode;
12129 int equality_comparison_p;
12130 int sign_bit_comparison_p;
12131 int unsigned_comparison_p;
12132 HOST_WIDE_INT const_op;
12133
12134 /* We only want to handle integral modes. This catches VOIDmode,
12135 CCmode, and the floating-point modes. An exception is that we
12136 can handle VOIDmode if OP0 is a COMPARE or a comparison
12137 operation. */
12138
12139 if (GET_MODE_CLASS (raw_mode) != MODE_INT
12140 && ! (raw_mode == VOIDmode
12141 && (GET_CODE (op0) == COMPARE || COMPARISON_P (op0))))
12142 break;
12143
12144       /* Try to simplify the comparison against the constant, possibly
12145 	 changing the comparison code and/or canonicalizing OP1 (often to zero).  */
12146 code = simplify_compare_const (code, raw_mode, op0, &op1);
12147 const_op = INTVAL (op1);
12148
12149 /* Compute some predicates to simplify code below. */
12150
12151 equality_comparison_p = (code == EQ || code == NE);
12152 sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0);
12153 unsigned_comparison_p = (code == LTU || code == LEU || code == GTU
12154 || code == GEU);
12155
12156 /* If this is a sign bit comparison and we can do arithmetic in
12157 MODE, say that we will only be needing the sign bit of OP0. */
12158 if (sign_bit_comparison_p
12159 && is_a <scalar_int_mode> (raw_mode, &int_mode)
12160 && HWI_COMPUTABLE_MODE_P (int_mode))
12161 op0 = force_to_mode (op0, int_mode,
12162 HOST_WIDE_INT_1U
12163 << (GET_MODE_PRECISION (int_mode) - 1),
12164 0);
12165
12166 if (COMPARISON_P (op0))
12167 {
12168 /* We can't do anything if OP0 is a condition code value, rather
12169 than an actual data value. */
12170 if (const_op != 0
12171 || CC0_P (XEXP (op0, 0))
12172 || GET_MODE_CLASS (GET_MODE (XEXP (op0, 0))) == MODE_CC)
12173 break;
12174
12175 /* Get the two operands being compared. */
12176 if (GET_CODE (XEXP (op0, 0)) == COMPARE)
12177 tem = XEXP (XEXP (op0, 0), 0), tem1 = XEXP (XEXP (op0, 0), 1);
12178 else
12179 tem = XEXP (op0, 0), tem1 = XEXP (op0, 1);
12180
12181 /* Check for the cases where we simply want the result of the
12182 earlier test or the opposite of that result. */
12183 if (code == NE || code == EQ
12184 || (val_signbit_known_set_p (raw_mode, STORE_FLAG_VALUE)
12185 && (code == LT || code == GE)))
12186 {
12187 enum rtx_code new_code;
12188 if (code == LT || code == NE)
12189 new_code = GET_CODE (op0);
12190 else
12191 new_code = reversed_comparison_code (op0, NULL);
12192
12193 if (new_code != UNKNOWN)
12194 {
12195 code = new_code;
12196 op0 = tem;
12197 op1 = tem1;
12198 continue;
12199 }
12200 }
12201 break;
12202 }
12203
12204 if (raw_mode == VOIDmode)
12205 break;
12206 scalar_int_mode mode = as_a <scalar_int_mode> (raw_mode);
12207
12208 /* Now try cases based on the opcode of OP0. If none of the cases
12209 does a "continue", we exit this loop immediately after the
12210 switch. */
12211
12212 unsigned int mode_width = GET_MODE_PRECISION (mode);
12213 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
12214 switch (GET_CODE (op0))
12215 {
12216 case ZERO_EXTRACT:
12217 /* If we are extracting a single bit from a variable position in
12218 a constant that has only a single bit set and are comparing it
12219 with zero, we can convert this into an equality comparison
12220 between the position and the location of the single bit. */
12221 /* Except we can't if SHIFT_COUNT_TRUNCATED is set, since we might
12222 have already reduced the shift count modulo the word size. */
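	  /* E.g. with !BITS_BIG_ENDIAN,
	     (eq (zero_extract (const_int 4) (const_int 1) X) (const_int 0))
	     asks whether bit X of the value 4 is clear; only bit 2 is
	     set, so this becomes (ne X (const_int 2)).  */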
12223 if (!SHIFT_COUNT_TRUNCATED
12224 && CONST_INT_P (XEXP (op0, 0))
12225 && XEXP (op0, 1) == const1_rtx
12226 && equality_comparison_p && const_op == 0
12227 && (i = exact_log2 (UINTVAL (XEXP (op0, 0)))) >= 0)
12228 {
12229 if (BITS_BIG_ENDIAN)
12230 i = BITS_PER_WORD - 1 - i;
12231
12232 op0 = XEXP (op0, 2);
12233 op1 = GEN_INT (i);
12234 const_op = i;
12235
12236 /* Result is nonzero iff shift count is equal to I. */
12237 code = reverse_condition (code);
12238 continue;
12239 }
12240
12241 /* fall through */
12242
12243 case SIGN_EXTRACT:
12244 tem = expand_compound_operation (op0);
12245 if (tem != op0)
12246 {
12247 op0 = tem;
12248 continue;
12249 }
12250 break;
12251
12252 case NOT:
12253 /* If testing for equality, we can take the NOT of the constant. */
12254 if (equality_comparison_p
12255 && (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0)
12256 {
12257 op0 = XEXP (op0, 0);
12258 op1 = tem;
12259 continue;
12260 }
12261
12262 /* If just looking at the sign bit, reverse the sense of the
12263 comparison. */
12264 if (sign_bit_comparison_p)
12265 {
12266 op0 = XEXP (op0, 0);
12267 code = (code == GE ? LT : GE);
12268 continue;
12269 }
12270 break;
12271
12272 case NEG:
12273 /* If testing for equality, we can take the NEG of the constant. */
12274 if (equality_comparison_p
12275 && (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0)
12276 {
12277 op0 = XEXP (op0, 0);
12278 op1 = tem;
12279 continue;
12280 }
12281
12282 /* The remaining cases only apply to comparisons with zero. */
12283 if (const_op != 0)
12284 break;
12285
12286 /* When X is ABS or is known positive,
12287 (neg X) is < 0 if and only if X != 0. */
12288
12289 if (sign_bit_comparison_p
12290 && (GET_CODE (XEXP (op0, 0)) == ABS
12291 || (mode_width <= HOST_BITS_PER_WIDE_INT
12292 && (nonzero_bits (XEXP (op0, 0), mode)
12293 & (HOST_WIDE_INT_1U << (mode_width - 1)))
12294 == 0)))
12295 {
12296 op0 = XEXP (op0, 0);
12297 code = (code == LT ? NE : EQ);
12298 continue;
12299 }
12300
12301 /* If we have NEG of something whose two high-order bits are the
12302 same, we know that "(-a) < 0" is equivalent to "a > 0". */
12303 if (num_sign_bit_copies (op0, mode) >= 2)
12304 {
12305 op0 = XEXP (op0, 0);
12306 code = swap_condition (code);
12307 continue;
12308 }
12309 break;
12310
12311 case ROTATE:
12312 /* If we are testing equality and our count is a constant, we
12313 can perform the inverse operation on our RHS. */
12314 if (equality_comparison_p && CONST_INT_P (XEXP (op0, 1))
12315 && (tem = simplify_binary_operation (ROTATERT, mode,
12316 op1, XEXP (op0, 1))) != 0)
12317 {
12318 op0 = XEXP (op0, 0);
12319 op1 = tem;
12320 continue;
12321 }
12322
12323 /* If we are doing a < 0 or >= 0 comparison, it means we are testing
12324 a particular bit. Convert it to an AND of a constant of that
12325 bit. This will be converted into a ZERO_EXTRACT. */
12326 if (const_op == 0 && sign_bit_comparison_p
12327 && CONST_INT_P (XEXP (op0, 1))
12328 && mode_width <= HOST_BITS_PER_WIDE_INT
12329 && UINTVAL (XEXP (op0, 1)) < mode_width)
12330 {
12331 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
12332 (HOST_WIDE_INT_1U
12333 << (mode_width - 1
12334 - INTVAL (XEXP (op0, 1)))));
12335 code = (code == LT ? NE : EQ);
12336 continue;
12337 }
12338
12339 /* Fall through. */
12340
12341 case ABS:
12342 /* ABS is ignorable inside an equality comparison with zero. */
12343 if (const_op == 0 && equality_comparison_p)
12344 {
12345 op0 = XEXP (op0, 0);
12346 continue;
12347 }
12348 break;
12349
12350 case SIGN_EXTEND:
12351 /* Can simplify (compare (zero/sign_extend FOO) CONST) to
12352 (compare FOO CONST) if CONST fits in FOO's mode and we
12353 are either testing inequality or have an unsigned
12354 comparison with ZERO_EXTEND or a signed comparison with
12355 SIGN_EXTEND. But don't do it if we don't have a compare
12356 insn of the given mode, since we'd have to revert it
12357 later on, and then we wouldn't know whether to sign- or
12358 zero-extend. */
12359 if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
12360 && ! unsigned_comparison_p
12361 && HWI_COMPUTABLE_MODE_P (mode)
12362 && trunc_int_for_mode (const_op, mode) == const_op
12363 && have_insn_for (COMPARE, mode))
12364 {
12365 op0 = XEXP (op0, 0);
12366 continue;
12367 }
12368 break;
12369
12370 case SUBREG:
12371 /* Check for the case where we are comparing A - C1 with C2, that is
12372
12373 (subreg:MODE (plus (A) (-C1))) op (C2)
12374
12375 with C1 a constant, and try to lift the SUBREG, i.e. to do the
12376 comparison in the wider mode. One of the following two conditions
12377 must be true in order for this to be valid:
12378
12379 1. The mode extension results in the same bit pattern being added
12380 on both sides and the comparison is equality or unsigned. As
12381 C2 has been truncated to fit in MODE, the pattern can only be
12382 all 0s or all 1s.
12383
12384 2. The mode extension results in the sign bit being copied on
12385 each side.
12386
12387 The difficulty here is that we have predicates for A but not for
12388 (A - C1) so we need to check that C1 is within proper bounds so
12389 as to perturbate A as little as possible. */
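	  /* E.g. (eq (subreg:QI (plus:SI A (const_int -5)) 0) (const_int 3))
	     can be lifted to (eq (plus:SI A (const_int -5)) (const_int 3))
	     when the nonzero bits of A all lie within QImode, so that
	     A - 5 and 3 extend consistently (condition 1 above).  */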
12390
12391 if (mode_width <= HOST_BITS_PER_WIDE_INT
12392 && subreg_lowpart_p (op0)
12393 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
12394 &inner_mode)
12395 && GET_MODE_PRECISION (inner_mode) > mode_width
12396 && GET_CODE (SUBREG_REG (op0)) == PLUS
12397 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1)))
12398 {
12399 rtx a = XEXP (SUBREG_REG (op0), 0);
12400 HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1));
12401
12402 if ((c1 > 0
12403 && (unsigned HOST_WIDE_INT) c1
12404 < HOST_WIDE_INT_1U << (mode_width - 1)
12405 && (equality_comparison_p || unsigned_comparison_p)
12406 /* (A - C1) zero-extends if it is positive and sign-extends
12407 if it is negative, C2 both zero- and sign-extends. */
12408 && (((nonzero_bits (a, inner_mode)
12409 & ~GET_MODE_MASK (mode)) == 0
12410 && const_op >= 0)
12411 /* (A - C1) sign-extends if it is positive and 1-extends
12412 if it is negative, C2 both sign- and 1-extends. */
12413 || (num_sign_bit_copies (a, inner_mode)
12414 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
12415 - mode_width)
12416 && const_op < 0)))
12417 || ((unsigned HOST_WIDE_INT) c1
12418 < HOST_WIDE_INT_1U << (mode_width - 2)
12419 /* (A - C1) always sign-extends, like C2. */
12420 && num_sign_bit_copies (a, inner_mode)
12421 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
12422 - (mode_width - 1))))
12423 {
12424 op0 = SUBREG_REG (op0);
12425 continue;
12426 }
12427 }
12428
12429 /* If the inner mode is narrower and we are extracting the low part,
12430 we can treat the SUBREG as if it were a ZERO_EXTEND. */
12431 if (paradoxical_subreg_p (op0))
12432 ;
12433 else if (subreg_lowpart_p (op0)
12434 && GET_MODE_CLASS (mode) == MODE_INT
12435 && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
12436 && (code == NE || code == EQ)
12437 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
12438 && !paradoxical_subreg_p (op0)
12439 && (nonzero_bits (SUBREG_REG (op0), inner_mode)
12440 & ~GET_MODE_MASK (mode)) == 0)
12441 {
12442 /* Remove outer subregs that don't do anything. */
12443 tem = gen_lowpart (inner_mode, op1);
12444
12445 if ((nonzero_bits (tem, inner_mode)
12446 & ~GET_MODE_MASK (mode)) == 0)
12447 {
12448 op0 = SUBREG_REG (op0);
12449 op1 = tem;
12450 continue;
12451 }
12452 break;
12453 }
12454 else
12455 break;
12456
12457 /* FALLTHROUGH */
12458
12459 case ZERO_EXTEND:
12460 if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
12461 && (unsigned_comparison_p || equality_comparison_p)
12462 && HWI_COMPUTABLE_MODE_P (mode)
12463 && (unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (mode)
12464 && const_op >= 0
12465 && have_insn_for (COMPARE, mode))
12466 {
12467 op0 = XEXP (op0, 0);
12468 continue;
12469 }
12470 break;
12471
12472 case PLUS:
12473 /* (eq (plus X A) B) -> (eq X (minus B A)). We can only do
12474 this for equality comparisons due to pathological cases involving
12475 overflows. */
12476 if (equality_comparison_p
12477 && (tem = simplify_binary_operation (MINUS, mode,
12478 op1, XEXP (op0, 1))) != 0)
12479 {
12480 op0 = XEXP (op0, 0);
12481 op1 = tem;
12482 continue;
12483 }
12484
12485 /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0. */
12486 if (const_op == 0 && XEXP (op0, 1) == constm1_rtx
12487 && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p)
12488 {
12489 op0 = XEXP (XEXP (op0, 0), 0);
12490 code = (code == LT ? EQ : NE);
12491 continue;
12492 }
12493 break;
12494
12495 case MINUS:
12496 /* We used to optimize signed comparisons against zero, but that
12497 was incorrect. Unsigned comparisons against zero (GTU, LEU)
12498 arrive here as equality comparisons, or (GEU, LTU) are
12499 optimized away. No need to special-case them. */
12500
12501 /* (eq (minus A B) C) -> (eq A (plus B C)) or
12502 (eq B (minus A C)), whichever simplifies. We can only do
12503 this for equality comparisons due to pathological cases involving
12504 overflows. */
12505 if (equality_comparison_p
12506 && (tem = simplify_binary_operation (PLUS, mode,
12507 XEXP (op0, 1), op1)) != 0)
12508 {
12509 op0 = XEXP (op0, 0);
12510 op1 = tem;
12511 continue;
12512 }
12513
12514 if (equality_comparison_p
12515 && (tem = simplify_binary_operation (MINUS, mode,
12516 XEXP (op0, 0), op1)) != 0)
12517 {
12518 op0 = XEXP (op0, 1);
12519 op1 = tem;
12520 continue;
12521 }
12522
12523 /* The sign bit of (minus (ashiftrt X C) X), where C is the number
12524 of bits in X minus 1, is one iff X > 0. */
12525 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT
12526 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12527 && UINTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1
12528 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
12529 {
12530 op0 = XEXP (op0, 1);
12531 code = (code == GE ? LE : GT);
12532 continue;
12533 }
12534 break;
12535
12536 case XOR:
12537 /* (eq (xor A B) C) -> (eq A (xor B C)). This is a simplification
12538 if C is zero or B is a constant. */
12539 if (equality_comparison_p
12540 && (tem = simplify_binary_operation (XOR, mode,
12541 XEXP (op0, 1), op1)) != 0)
12542 {
12543 op0 = XEXP (op0, 0);
12544 op1 = tem;
12545 continue;
12546 }
12547 break;
12548
12549
12550 case IOR:
12551 /* The sign bit of (ior (plus X (const_int -1)) X) is nonzero
12552 iff X <= 0. */
12553 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS
12554 && XEXP (XEXP (op0, 0), 1) == constm1_rtx
12555 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
12556 {
12557 op0 = XEXP (op0, 1);
12558 code = (code == GE ? GT : LE);
12559 continue;
12560 }
12561 break;
12562
12563 case AND:
12564 /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1). This
12565 will be converted to a ZERO_EXTRACT later. */
12566 if (const_op == 0 && equality_comparison_p
12567 && GET_CODE (XEXP (op0, 0)) == ASHIFT
12568 && XEXP (XEXP (op0, 0), 0) == const1_rtx)
12569 {
12570 op0 = gen_rtx_LSHIFTRT (mode, XEXP (op0, 1),
12571 XEXP (XEXP (op0, 0), 1));
12572 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
12573 continue;
12574 }
12575
12576 /* If we are comparing (and (lshiftrt X C1) C2) for equality with
12577 zero and X is a comparison and C1 and C2 describe only bits set
12578 in STORE_FLAG_VALUE, we can compare with X. */
12579 if (const_op == 0 && equality_comparison_p
12580 && mode_width <= HOST_BITS_PER_WIDE_INT
12581 && CONST_INT_P (XEXP (op0, 1))
12582 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
12583 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12584 && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0
12585 && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT)
12586 {
12587 mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
12588 << INTVAL (XEXP (XEXP (op0, 0), 1)));
12589 if ((~STORE_FLAG_VALUE & mask) == 0
12590 && (COMPARISON_P (XEXP (XEXP (op0, 0), 0))
12591 || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0
12592 && COMPARISON_P (tem))))
12593 {
12594 op0 = XEXP (XEXP (op0, 0), 0);
12595 continue;
12596 }
12597 }
12598
12599 /* If we are doing an equality comparison of an AND of a bit equal
12600 to the sign bit, replace this with a LT or GE comparison of
12601 the underlying value. */
12602 if (equality_comparison_p
12603 && const_op == 0
12604 && CONST_INT_P (XEXP (op0, 1))
12605 && mode_width <= HOST_BITS_PER_WIDE_INT
12606 && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
12607 == HOST_WIDE_INT_1U << (mode_width - 1)))
12608 {
12609 op0 = XEXP (op0, 0);
12610 code = (code == EQ ? GE : LT);
12611 continue;
12612 }
12613
12614 /* If this AND operation is really a ZERO_EXTEND from a narrower
12615 mode, the constant fits within that mode, and this is either an
12616 equality or unsigned comparison, try to do this comparison in
12617 the narrower mode.
12618
12619 Note that in:
12620
12621 (ne:DI (and:DI (reg:DI 4) (const_int 0xffffffff)) (const_int 0))
12622 -> (ne:DI (reg:SI 4) (const_int 0))
12623
12624 unless TARGET_TRULY_NOOP_TRUNCATION allows it or the register is
12625 	     known to hold a value of the required mode, the
12626 transformation is invalid. */
12627 if ((equality_comparison_p || unsigned_comparison_p)
12628 && CONST_INT_P (XEXP (op0, 1))
12629 && (i = exact_log2 ((UINTVAL (XEXP (op0, 1))
12630 & GET_MODE_MASK (mode))
12631 + 1)) >= 0
12632 && const_op >> i == 0
12633 && int_mode_for_size (i, 1).exists (&tmode))
12634 {
12635 op0 = gen_lowpart_or_truncate (tmode, XEXP (op0, 0));
12636 continue;
12637 }
12638
12639 /* If this is (and:M1 (subreg:M1 X:M2 0) (const_int C1)) where C1
12640 fits in both M1 and M2 and the SUBREG is either paradoxical
12641 or represents the low part, permute the SUBREG and the AND
12642 and try again. */
12643 if (GET_CODE (XEXP (op0, 0)) == SUBREG
12644 && CONST_INT_P (XEXP (op0, 1)))
12645 {
12646 unsigned HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
12647 /* Require an integral mode, to avoid creating something like
12648 (AND:SF ...). */
12649 if ((is_a <scalar_int_mode>
12650 (GET_MODE (SUBREG_REG (XEXP (op0, 0))), &tmode))
12651 /* It is unsafe to commute the AND into the SUBREG if the
12652 SUBREG is paradoxical and WORD_REGISTER_OPERATIONS is
12653 not defined. As originally written the upper bits
12654 have a defined value due to the AND operation.
12655 However, if we commute the AND inside the SUBREG then
12656 they no longer have defined values and the meaning of
12657 the code has been changed.
12658 Also C1 should not change value in the smaller mode,
12659 see PR67028 (a positive C1 can become negative in the
12660 smaller mode, so that the AND does no longer mask the
12661 upper bits). */
12662 && ((WORD_REGISTER_OPERATIONS
12663 && mode_width > GET_MODE_PRECISION (tmode)
12664 && mode_width <= BITS_PER_WORD
12665 && trunc_int_for_mode (c1, tmode) == (HOST_WIDE_INT) c1)
12666 || (mode_width <= GET_MODE_PRECISION (tmode)
12667 && subreg_lowpart_p (XEXP (op0, 0))))
12668 && mode_width <= HOST_BITS_PER_WIDE_INT
12669 && HWI_COMPUTABLE_MODE_P (tmode)
12670 && (c1 & ~mask) == 0
12671 && (c1 & ~GET_MODE_MASK (tmode)) == 0
12672 && c1 != mask
12673 && c1 != GET_MODE_MASK (tmode))
12674 {
12675 op0 = simplify_gen_binary (AND, tmode,
12676 SUBREG_REG (XEXP (op0, 0)),
12677 gen_int_mode (c1, tmode));
12678 op0 = gen_lowpart (mode, op0);
12679 continue;
12680 }
12681 }
12682
12683 /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0). */
12684 if (const_op == 0 && equality_comparison_p
12685 && XEXP (op0, 1) == const1_rtx
12686 && GET_CODE (XEXP (op0, 0)) == NOT)
12687 {
12688 op0 = simplify_and_const_int (NULL_RTX, mode,
12689 XEXP (XEXP (op0, 0), 0), 1);
12690 code = (code == NE ? EQ : NE);
12691 continue;
12692 }
12693
12694 /* Convert (ne (and (lshiftrt (not X)) 1) 0) to
12695 (eq (and (lshiftrt X) 1) 0).
12696 Also handle the case where (not X) is expressed using xor. */
12697 if (const_op == 0 && equality_comparison_p
12698 && XEXP (op0, 1) == const1_rtx
12699 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT)
12700 {
12701 rtx shift_op = XEXP (XEXP (op0, 0), 0);
12702 rtx shift_count = XEXP (XEXP (op0, 0), 1);
12703
12704 if (GET_CODE (shift_op) == NOT
12705 || (GET_CODE (shift_op) == XOR
12706 && CONST_INT_P (XEXP (shift_op, 1))
12707 && CONST_INT_P (shift_count)
12708 && HWI_COMPUTABLE_MODE_P (mode)
12709 && (UINTVAL (XEXP (shift_op, 1))
12710 == HOST_WIDE_INT_1U
12711 << INTVAL (shift_count))))
12712 {
12713 op0
12714 = gen_rtx_LSHIFTRT (mode, XEXP (shift_op, 0), shift_count);
12715 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
12716 code = (code == NE ? EQ : NE);
12717 continue;
12718 }
12719 }
12720 break;
12721
12722 case ASHIFT:
12723 /* If we have (compare (ashift FOO N) (const_int C)) and
12724 the high order N bits of FOO (N+1 if an inequality comparison)
12725 are known to be zero, we can do this by comparing FOO with C
12726 shifted right N bits so long as the low-order N bits of C are
12727 zero. */
12728 if (CONST_INT_P (XEXP (op0, 1))
12729 && INTVAL (XEXP (op0, 1)) >= 0
12730 && ((INTVAL (XEXP (op0, 1)) + ! equality_comparison_p)
12731 < HOST_BITS_PER_WIDE_INT)
12732 && (((unsigned HOST_WIDE_INT) const_op
12733 & ((HOST_WIDE_INT_1U << INTVAL (XEXP (op0, 1)))
12734 - 1)) == 0)
12735 && mode_width <= HOST_BITS_PER_WIDE_INT
12736 && (nonzero_bits (XEXP (op0, 0), mode)
12737 & ~(mask >> (INTVAL (XEXP (op0, 1))
12738 + ! equality_comparison_p))) == 0)
12739 {
12740 /* We must perform a logical shift, not an arithmetic one,
12741 as we want the top N bits of C to be zero. */
12742 unsigned HOST_WIDE_INT temp = const_op & GET_MODE_MASK (mode);
12743
12744 temp >>= INTVAL (XEXP (op0, 1));
12745 op1 = gen_int_mode (temp, mode);
12746 op0 = XEXP (op0, 0);
12747 continue;
12748 }
12749
12750 /* If we are doing a sign bit comparison, it means we are testing
12751 a particular bit. Convert it to the appropriate AND. */
12752 if (sign_bit_comparison_p && CONST_INT_P (XEXP (op0, 1))
12753 && mode_width <= HOST_BITS_PER_WIDE_INT)
12754 {
12755 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
12756 (HOST_WIDE_INT_1U
12757 << (mode_width - 1
12758 - INTVAL (XEXP (op0, 1)))));
12759 code = (code == LT ? NE : EQ);
12760 continue;
12761 }
12762
12763       /* If this is an equality comparison with zero and we are shifting
12764 the low bit to the sign bit, we can convert this to an AND of the
12765 low-order bit. */
12766 if (const_op == 0 && equality_comparison_p
12767 && CONST_INT_P (XEXP (op0, 1))
12768 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
12769 {
12770 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), 1);
12771 continue;
12772 }
12773 break;
12774
12775 case ASHIFTRT:
12776 /* If this is an equality comparison with zero, we can do this
12777 as a logical shift, which might be much simpler. */
12778 if (equality_comparison_p && const_op == 0
12779 && CONST_INT_P (XEXP (op0, 1)))
12780 {
12781 op0 = simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
12782 XEXP (op0, 0),
12783 INTVAL (XEXP (op0, 1)));
12784 continue;
12785 }
12786
12787 /* If OP0 is a sign extension and CODE is not an unsigned comparison,
12788 do the comparison in a narrower mode. */
12789 if (! unsigned_comparison_p
12790 && CONST_INT_P (XEXP (op0, 1))
12791 && GET_CODE (XEXP (op0, 0)) == ASHIFT
12792 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
12793 && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
12794 .exists (&tmode))
12795 && (((unsigned HOST_WIDE_INT) const_op
12796 + (GET_MODE_MASK (tmode) >> 1) + 1)
12797 <= GET_MODE_MASK (tmode)))
12798 {
12799 op0 = gen_lowpart (tmode, XEXP (XEXP (op0, 0), 0));
12800 continue;
12801 }
12802
12803 /* Likewise if OP0 is a PLUS of a sign extension with a
12804 constant, which is usually represented with the PLUS
12805 between the shifts. */
12806 if (! unsigned_comparison_p
12807 && CONST_INT_P (XEXP (op0, 1))
12808 && GET_CODE (XEXP (op0, 0)) == PLUS
12809 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12810 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT
12811 && XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1)
12812 && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
12813 .exists (&tmode))
12814 && (((unsigned HOST_WIDE_INT) const_op
12815 + (GET_MODE_MASK (tmode) >> 1) + 1)
12816 <= GET_MODE_MASK (tmode)))
12817 {
12818 rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0);
12819 rtx add_const = XEXP (XEXP (op0, 0), 1);
12820 rtx new_const = simplify_gen_binary (ASHIFTRT, mode,
12821 add_const, XEXP (op0, 1));
12822
12823 op0 = simplify_gen_binary (PLUS, tmode,
12824 gen_lowpart (tmode, inner),
12825 new_const);
12826 continue;
12827 }
12828
12829 /* FALLTHROUGH */
12830 case LSHIFTRT:
12831 /* If we have (compare (xshiftrt FOO N) (const_int C)) and
12832 the low order N bits of FOO are known to be zero, we can do this
12833 by comparing FOO with C shifted left N bits so long as no
12834 	 overflow occurs.  Even if the low order N bits of FOO aren't known
12835 	 to be zero, we can use the same optimization for >= or <; for
12836 	 > or <= we can also use it by first setting all the low order
12837 	 N bits in the comparison constant.  */
12838 if (CONST_INT_P (XEXP (op0, 1))
12839 && INTVAL (XEXP (op0, 1)) > 0
12840 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
12841 && mode_width <= HOST_BITS_PER_WIDE_INT
12842 && (((unsigned HOST_WIDE_INT) const_op
12843 + (GET_CODE (op0) != LSHIFTRT
12844 ? ((GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)) >> 1)
12845 + 1)
12846 : 0))
12847 <= GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1))))
12848 {
12849 unsigned HOST_WIDE_INT low_bits
12850 = (nonzero_bits (XEXP (op0, 0), mode)
12851 & ((HOST_WIDE_INT_1U
12852 << INTVAL (XEXP (op0, 1))) - 1));
12853 if (low_bits == 0 || !equality_comparison_p)
12854 {
12855 /* If the shift was logical, then we must make the condition
12856 unsigned. */
12857 if (GET_CODE (op0) == LSHIFTRT)
12858 code = unsigned_condition (code);
12859
12860 const_op = (unsigned HOST_WIDE_INT) const_op
12861 << INTVAL (XEXP (op0, 1));
12862 if (low_bits != 0
12863 && (code == GT || code == GTU
12864 || code == LE || code == LEU))
12865 const_op
12866 |= ((HOST_WIDE_INT_1 << INTVAL (XEXP (op0, 1))) - 1);
12867 op1 = GEN_INT (const_op);
12868 op0 = XEXP (op0, 0);
12869 continue;
12870 }
12871 }
12872
12873 /* If we are using this shift to extract just the sign bit, we
12874 can replace this with an LT or GE comparison. */
12875 if (const_op == 0
12876 && (equality_comparison_p || sign_bit_comparison_p)
12877 && CONST_INT_P (XEXP (op0, 1))
12878 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
12879 {
12880 op0 = XEXP (op0, 0);
12881 code = (code == NE || code == GT ? LT : GE);
12882 continue;
12883 }
12884 break;
12885
12886 default:
12887 break;
12888 }
12889
12890 break;
12891 }
12892
12893 /* Now make any compound operations involved in this comparison. Then,
12894      check for an outermost SUBREG on OP0 that is not doing anything or is
12895 paradoxical. The latter transformation must only be performed when
12896 it is known that the "extra" bits will be the same in op0 and op1 or
12897 that they don't matter. There are three cases to consider:
12898
12899 1. SUBREG_REG (op0) is a register. In this case the bits are don't
12900 care bits and we can assume they have any convenient value. So
12901 making the transformation is safe.
12902
12903 2. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is UNKNOWN.
12904 In this case the upper bits of op0 are undefined. We should not make
12905 the simplification in that case as we do not know the contents of
12906 those bits.
12907
12908 3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is not UNKNOWN.
12909 In that case we know those bits are zeros or ones. We must also be
12910 sure that they are the same as the upper bits of op1.
12911
12912 We can never remove a SUBREG for a non-equality comparison because
12913 the sign bit is in a different place in the underlying object. */
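  /* As an example of case 1: (eq (subreg:DI (reg:SI R) 0) (const_int 5))
     can become (eq (reg:SI R) (const_int 5)), since the bits of the
     paradoxical SUBREG beyond SImode are don't-cares.  */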
12914
12915 rtx_code op0_mco_code = SET;
12916 if (op1 == const0_rtx)
12917 op0_mco_code = code == NE || code == EQ ? EQ : COMPARE;
12918
12919 op0 = make_compound_operation (op0, op0_mco_code);
12920 op1 = make_compound_operation (op1, SET);
12921
12922 if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
12923 && is_int_mode (GET_MODE (op0), &mode)
12924 && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
12925 && (code == NE || code == EQ))
12926 {
12927 if (paradoxical_subreg_p (op0))
12928 {
12929 /* For paradoxical subregs, allow case 1 as above. Case 3 isn't
12930 implemented. */
12931 if (REG_P (SUBREG_REG (op0)))
12932 {
12933 op0 = SUBREG_REG (op0);
12934 op1 = gen_lowpart (inner_mode, op1);
12935 }
12936 }
12937 else if (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
12938 && (nonzero_bits (SUBREG_REG (op0), inner_mode)
12939 & ~GET_MODE_MASK (mode)) == 0)
12940 {
12941 tem = gen_lowpart (inner_mode, op1);
12942
12943 if ((nonzero_bits (tem, inner_mode) & ~GET_MODE_MASK (mode)) == 0)
12944 op0 = SUBREG_REG (op0), op1 = tem;
12945 }
12946 }
12947
12948 /* We now do the opposite procedure: Some machines don't have compare
12949 insns in all modes. If OP0's mode is an integer mode smaller than a
12950 word and we can't do a compare in that mode, see if there is a larger
12951 mode for which we can do the compare. There are a number of cases in
12952 which we can use the wider mode. */
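  /* E.g. if the target lacks an HImode compare, an EQ test of a HImode
     value against zero can be retried as an SImode comparison of the
     zero-extended operands, once the nonzero-bits checks below confirm
     that the extra bits cannot affect the result.  */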
12953
12954 if (is_int_mode (GET_MODE (op0), &mode)
12955 && GET_MODE_SIZE (mode) < UNITS_PER_WORD
12956 && ! have_insn_for (COMPARE, mode))
12957 FOR_EACH_WIDER_MODE (tmode_iter, mode)
12958 {
12959 tmode = tmode_iter.require ();
12960 if (!HWI_COMPUTABLE_MODE_P (tmode))
12961 break;
12962 if (have_insn_for (COMPARE, tmode))
12963 {
12964 int zero_extended;
12965
12966 /* If this is a test for negative, we can make an explicit
12967 test of the sign bit. Test this first so we can use
12968 a paradoxical subreg to extend OP0. */
12969
12970 if (op1 == const0_rtx && (code == LT || code == GE)
12971 && HWI_COMPUTABLE_MODE_P (mode))
12972 {
12973 unsigned HOST_WIDE_INT sign
12974 = HOST_WIDE_INT_1U << (GET_MODE_BITSIZE (mode) - 1);
12975 op0 = simplify_gen_binary (AND, tmode,
12976 gen_lowpart (tmode, op0),
12977 gen_int_mode (sign, tmode));
12978 code = (code == LT) ? NE : EQ;
12979 break;
12980 }
12981
12982 /* If the only nonzero bits in OP0 and OP1 are those in the
12983 narrower mode and this is an equality or unsigned comparison,
12984 we can use the wider mode. Similarly for sign-extended
12985 values, in which case it is true for all comparisons. */
12986 zero_extended = ((code == EQ || code == NE
12987 || code == GEU || code == GTU
12988 || code == LEU || code == LTU)
12989 && (nonzero_bits (op0, tmode)
12990 & ~GET_MODE_MASK (mode)) == 0
12991 && ((CONST_INT_P (op1)
12992 || (nonzero_bits (op1, tmode)
12993 & ~GET_MODE_MASK (mode)) == 0)));
12994
12995 if (zero_extended
12996 || ((num_sign_bit_copies (op0, tmode)
12997 > (unsigned int) (GET_MODE_PRECISION (tmode)
12998 - GET_MODE_PRECISION (mode)))
12999 && (num_sign_bit_copies (op1, tmode)
13000 > (unsigned int) (GET_MODE_PRECISION (tmode)
13001 - GET_MODE_PRECISION (mode)))))
13002 {
13003 /* If OP0 is an AND and we don't have an AND in MODE either,
13004 make a new AND in the proper mode. */
13005 if (GET_CODE (op0) == AND
13006 && !have_insn_for (AND, mode))
13007 op0 = simplify_gen_binary (AND, tmode,
13008 gen_lowpart (tmode,
13009 XEXP (op0, 0)),
13010 gen_lowpart (tmode,
13011 XEXP (op0, 1)));
13012 else
13013 {
13014 if (zero_extended)
13015 {
13016 op0 = simplify_gen_unary (ZERO_EXTEND, tmode,
13017 op0, mode);
13018 op1 = simplify_gen_unary (ZERO_EXTEND, tmode,
13019 op1, mode);
13020 }
13021 else
13022 {
13023 op0 = simplify_gen_unary (SIGN_EXTEND, tmode,
13024 op0, mode);
13025 op1 = simplify_gen_unary (SIGN_EXTEND, tmode,
13026 op1, mode);
13027 }
13028 break;
13029 }
13030 }
13031 }
13032 }
13033
13034 /* We may have changed the comparison operands. Re-canonicalize. */
13035 if (swap_commutative_operands_p (op0, op1))
13036 {
13037 std::swap (op0, op1);
13038 code = swap_condition (code);
13039 }
13040
13041 /* If this machine only supports a subset of valid comparisons, see if we
13042 can convert an unsupported one into a supported one. */
13043 target_canonicalize_comparison (&code, &op0, &op1, 0);
13044
13045 *pop0 = op0;
13046 *pop1 = op1;
13047
13048 return code;
13049 }
13050
13051 /* Utility function for record_value_for_reg. Count number of
13052 rtxs in X. */
13053 static int
13054 count_rtxs (rtx x)
13055 {
13056 enum rtx_code code = GET_CODE (x);
13057 const char *fmt;
13058 int i, j, ret = 1;
13059
13060 if (GET_RTX_CLASS (code) == RTX_BIN_ARITH
13061 || GET_RTX_CLASS (code) == RTX_COMM_ARITH)
13062 {
13063 rtx x0 = XEXP (x, 0);
13064 rtx x1 = XEXP (x, 1);
13065
13066 if (x0 == x1)
13067 return 1 + 2 * count_rtxs (x0);
13068
13069 if ((GET_RTX_CLASS (GET_CODE (x1)) == RTX_BIN_ARITH
13070 || GET_RTX_CLASS (GET_CODE (x1)) == RTX_COMM_ARITH)
13071 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
13072 return 2 + 2 * count_rtxs (x0)
13073 + count_rtxs (x0 == XEXP (x1, 0)
13074 ? XEXP (x1, 1) : XEXP (x1, 0));
13075
13076 if ((GET_RTX_CLASS (GET_CODE (x0)) == RTX_BIN_ARITH
13077 || GET_RTX_CLASS (GET_CODE (x0)) == RTX_COMM_ARITH)
13078 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
13079 return 2 + 2 * count_rtxs (x1)
13080 + count_rtxs (x1 == XEXP (x0, 0)
13081 ? XEXP (x0, 1) : XEXP (x0, 0));
13082 }
13083
13084 fmt = GET_RTX_FORMAT (code);
13085 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13086 if (fmt[i] == 'e')
13087 ret += count_rtxs (XEXP (x, i));
13088 else if (fmt[i] == 'E')
13089 for (j = 0; j < XVECLEN (x, i); j++)
13090 ret += count_rtxs (XVECEXP (x, i, j));
13091
13092 return ret;
13093 }
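
/* An illustrative computation (hypothetical shared rtx): if X is
   (plus:SI (reg:SI 100) (reg:SI 100)) with both operands the very same
   rtx object, the x0 == x1 case above returns
   1 + 2 * count_rtxs (x0) = 3, counting the shared operand twice while
   walking it only once.  */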
13094
13095 /* Utility function for the following routine. Called when X is part of a value
13096 being stored into last_set_value. Sets last_set_table_tick
13097 for each register mentioned. Similar to mention_regs in cse.c */
13098
13099 static void
13100 update_table_tick (rtx x)
13101 {
13102 enum rtx_code code = GET_CODE (x);
13103 const char *fmt = GET_RTX_FORMAT (code);
13104 int i, j;
13105
13106 if (code == REG)
13107 {
13108 unsigned int regno = REGNO (x);
13109 unsigned int endregno = END_REGNO (x);
13110 unsigned int r;
13111
13112 for (r = regno; r < endregno; r++)
13113 {
13114 reg_stat_type *rsp = &reg_stat[r];
13115 rsp->last_set_table_tick = label_tick;
13116 }
13117
13118 return;
13119 }
13120
13121 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13122 if (fmt[i] == 'e')
13123 {
13124 /* Check for identical subexpressions. If x contains
13125 identical subexpression we only have to traverse one of
13126 them. */
13127 if (i == 0 && ARITHMETIC_P (x))
13128 {
13129 /* Note that at this point x1 has already been
13130 processed. */
13131 rtx x0 = XEXP (x, 0);
13132 rtx x1 = XEXP (x, 1);
13133
13134 /* If x0 and x1 are identical then there is no need to
13135 process x0. */
13136 if (x0 == x1)
13137 break;
13138
13139 /* If x0 is identical to a subexpression of x1 then while
13140 processing x1, x0 has already been processed. Thus we
13141 are done with x. */
13142 if (ARITHMETIC_P (x1)
13143 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
13144 break;
13145
13146 /* If x1 is identical to a subexpression of x0 then we
13147 still have to process the rest of x0. */
13148 if (ARITHMETIC_P (x0)
13149 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
13150 {
13151 update_table_tick (XEXP (x0, x1 == XEXP (x0, 0) ? 1 : 0));
13152 break;
13153 }
13154 }
13155
13156 update_table_tick (XEXP (x, i));
13157 }
13158 else if (fmt[i] == 'E')
13159 for (j = 0; j < XVECLEN (x, i); j++)
13160 update_table_tick (XVECEXP (x, i, j));
13161 }
13162
13163 /* Record that REG is set to VALUE in insn INSN. If VALUE is zero, we
13164 are saying that the register is clobbered and we no longer know its
13165 value. If INSN is zero, don't update reg_stat[].last_set; this is
13166 only permitted with VALUE also zero and is used to invalidate the
13167 register. */
13168
13169 static void
13170 record_value_for_reg (rtx reg, rtx_insn *insn, rtx value)
13171 {
13172 unsigned int regno = REGNO (reg);
13173 unsigned int endregno = END_REGNO (reg);
13174 unsigned int i;
13175 reg_stat_type *rsp;
13176
13177 /* If VALUE contains REG and we have a previous value for REG, substitute
13178 the previous value. */
13179 if (value && insn && reg_overlap_mentioned_p (reg, value))
13180 {
13181 rtx tem;
13182
13183 /* Set things up so get_last_value is allowed to see anything set up to
13184 our insn. */
13185 subst_low_luid = DF_INSN_LUID (insn);
13186 tem = get_last_value (reg);
13187
13188 /* If TEM is simply a binary operation with two CLOBBERs as operands,
13189 it isn't going to be useful and will take a lot of time to process,
13190 so just use the CLOBBER. */
13191
13192 if (tem)
13193 {
13194 if (ARITHMETIC_P (tem)
13195 && GET_CODE (XEXP (tem, 0)) == CLOBBER
13196 && GET_CODE (XEXP (tem, 1)) == CLOBBER)
13197 tem = XEXP (tem, 0);
13198 else if (count_occurrences (value, reg, 1) >= 2)
13199 {
13200 /* If there are two or more occurrences of REG in VALUE,
13201 prevent the value from growing too much. */
13202 if (count_rtxs (tem) > MAX_LAST_VALUE_RTL)
13203 tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
13204 }
13205
13206 value = replace_rtx (copy_rtx (value), reg, tem);
13207 }
13208 }
13209
13210 /* For each register modified, show we don't know its value, that
13211 we don't know about its bitwise content, that its value has been
13212 updated, and that we don't know the location of the death of the
13213 register. */
13214 for (i = regno; i < endregno; i++)
13215 {
13216 rsp = &reg_stat[i];
13217
13218 if (insn)
13219 rsp->last_set = insn;
13220
13221 rsp->last_set_value = 0;
13222 rsp->last_set_mode = VOIDmode;
13223 rsp->last_set_nonzero_bits = 0;
13224 rsp->last_set_sign_bit_copies = 0;
13225 rsp->last_death = 0;
13226 rsp->truncated_to_mode = VOIDmode;
13227 }
13228
13229 /* Mark registers that are being referenced in this value. */
13230 if (value)
13231 update_table_tick (value);
13232
13233 /* Now update the status of each register being set.
13234 If someone is using this register in this block, set this register
13235 to invalid since we will get confused between the two lives in this
13236 basic block. This makes using this register always invalid. In cse, we
13237 scan the table to invalidate all entries using this register, but this
13238 is too much work for us. */
13239
13240 for (i = regno; i < endregno; i++)
13241 {
13242 rsp = &reg_stat[i];
13243 rsp->last_set_label = label_tick;
13244 if (!insn
13245 || (value && rsp->last_set_table_tick >= label_tick_ebb_start))
13246 rsp->last_set_invalid = 1;
13247 else
13248 rsp->last_set_invalid = 0;
13249 }
13250
13251 /* The value being assigned might refer to X (like in "x++;"). In that
13252 case, we must replace it with (clobber (const_int 0)) to prevent
13253 infinite loops. */
13254 rsp = &reg_stat[regno];
13255 if (value && !get_last_value_validate (&value, insn, label_tick, 0))
13256 {
13257 value = copy_rtx (value);
13258 if (!get_last_value_validate (&value, insn, label_tick, 1))
13259 value = 0;
13260 }
13261
13262 /* For the main register being modified, update the value, the mode, the
13263 nonzero bits, and the number of sign bit copies. */
13264
13265 rsp->last_set_value = value;
13266
13267 if (value)
13268 {
13269 machine_mode mode = GET_MODE (reg);
13270 subst_low_luid = DF_INSN_LUID (insn);
13271 rsp->last_set_mode = mode;
13272 if (GET_MODE_CLASS (mode) == MODE_INT
13273 && HWI_COMPUTABLE_MODE_P (mode))
13274 mode = nonzero_bits_mode;
13275 rsp->last_set_nonzero_bits = nonzero_bits (value, mode);
13276 rsp->last_set_sign_bit_copies
13277 = num_sign_bit_copies (value, GET_MODE (reg));
13278 }
13279 }
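
/* A hedged example of the handling above (hypothetical pseudos): when
   recording

       (set (reg:SI 100) (plus:SI (reg:SI 100) (const_int 1)))

   and the last known value of (reg:SI 100) was (reg:SI 99), the value
   stored is (plus:SI (reg:SI 99) (const_int 1)); without a usable
   previous value the self-reference ends up as (clobber (const_int 0)),
   precisely to avoid the "x++" infinite loop mentioned above.  */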
13280
13281 /* Called via note_stores from record_dead_and_set_regs to handle one
13282 SET or CLOBBER in an insn. DATA is the instruction in which the
13283 set is occurring. */
13284
13285 static void
13286 record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data)
13287 {
13288 rtx_insn *record_dead_insn = (rtx_insn *) data;
13289
13290 if (GET_CODE (dest) == SUBREG)
13291 dest = SUBREG_REG (dest);
13292
13293 if (!record_dead_insn)
13294 {
13295 if (REG_P (dest))
13296 record_value_for_reg (dest, NULL, NULL_RTX);
13297 return;
13298 }
13299
13300 if (REG_P (dest))
13301 {
13302 /* If we are setting the whole register, we know its value. Otherwise
13303 show that we don't know the value. We can handle a SUBREG if it's
13304 the low part, but we must be careful with paradoxical SUBREGs on
13305 RISC architectures because we cannot strip e.g. an extension around
13306 a load and record the naked load since the RTL middle-end considers
13307 that the upper bits are defined according to LOAD_EXTEND_OP. */
13308 if (GET_CODE (setter) == SET && dest == SET_DEST (setter))
13309 record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
13310 else if (GET_CODE (setter) == SET
13311 && GET_CODE (SET_DEST (setter)) == SUBREG
13312 && SUBREG_REG (SET_DEST (setter)) == dest
13313 && known_le (GET_MODE_PRECISION (GET_MODE (dest)),
13314 BITS_PER_WORD)
13315 && subreg_lowpart_p (SET_DEST (setter)))
13316 record_value_for_reg (dest, record_dead_insn,
13317 WORD_REGISTER_OPERATIONS
13318 && word_register_operation_p (SET_SRC (setter))
13319 && paradoxical_subreg_p (SET_DEST (setter))
13320 ? SET_SRC (setter)
13321 : gen_lowpart (GET_MODE (dest),
13322 SET_SRC (setter)));
13323 else
13324 record_value_for_reg (dest, record_dead_insn, NULL_RTX);
13325 }
13326 else if (MEM_P (dest)
13327 /* Ignore pushes, they clobber nothing. */
13328 && ! push_operand (dest, GET_MODE (dest)))
13329 mem_last_set = DF_INSN_LUID (record_dead_insn);
13330 }
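
/* An illustrative case (hypothetical pseudos, !WORD_REGISTER_OPERATIONS
   target assumed): for

       (set (subreg:HI (reg:SI 100) 0) (reg:HI 101))

   we record the value of (reg:SI 100) as gen_lowpart (SImode,
   (reg:HI 101)), i.e. (subreg:SI (reg:HI 101) 0), whose upper bits stay
   unknown, instead of claiming knowledge of the whole SImode value.  */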
13331
13332 /* Update the records of when each REG was most recently set or killed
13333 for the things done by INSN. This is the last thing done in processing
13334 INSN in the combiner loop.
13335
13336 We update reg_stat[], in particular fields last_set, last_set_value,
13337 last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies,
13338 last_death, and also the similar information mem_last_set (which insn
13339 most recently modified memory) and last_call_luid (which insn was the
13340 most recent subroutine call). */
13341
13342 static void
13343 record_dead_and_set_regs (rtx_insn *insn)
13344 {
13345 rtx link;
13346 unsigned int i;
13347
13348 for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
13349 {
13350 if (REG_NOTE_KIND (link) == REG_DEAD
13351 && REG_P (XEXP (link, 0)))
13352 {
13353 unsigned int regno = REGNO (XEXP (link, 0));
13354 unsigned int endregno = END_REGNO (XEXP (link, 0));
13355
13356 for (i = regno; i < endregno; i++)
13357 {
13358 reg_stat_type *rsp;
13359
13360 rsp = &reg_stat[i];
13361 rsp->last_death = insn;
13362 }
13363 }
13364 else if (REG_NOTE_KIND (link) == REG_INC)
13365 record_value_for_reg (XEXP (link, 0), insn, NULL_RTX);
13366 }
13367
13368 if (CALL_P (insn))
13369 {
13370 hard_reg_set_iterator hrsi;
13371 EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call, 0, i, hrsi)
13372 {
13373 reg_stat_type *rsp;
13374
13375 rsp = &reg_stat[i];
13376 rsp->last_set_invalid = 1;
13377 rsp->last_set = insn;
13378 rsp->last_set_value = 0;
13379 rsp->last_set_mode = VOIDmode;
13380 rsp->last_set_nonzero_bits = 0;
13381 rsp->last_set_sign_bit_copies = 0;
13382 rsp->last_death = 0;
13383 rsp->truncated_to_mode = VOIDmode;
13384 }
13385
13386 last_call_luid = mem_last_set = DF_INSN_LUID (insn);
13387
13388 /* We can't combine into a call pattern. Remember, though, that
13389 the return value register is set at this LUID. We could
13390 still replace a register with the return value from the
13391 wrong subroutine call! */
13392 note_stores (PATTERN (insn), record_dead_and_set_regs_1, NULL_RTX);
13393 }
13394 else
13395 note_stores (PATTERN (insn), record_dead_and_set_regs_1, insn);
13396 }
13397
13398 /* If a SUBREG has the promoted bit set, it is in fact a property of the
13399 register present in the SUBREG, so for each such SUBREG go back and
13400 adjust nonzero and sign bit information of the registers that are
13401 known to have some zero/sign bits set.
13402
13403 This is needed because when combine blows the SUBREGs away, the
13404 information on zero/sign bits is lost and further combines can be
13405 missed because of that. */
13406
13407 static void
13408 record_promoted_value (rtx_insn *insn, rtx subreg)
13409 {
13410 struct insn_link *links;
13411 rtx set;
13412 unsigned int regno = REGNO (SUBREG_REG (subreg));
13413 machine_mode mode = GET_MODE (subreg);
13414
13415 if (!HWI_COMPUTABLE_MODE_P (mode))
13416 return;
13417
13418 for (links = LOG_LINKS (insn); links;)
13419 {
13420 reg_stat_type *rsp;
13421
13422 insn = links->insn;
13423 set = single_set (insn);
13424
13425 if (! set || !REG_P (SET_DEST (set))
13426 || REGNO (SET_DEST (set)) != regno
13427 || GET_MODE (SET_DEST (set)) != GET_MODE (SUBREG_REG (subreg)))
13428 {
13429 links = links->next;
13430 continue;
13431 }
13432
13433 rsp = &reg_stat[regno];
13434 if (rsp->last_set == insn)
13435 {
13436 if (SUBREG_PROMOTED_UNSIGNED_P (subreg))
13437 rsp->last_set_nonzero_bits &= GET_MODE_MASK (mode);
13438 }
13439
13440 if (REG_P (SET_SRC (set)))
13441 {
13442 regno = REGNO (SET_SRC (set));
13443 links = LOG_LINKS (insn);
13444 }
13445 else
13446 break;
13447 }
13448 }
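
/* A hedged example (hypothetical pseudos): if (reg:SI 100) holds a
   variable promoted from QImode and occurs as
   (subreg:QI (reg:SI 100) 0) with SUBREG_PROMOTED_UNSIGNED_P set, the
   insn that set (reg:SI 100) can be assumed to leave only the low
   QImode bits possibly nonzero, so its last_set_nonzero_bits is masked
   with GET_MODE_MASK (QImode), i.e. 0xff.  */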
13449
13450 /* Check if X, a register, is known to contain a value already
13451 truncated to MODE. In this case we can use a subreg to refer to
13452 the truncated value even though in the generic case we would need
13453 an explicit truncation. */
13454
13455 static bool
13456 reg_truncated_to_mode (machine_mode mode, const_rtx x)
13457 {
13458 reg_stat_type *rsp = &reg_stat[REGNO (x)];
13459 machine_mode truncated = rsp->truncated_to_mode;
13460
13461 if (truncated == 0
13462 || rsp->truncation_label < label_tick_ebb_start)
13463 return false;
13464 if (!partial_subreg_p (mode, truncated))
13465 return true;
13466 if (TRULY_NOOP_TRUNCATION_MODES_P (mode, truncated))
13467 return true;
13468 return false;
13469 }
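
/* A hedged example: on a target where truncating DImode to SImode is
   not a no-op (64-bit MIPS is the classic case),
   (truncate:SI (reg:DI 100)) cannot normally become
   (subreg:SI (reg:DI 100) 0); but if record_truncated_value below has
   already seen the register accessed in SImode, the SUBREG form is
   known to be safe.  */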
13470
13471 /* If X is a hard reg or a subreg record the mode that the register is
13472 accessed in. For non-TARGET_TRULY_NOOP_TRUNCATION targets we might be
13473 able to turn a truncate into a subreg using this information. Return true
13474 if traversing X is complete. */
13475
13476 static bool
13477 record_truncated_value (rtx x)
13478 {
13479 machine_mode truncated_mode;
13480 reg_stat_type *rsp;
13481
13482 if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
13483 {
13484 machine_mode original_mode = GET_MODE (SUBREG_REG (x));
13485 truncated_mode = GET_MODE (x);
13486
13487 if (!partial_subreg_p (truncated_mode, original_mode))
13488 return true;
13489
13491 if (TRULY_NOOP_TRUNCATION_MODES_P (truncated_mode, original_mode))
13492 return true;
13493
13494 x = SUBREG_REG (x);
13495 }
13496 /* ??? For hard-regs we now record everything. We might be able to
13497 optimize this using last_set_mode. */
13498 else if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
13499 truncated_mode = GET_MODE (x);
13500 else
13501 return false;
13502
13503 rsp = &reg_stat[REGNO (x)];
13504 if (rsp->truncated_to_mode == 0
13505 || rsp->truncation_label < label_tick_ebb_start
13506 || partial_subreg_p (truncated_mode, rsp->truncated_to_mode))
13507 {
13508 rsp->truncated_to_mode = truncated_mode;
13509 rsp->truncation_label = label_tick;
13510 }
13511
13512 return true;
13513 }
13514
13515 /* Callback for note_uses. Find hardregs and subregs of pseudos and
13516 the modes they are used in. This can help turn TRUNCATEs into
13517 SUBREGs. */
13518
13519 static void
13520 record_truncated_values (rtx *loc, void *data ATTRIBUTE_UNUSED)
13521 {
13522 subrtx_var_iterator::array_type array;
13523 FOR_EACH_SUBRTX_VAR (iter, array, *loc, NONCONST)
13524 if (record_truncated_value (*iter))
13525 iter.skip_subrtxes ();
13526 }
13527
13528 /* Scan X for promoted SUBREGs. For each one found,
13529 note what it implies to the registers used in it. */
13530
13531 static void
13532 check_promoted_subreg (rtx_insn *insn, rtx x)
13533 {
13534 if (GET_CODE (x) == SUBREG
13535 && SUBREG_PROMOTED_VAR_P (x)
13536 && REG_P (SUBREG_REG (x)))
13537 record_promoted_value (insn, x);
13538 else
13539 {
13540 const char *format = GET_RTX_FORMAT (GET_CODE (x));
13541 int i, j;
13542
13543 for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
13544 switch (format[i])
13545 {
13546 case 'e':
13547 check_promoted_subreg (insn, XEXP (x, i));
13548 break;
13549 case 'V':
13550 case 'E':
13551 if (XVEC (x, i) != 0)
13552 for (j = 0; j < XVECLEN (x, i); j++)
13553 check_promoted_subreg (insn, XVECEXP (x, i, j));
13554 break;
13555 }
13556 }
13557 }
13558
13559 /* Verify that all the registers and memory references mentioned in *LOC are
13560 still valid. *LOC was part of a value set in INSN when label_tick was
13561 equal to TICK. Return 0 if some are not. If REPLACE is nonzero, replace
13562 the invalid references with (clobber (const_int 0)) and return 1. This
13563 replacement is useful because we often can get useful information about
13564 the form of a value (e.g., if it was produced by a shift that always
13565 produces -1 or 0) even though we don't know exactly what registers it
13566 was produced from. */
13567
13568 static int
13569 get_last_value_validate (rtx *loc, rtx_insn *insn, int tick, int replace)
13570 {
13571 rtx x = *loc;
13572 const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
13573 int len = GET_RTX_LENGTH (GET_CODE (x));
13574 int i, j;
13575
13576 if (REG_P (x))
13577 {
13578 unsigned int regno = REGNO (x);
13579 unsigned int endregno = END_REGNO (x);
13580 unsigned int j;
13581
13582 for (j = regno; j < endregno; j++)
13583 {
13584 reg_stat_type *rsp = &reg_stat[j];
13585 if (rsp->last_set_invalid
13586 /* If this is a pseudo-register that was only set once and not
13587 live at the beginning of the function, it is always valid. */
13588 || (! (regno >= FIRST_PSEUDO_REGISTER
13589 && regno < reg_n_sets_max
13590 && REG_N_SETS (regno) == 1
13591 && (!REGNO_REG_SET_P
13592 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
13593 regno)))
13594 && rsp->last_set_label > tick))
13595 {
13596 if (replace)
13597 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
13598 return replace;
13599 }
13600 }
13601
13602 return 1;
13603 }
13604 /* If this is a memory reference, make sure that there were no stores after
13605 it that might have clobbered the value. We don't have alias info, so we
13606 assume any store invalidates it. Moreover, we only have local UIDs, so
13607 we also assume that there were stores in the intervening basic blocks. */
13608 else if (MEM_P (x) && !MEM_READONLY_P (x)
13609 && (tick != label_tick || DF_INSN_LUID (insn) <= mem_last_set))
13610 {
13611 if (replace)
13612 *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
13613 return replace;
13614 }
13615
13616 for (i = 0; i < len; i++)
13617 {
13618 if (fmt[i] == 'e')
13619 {
13620 /* Check for identical subexpressions. If x contains
13621 identical subexpression we only have to traverse one of
13622 them. */
13623 if (i == 1 && ARITHMETIC_P (x))
13624 {
13625 /* Note that at this point x0 has already been checked
13626 and found valid. */
13627 rtx x0 = XEXP (x, 0);
13628 rtx x1 = XEXP (x, 1);
13629
13630 /* If x0 and x1 are identical then x is also valid. */
13631 if (x0 == x1)
13632 return 1;
13633
13634 /* If x1 is identical to a subexpression of x0 then
13635 while checking x0, x1 has already been checked. Thus
13636 it is valid and so is x. */
13637 if (ARITHMETIC_P (x0)
13638 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
13639 return 1;
13640
13641 /* If x0 is identical to a subexpression of x1 then x is
13642 valid iff the rest of x1 is valid. */
13643 if (ARITHMETIC_P (x1)
13644 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
13645 return
13646 get_last_value_validate (&XEXP (x1,
13647 x0 == XEXP (x1, 0) ? 1 : 0),
13648 insn, tick, replace);
13649 }
13650
13651 if (get_last_value_validate (&XEXP (x, i), insn, tick,
13652 replace) == 0)
13653 return 0;
13654 }
13655 else if (fmt[i] == 'E')
13656 for (j = 0; j < XVECLEN (x, i); j++)
13657 if (get_last_value_validate (&XVECEXP (x, i, j),
13658 insn, tick, replace) == 0)
13659 return 0;
13660 }
13661
13662 /* If we haven't found a reason for it to be invalid, it is valid. */
13663 return 1;
13664 }
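
/* An illustration of the claim above (hypothetical value): if the
   recorded value was (ashiftrt:SI (reg:SI 99) (const_int 31)) and
   (reg:SI 99) has since become invalid, the replaced value

       (ashiftrt:SI (clobber (const_int 0)) (const_int 31))

   still lets num_sign_bit_copies deduce that the result consists
   entirely of sign-bit copies, i.e. is -1 or 0.  */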
13665
13666 /* Get the last value assigned to X, if known. Some registers
13667 in the value may be replaced with (clobber (const_int 0)) if their value
13668 is no longer known reliably. */
13669
13670 static rtx
13671 get_last_value (const_rtx x)
13672 {
13673 unsigned int regno;
13674 rtx value;
13675 reg_stat_type *rsp;
13676
13677 /* If this is a non-paradoxical SUBREG, get the value of its operand and
13678 then convert it to the desired mode. If this is a paradoxical SUBREG,
13679 we cannot predict what values the "extra" bits might have. */
13680 if (GET_CODE (x) == SUBREG
13681 && subreg_lowpart_p (x)
13682 && !paradoxical_subreg_p (x)
13683 && (value = get_last_value (SUBREG_REG (x))) != 0)
13684 return gen_lowpart (GET_MODE (x), value);
13685
13686 if (!REG_P (x))
13687 return 0;
13688
13689 regno = REGNO (x);
13690 rsp = &reg_stat[regno];
13691 value = rsp->last_set_value;
13692
13693 /* If we don't have a value, or if it isn't for this basic block and
13694 it's either a hard register, set more than once, or live
13695 at the beginning of the function, return 0.
13696
13697 Because if it's not live at the beginning of the function then the reg
13698 is always set before being used (is never used without being set).
13699 And, if it's set only once, and it's always set before use, then all
13700 uses must have the same last value, even if it's not from this basic
13701 block. */
13702
13703 if (value == 0
13704 || (rsp->last_set_label < label_tick_ebb_start
13705 && (regno < FIRST_PSEUDO_REGISTER
13706 || regno >= reg_n_sets_max
13707 || REG_N_SETS (regno) != 1
13708 || REGNO_REG_SET_P
13709 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), regno))))
13710 return 0;
13711
13712 /* If the value was set in a later insn than the ones we are processing,
13713 we can't use it even if the register was only set once. */
13714 if (rsp->last_set_label == label_tick
13715 && DF_INSN_LUID (rsp->last_set) >= subst_low_luid)
13716 return 0;
13717
13718 /* If fewer bits were set than what we are asked for now, we cannot use
13719 the value. */
13720 if (maybe_lt (GET_MODE_PRECISION (rsp->last_set_mode),
13721 GET_MODE_PRECISION (GET_MODE (x))))
13722 return 0;
13723
13724 /* If the value has all its registers valid, return it. */
13725 if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 0))
13726 return value;
13727
13728 /* Otherwise, make a copy and replace any invalid register with
13729 (clobber (const_int 0)). If that fails for some reason, return 0. */
13730
13731 value = copy_rtx (value);
13732 if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 1))
13733 return value;
13734
13735 return 0;
13736 }
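
/* An example of the SUBREG handling above (hypothetical): asked about
   (subreg:QI (reg:SI 100) 0) when the last value of (reg:SI 100) is
   (const_int 0x1234), we return gen_lowpart (QImode, ...), i.e.
   (const_int 0x34); for the paradoxical (subreg:DI (reg:SI 100) 0) we
   return 0, since the extra bits are unpredictable.  */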
13737
13738 /* Define three variables used for communication between the following
13739 routines. */
13740
13741 static unsigned int reg_dead_regno, reg_dead_endregno;
13742 static int reg_dead_flag;
13743
13744 /* Function called via note_stores from reg_dead_at_p.
13745
13746 If DEST is within [reg_dead_regno, reg_dead_endregno), set
13747 reg_dead_flag to 1 if X is a CLOBBER and to -1 if it is a SET. */
13748
13749 static void
13750 reg_dead_at_p_1 (rtx dest, const_rtx x, void *data ATTRIBUTE_UNUSED)
13751 {
13752 unsigned int regno, endregno;
13753
13754 if (!REG_P (dest))
13755 return;
13756
13757 regno = REGNO (dest);
13758 endregno = END_REGNO (dest);
13759 if (reg_dead_endregno > regno && reg_dead_regno < endregno)
13760 reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1;
13761 }
13762
13763 /* Return nonzero if REG is known to be dead at INSN.
13764
13765 We scan backwards from INSN. If we hit a REG_DEAD note or a CLOBBER
13766 referencing REG, it is dead. If we hit a SET referencing REG, it is
13767 live. Otherwise, see if it is live or dead at the start of the basic
13768 block we are in. Hard regs marked as being live in NEWPAT_USED_REGS
13769 must be assumed to be always live. */
13770
13771 static int
13772 reg_dead_at_p (rtx reg, rtx_insn *insn)
13773 {
13774 basic_block block;
13775 unsigned int i;
13776
13777 /* Set variables for reg_dead_at_p_1. */
13778 reg_dead_regno = REGNO (reg);
13779 reg_dead_endregno = END_REGNO (reg);
13780
13781 reg_dead_flag = 0;
13782
13783 /* Check that reg isn't mentioned in NEWPAT_USED_REGS. For fixed registers
13784 we allow the machine description to decide whether use-and-clobber
13785 patterns are OK. */
13786 if (reg_dead_regno < FIRST_PSEUDO_REGISTER)
13787 {
13788 for (i = reg_dead_regno; i < reg_dead_endregno; i++)
13789 if (!fixed_regs[i] && TEST_HARD_REG_BIT (newpat_used_regs, i))
13790 return 0;
13791 }
13792
13793 /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, or
13794 beginning of basic block. */
13795 block = BLOCK_FOR_INSN (insn);
13796 for (;;)
13797 {
13798 if (INSN_P (insn))
13799 {
13800 if (find_regno_note (insn, REG_UNUSED, reg_dead_regno))
13801 return 1;
13802
13803 note_stores (PATTERN (insn), reg_dead_at_p_1, NULL);
13804 if (reg_dead_flag)
13805 return reg_dead_flag == 1 ? 1 : 0;
13806
13807 if (find_regno_note (insn, REG_DEAD, reg_dead_regno))
13808 return 1;
13809 }
13810
13811 if (insn == BB_HEAD (block))
13812 break;
13813
13814 insn = PREV_INSN (insn);
13815 }
13816
13817 /* Look at live-in sets for the basic block that we were in. */
13818 for (i = reg_dead_regno; i < reg_dead_endregno; i++)
13819 if (REGNO_REG_SET_P (df_get_live_in (block), i))
13820 return 0;
13821
13822 return 1;
13823 }
13824
13825 /* Note hard registers in X that are used. */
13826
13827 static void
13828 mark_used_regs_combine (rtx x)
13829 {
13830 RTX_CODE code = GET_CODE (x);
13831 unsigned int regno;
13832 int i;
13833
13834 switch (code)
13835 {
13836 case LABEL_REF:
13837 case SYMBOL_REF:
13838 case CONST:
13839 CASE_CONST_ANY:
13840 case PC:
13841 case ADDR_VEC:
13842 case ADDR_DIFF_VEC:
13843 case ASM_INPUT:
13844 /* CC0 must die in the insn after it is set, so we don't need to take
13845 special note of it here. */
13846 case CC0:
13847 return;
13848
13849 case CLOBBER:
13850 /* If we are clobbering a MEM, mark any hard registers inside the
13851 address as used. */
13852 if (MEM_P (XEXP (x, 0)))
13853 mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
13854 return;
13855
13856 case REG:
13857 regno = REGNO (x);
13858 /* A hard reg in a wide mode may really be multiple registers.
13859 If so, mark all of them just like the first. */
13860 if (regno < FIRST_PSEUDO_REGISTER)
13861 {
13862 /* None of this applies to the stack, frame or arg pointers. */
13863 if (regno == STACK_POINTER_REGNUM
13864 || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
13865 && regno == HARD_FRAME_POINTER_REGNUM)
13866 || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
13867 && regno == ARG_POINTER_REGNUM && fixed_regs[regno])
13868 || regno == FRAME_POINTER_REGNUM)
13869 return;
13870
13871 add_to_hard_reg_set (&newpat_used_regs, GET_MODE (x), regno);
13872 }
13873 return;
13874
13875 case SET:
13876 {
13877 /* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
13878 the address. */
13879 rtx testreg = SET_DEST (x);
13880
13881 while (GET_CODE (testreg) == SUBREG
13882 || GET_CODE (testreg) == ZERO_EXTRACT
13883 || GET_CODE (testreg) == STRICT_LOW_PART)
13884 testreg = XEXP (testreg, 0);
13885
13886 if (MEM_P (testreg))
13887 mark_used_regs_combine (XEXP (testreg, 0));
13888
13889 mark_used_regs_combine (SET_SRC (x));
13890 }
13891 return;
13892
13893 default:
13894 break;
13895 }
13896
13897 /* Recursively scan the operands of this expression. */
13898
13899 {
13900 const char *fmt = GET_RTX_FORMAT (code);
13901
13902 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13903 {
13904 if (fmt[i] == 'e')
13905 mark_used_regs_combine (XEXP (x, i));
13906 else if (fmt[i] == 'E')
13907 {
13908 int j;
13909
13910 for (j = 0; j < XVECLEN (x, i); j++)
13911 mark_used_regs_combine (XVECEXP (x, i, j));
13912 }
13913 }
13914 }
13915 }
13916
13917 /* Remove register number REGNO from the dead registers list of INSN.
13918
13919 Return the note used to record the death, if there was one. */
13920
13921 rtx
13922 remove_death (unsigned int regno, rtx_insn *insn)
13923 {
13924 rtx note = find_regno_note (insn, REG_DEAD, regno);
13925
13926 if (note)
13927 remove_note (insn, note);
13928
13929 return note;
13930 }
13931
13932 /* For each register (hardware or pseudo) used within expression X, if its
13933 death is in an instruction with luid between FROM_LUID (inclusive) and
13934 the luid of TO_INSN (exclusive), put a REG_DEAD note for that register
13935 in the list headed by PNOTES.
13936
13937 That said, don't move registers killed by maybe_kill_insn.
13938
13939 This is done when X is being merged by combination into TO_INSN. These
13940 notes will then be distributed as needed. */
13941
13942 static void
13943 move_deaths (rtx x, rtx maybe_kill_insn, int from_luid, rtx_insn *to_insn,
13944 rtx *pnotes)
13945 {
13946 const char *fmt;
13947 int len, i;
13948 enum rtx_code code = GET_CODE (x);
13949
13950 if (code == REG)
13951 {
13952 unsigned int regno = REGNO (x);
13953 rtx_insn *where_dead = reg_stat[regno].last_death;
13954
13955 /* If we do not know where the register died, it may still die between
13956 FROM_LUID and TO_INSN. If so, find it. This is PR83304. */
13957 if (!where_dead || DF_INSN_LUID (where_dead) >= DF_INSN_LUID (to_insn))
13958 {
13959 rtx_insn *insn = prev_real_nondebug_insn (to_insn);
13960 while (insn
13961 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (to_insn)
13962 && DF_INSN_LUID (insn) >= from_luid)
13963 {
13964 if (dead_or_set_regno_p (insn, regno))
13965 {
13966 if (find_regno_note (insn, REG_DEAD, regno))
13967 where_dead = insn;
13968 break;
13969 }
13970
13971 insn = prev_real_nondebug_insn (insn);
13972 }
13973 }
13974
13975 /* Don't move the register if it gets killed in between from and to. */
13976 if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
13977 && ! reg_referenced_p (x, maybe_kill_insn))
13978 return;
13979
13980 if (where_dead
13981 && BLOCK_FOR_INSN (where_dead) == BLOCK_FOR_INSN (to_insn)
13982 && DF_INSN_LUID (where_dead) >= from_luid
13983 && DF_INSN_LUID (where_dead) < DF_INSN_LUID (to_insn))
13984 {
13985 rtx note = remove_death (regno, where_dead);
13986
13987 /* It is possible for the call above to return 0. This can occur
13988 when last_death points to I2 or I1 that we combined with.
13989 In that case make a new note.
13990
13991 We must also check for the case where X is a hard register
13992 and NOTE is a death note for a range of hard registers
13993 including X. In that case, we must put REG_DEAD notes for
13994 the remaining registers in place of NOTE. */
13995
13996 if (note != 0 && regno < FIRST_PSEUDO_REGISTER
13997 && partial_subreg_p (GET_MODE (x), GET_MODE (XEXP (note, 0))))
13998 {
13999 unsigned int deadregno = REGNO (XEXP (note, 0));
14000 unsigned int deadend = END_REGNO (XEXP (note, 0));
14001 unsigned int ourend = END_REGNO (x);
14002 unsigned int i;
14003
14004 for (i = deadregno; i < deadend; i++)
14005 if (i < regno || i >= ourend)
14006 add_reg_note (where_dead, REG_DEAD, regno_reg_rtx[i]);
14007 }
14008
14009 /* If we didn't find any note, or if we found a REG_DEAD note that
14010 covers only part of the given reg, and we have a multi-reg hard
14011 register, then to be safe we must check for REG_DEAD notes
14012 for each register other than the first. They could have
14013 their own REG_DEAD notes lying around. */
14014 else if ((note == 0
14015 || (note != 0
14016 && partial_subreg_p (GET_MODE (XEXP (note, 0)),
14017 GET_MODE (x))))
14018 && regno < FIRST_PSEUDO_REGISTER
14019 && REG_NREGS (x) > 1)
14020 {
14021 unsigned int ourend = END_REGNO (x);
14022 unsigned int i, offset;
14023 rtx oldnotes = 0;
14024
14025 if (note)
14026 offset = hard_regno_nregs (regno, GET_MODE (XEXP (note, 0)));
14027 else
14028 offset = 1;
14029
14030 for (i = regno + offset; i < ourend; i++)
14031 move_deaths (regno_reg_rtx[i],
14032 maybe_kill_insn, from_luid, to_insn, &oldnotes);
14033 }
14034
14035 if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x))
14036 {
14037 XEXP (note, 1) = *pnotes;
14038 *pnotes = note;
14039 }
14040 else
14041 *pnotes = alloc_reg_note (REG_DEAD, x, *pnotes);
14042 }
14043
14044 return;
14045 }
14046
14047 else if (GET_CODE (x) == SET)
14048 {
14049 rtx dest = SET_DEST (x);
14050
14051 move_deaths (SET_SRC (x), maybe_kill_insn, from_luid, to_insn, pnotes);
14052
14053 /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
14054 that accesses one word of a multi-word item, some
14055 piece of every register in the expression is used by
14056 this insn, so remove any old death. */
14057 /* ??? So why do we test for equality of the sizes? */
14058
14059 if (GET_CODE (dest) == ZERO_EXTRACT
14060 || GET_CODE (dest) == STRICT_LOW_PART
14061 || (GET_CODE (dest) == SUBREG
14062 && !read_modify_subreg_p (dest)))
14063 {
14064 move_deaths (dest, maybe_kill_insn, from_luid, to_insn, pnotes);
14065 return;
14066 }
14067
14068 /* If this is some other SUBREG, we know it replaces the entire
14069 value, so use that as the destination. */
14070 if (GET_CODE (dest) == SUBREG)
14071 dest = SUBREG_REG (dest);
14072
14073 /* If this is a MEM, adjust deaths of anything used in the address.
14074 For a REG (the only other possibility), the entire value is
14075 being replaced so the old value is not used in this insn. */
14076
14077 if (MEM_P (dest))
14078 move_deaths (XEXP (dest, 0), maybe_kill_insn, from_luid,
14079 to_insn, pnotes);
14080 return;
14081 }
14082
14083 else if (GET_CODE (x) == CLOBBER)
14084 return;
14085
14086 len = GET_RTX_LENGTH (code);
14087 fmt = GET_RTX_FORMAT (code);
14088
14089 for (i = 0; i < len; i++)
14090 {
14091 if (fmt[i] == 'E')
14092 {
14093 int j;
14094 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
14095 move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_luid,
14096 to_insn, pnotes);
14097 }
14098 else if (fmt[i] == 'e')
14099 move_deaths (XEXP (x, i), maybe_kill_insn, from_luid, to_insn, pnotes);
14100 }
14101 }
14102
14103 /* Return 1 if X is the target of a bit-field assignment in BODY, the
14104 pattern of an insn. X must be a REG. */
14105
14106 static int
14107 reg_bitfield_target_p (rtx x, rtx body)
14108 {
14109 int i;
14110
14111 if (GET_CODE (body) == SET)
14112 {
14113 rtx dest = SET_DEST (body);
14114 rtx target;
14115 unsigned int regno, tregno, endregno, endtregno;
14116
14117 if (GET_CODE (dest) == ZERO_EXTRACT)
14118 target = XEXP (dest, 0);
14119 else if (GET_CODE (dest) == STRICT_LOW_PART)
14120 target = SUBREG_REG (XEXP (dest, 0));
14121 else
14122 return 0;
14123
14124 if (GET_CODE (target) == SUBREG)
14125 target = SUBREG_REG (target);
14126
14127 if (!REG_P (target))
14128 return 0;
14129
14130 tregno = REGNO (target), regno = REGNO (x);
14131 if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
14132 return target == x;
14133
14134 endtregno = end_hard_regno (GET_MODE (target), tregno);
14135 endregno = end_hard_regno (GET_MODE (x), regno);
14136
14137 return endregno > tregno && regno < endtregno;
14138 }
14139
14140 else if (GET_CODE (body) == PARALLEL)
14141 for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
14142 if (reg_bitfield_target_p (x, XVECEXP (body, 0, i)))
14143 return 1;
14144
14145 return 0;
14146 }
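
/* Illustrative (hypothetical pseudos): for BODY

       (set (zero_extract:SI (reg:SI 100) (const_int 8) (const_int 0))
            (reg:SI 101))

   the function returns 1 for (reg:SI 100): only part of the register
   is written, so a death note for it must not be handled as if the
   whole register were set.  */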
14147
14148 /* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
14149 as appropriate. I3 and I2 are the insns resulting from the combination
14150 insns including FROM (I2 may be zero).
14151
14152 ELIM_I2 and ELIM_I1 are either zero or registers that we know will
14153 not need REG_DEAD notes because they are being substituted for. This
14154 saves searching in the most common cases.
14155
14156 Each note in the list is either ignored or placed on some insns, depending
14157 on the type of note. */
14158
14159 static void
14160 distribute_notes (rtx notes, rtx_insn *from_insn, rtx_insn *i3, rtx_insn *i2,
14161 rtx elim_i2, rtx elim_i1, rtx elim_i0)
14162 {
14163 rtx note, next_note;
14164 rtx tem_note;
14165 rtx_insn *tem_insn;
14166
14167 for (note = notes; note; note = next_note)
14168 {
14169 rtx_insn *place = 0, *place2 = 0;
14170
14171 next_note = XEXP (note, 1);
14172 switch (REG_NOTE_KIND (note))
14173 {
14174 case REG_BR_PROB:
14175 case REG_BR_PRED:
14176 /* Doesn't matter much where we put this, as long as it's somewhere.
14177 It is preferable to keep these notes on branches, which is most
14178 likely to be i3. */
14179 place = i3;
14180 break;
14181
14182 case REG_NON_LOCAL_GOTO:
14183 if (JUMP_P (i3))
14184 place = i3;
14185 else
14186 {
14187 gcc_assert (i2 && JUMP_P (i2));
14188 place = i2;
14189 }
14190 break;
14191
14192 case REG_EH_REGION:
14193 /* These notes must remain with the call or trapping instruction. */
14194 if (CALL_P (i3))
14195 place = i3;
14196 else if (i2 && CALL_P (i2))
14197 place = i2;
14198 else
14199 {
14200 gcc_assert (cfun->can_throw_non_call_exceptions);
14201 if (may_trap_p (i3))
14202 place = i3;
14203 else if (i2 && may_trap_p (i2))
14204 place = i2;
14205 /* ??? Otherwise assume we've combined things such that we
14206 can now prove that the instructions can't trap. Drop the
14207 note in this case. */
14208 }
14209 break;
14210
14211 case REG_ARGS_SIZE:
14212 /* ??? How to distribute between i3-i1. Assume i3 contains the
14213 entire adjustment. Assert i3 contains at least some adjust. */
14214 if (!noop_move_p (i3))
14215 {
14216 poly_int64 old_size, args_size = get_args_size (note);
14217 /* fixup_args_size_notes looks at REG_NORETURN note,
14218 so ensure the note is placed there first. */
14219 if (CALL_P (i3))
14220 {
14221 rtx *np;
14222 for (np = &next_note; *np; np = &XEXP (*np, 1))
14223 if (REG_NOTE_KIND (*np) == REG_NORETURN)
14224 {
14225 rtx n = *np;
14226 *np = XEXP (n, 1);
14227 XEXP (n, 1) = REG_NOTES (i3);
14228 REG_NOTES (i3) = n;
14229 break;
14230 }
14231 }
14232 old_size = fixup_args_size_notes (PREV_INSN (i3), i3, args_size);
14233 /* emit_call_1 adds for !ACCUMULATE_OUTGOING_ARGS
14234 REG_ARGS_SIZE note to all noreturn calls, allow that here. */
14235 gcc_assert (maybe_ne (old_size, args_size)
14236 || (CALL_P (i3)
14237 && !ACCUMULATE_OUTGOING_ARGS
14238 && find_reg_note (i3, REG_NORETURN, NULL_RTX)));
14239 }
14240 break;
14241
14242 case REG_NORETURN:
14243 case REG_SETJMP:
14244 case REG_TM:
14245 case REG_CALL_DECL:
14246 case REG_CALL_NOCF_CHECK:
14247 /* These notes must remain with the call. It should not be
14248 possible for both I2 and I3 to be a call. */
14249 if (CALL_P (i3))
14250 place = i3;
14251 else
14252 {
14253 gcc_assert (i2 && CALL_P (i2));
14254 place = i2;
14255 }
14256 break;
14257
14258 case REG_UNUSED:
14259 /* Any clobbers for i3 may still exist, and so we must process
14260 REG_UNUSED notes from that insn.
14261
14262 Any clobbers from i2 or i1 can only exist if they were added by
14263 recog_for_combine. In that case, recog_for_combine created the
14264 necessary REG_UNUSED notes. Trying to keep any original
14265 REG_UNUSED notes from these insns can cause incorrect output
14266 if it is for the same register as the original i3 dest.
14267 In that case, we will notice that the register is set in i3,
14268 and then add a REG_UNUSED note for the destination of i3, which
14269 is wrong. However, it is possible to have REG_UNUSED notes from
14270 i2 or i1 for register which were both used and clobbered, so
14271 we keep notes from i2 or i1 if they will turn into REG_DEAD
14272 notes. */
14273
14274 /* If this register is set or clobbered in I3, put the note there
14275 unless there is one already. */
14276 if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
14277 {
14278 if (from_insn != i3)
14279 break;
14280
14281 if (! (REG_P (XEXP (note, 0))
14282 ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0)))
14283 : find_reg_note (i3, REG_UNUSED, XEXP (note, 0))))
14284 place = i3;
14285 }
14286 /* Otherwise, if this register is used by I3, then this register
14287 now dies here, so we must put a REG_DEAD note here unless there
14288 is one already. */
14289 else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
14290 && ! (REG_P (XEXP (note, 0))
14291 ? find_regno_note (i3, REG_DEAD,
14292 REGNO (XEXP (note, 0)))
14293 : find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
14294 {
14295 PUT_REG_NOTE_KIND (note, REG_DEAD);
14296 place = i3;
14297 }
14298
14299 /* A SET or CLOBBER of the REG_UNUSED reg has been removed,
14300 but we can't tell which at this point. We must reset any
14301 expectations we had about the value that was previously
14302 stored in the reg. ??? Ideally, we'd adjust REG_N_SETS
14303 and, if appropriate, restore its previous value, but we
14304 don't have enough information for that at this point. */
14305 else
14306 {
14307 record_value_for_reg (XEXP (note, 0), NULL, NULL_RTX);
14308
14309 /* Otherwise, if this register is now referenced in i2
14310 then the register used to be modified in one of the
14311 original insns. If it was i3 (say, in an unused
14312 parallel), it's now completely gone, so the note can
14313 be discarded. But if it was modified in i2, i1 or i0
14314 and we still reference it in i2, then we're
14315 referencing the previous value, and since the
14316 register was modified and REG_UNUSED, we know that
14317 the previous value is now dead. So, if we only
14318 reference the register in i2, we change the note to
14319 REG_DEAD, to reflect the previous value. However, if
14320 we're also setting or clobbering the register as
14321 scratch, we know (because the register was not
14322 referenced in i3) that it's unused, just as it was
14323 unused before, and we place the note in i2. */
14324 if (from_insn != i3 && i2 && INSN_P (i2)
14325 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14326 {
14327 if (!reg_set_p (XEXP (note, 0), PATTERN (i2)))
14328 PUT_REG_NOTE_KIND (note, REG_DEAD);
14329 if (! (REG_P (XEXP (note, 0))
14330 ? find_regno_note (i2, REG_NOTE_KIND (note),
14331 REGNO (XEXP (note, 0)))
14332 : find_reg_note (i2, REG_NOTE_KIND (note),
14333 XEXP (note, 0))))
14334 place = i2;
14335 }
14336 }
14337
14338 break;
14339
14340 case REG_EQUAL:
14341 case REG_EQUIV:
14342 case REG_NOALIAS:
14343 /* These notes say something about results of an insn. We can
14344 only support them if they used to be on I3 in which case they
14345 remain on I3. Otherwise they are ignored.
14346
14347 If the note refers to an expression that is not a constant, we
14348 must also ignore the note since we cannot tell whether the
14349 equivalence is still true. It might be possible to do
14350 slightly better than this (we only have a problem if I2DEST
14351 or I1DEST is present in the expression), but it doesn't
14352 seem worth the trouble. */
14353
14354 if (from_insn == i3
14355 && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0))))
14356 place = i3;
14357 break;
14358
14359 case REG_INC:
14360 /* These notes say something about how a register is used. They must
14361 be present on any use of the register in I2 or I3. */
14362 if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
14363 place = i3;
14364
14365 if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
14366 {
14367 if (place)
14368 place2 = i2;
14369 else
14370 place = i2;
14371 }
14372 break;
14373
14374 case REG_LABEL_TARGET:
14375 case REG_LABEL_OPERAND:
14376 /* This can show up in several ways -- either directly in the
14377 pattern, or hidden off in the constant pool with (or without?)
14378 a REG_EQUAL note. */
14379 /* ??? Ignore the without-reg_equal-note problem for now. */
14380 if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3))
14381 || ((tem_note = find_reg_note (i3, REG_EQUAL, NULL_RTX))
14382 && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
14383 && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0)))
14384 place = i3;
14385
14386 if (i2
14387 && (reg_mentioned_p (XEXP (note, 0), PATTERN (i2))
14388 || ((tem_note = find_reg_note (i2, REG_EQUAL, NULL_RTX))
14389 && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
14390 && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0))))
14391 {
14392 if (place)
14393 place2 = i2;
14394 else
14395 place = i2;
14396 }
14397
14398 /* For REG_LABEL_TARGET on a JUMP_P, we prefer to put the note
14399 as a JUMP_LABEL or decrement LABEL_NUSES if it's already
14400 there. */
14401 if (place && JUMP_P (place)
14402 && REG_NOTE_KIND (note) == REG_LABEL_TARGET
14403 && (JUMP_LABEL (place) == NULL
14404 || JUMP_LABEL (place) == XEXP (note, 0)))
14405 {
14406 rtx label = JUMP_LABEL (place);
14407
14408 if (!label)
14409 JUMP_LABEL (place) = XEXP (note, 0);
14410 else if (LABEL_P (label))
14411 LABEL_NUSES (label)--;
14412 }
14413
14414 if (place2 && JUMP_P (place2)
14415 && REG_NOTE_KIND (note) == REG_LABEL_TARGET
14416 && (JUMP_LABEL (place2) == NULL
14417 || JUMP_LABEL (place2) == XEXP (note, 0)))
14418 {
14419 rtx label = JUMP_LABEL (place2);
14420
14421 if (!label)
14422 JUMP_LABEL (place2) = XEXP (note, 0);
14423 else if (LABEL_P (label))
14424 LABEL_NUSES (label)--;
14425 place2 = 0;
14426 }
14427 break;
14428
14429 case REG_NONNEG:
14430 /* This note says something about the value of a register prior
14431 to the execution of an insn. It is too much trouble to see
14432 if the note is still correct in all situations. It is better
14433 to simply delete it. */
14434 break;
14435
14436 case REG_DEAD:
14437 /* If we replaced the right hand side of FROM_INSN with a
14438 REG_EQUAL note, the original use of the dying register
14439 will not have been combined into I3 and I2. In such cases,
14440 FROM_INSN is guaranteed to be the first of the combined
14441 instructions, so we simply need to search back before
14442 FROM_INSN for the previous use or set of this register,
14443 then alter the notes there appropriately.
14444
14445 If the register is used as an input in I3, it dies there.
14446 Similarly for I2, if it is nonzero and adjacent to I3.
14447
14448 If the register is not used as an input in either I3 or I2
14449 and it is not one of the registers we were supposed to eliminate,
14450 there are two possibilities. We might have a non-adjacent I2
14451 or we might have somehow eliminated an additional register
14452 from a computation. For example, we might have had A & B where
14453 we discover that B will always be zero. In this case we will
14454 eliminate the reference to A.
14455
14456 In both cases, we must search to see if we can find a previous
14457 use of A and put the death note there. */
14458
14459 if (from_insn
14460 && from_insn == i2mod
14461 && !reg_overlap_mentioned_p (XEXP (note, 0), i2mod_new_rhs))
14462 tem_insn = from_insn;
14463 else
14464 {
14465 if (from_insn
14466 && CALL_P (from_insn)
14467 && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
14468 place = from_insn;
14469 else if (i2 && reg_set_p (XEXP (note, 0), PATTERN (i2)))
14470 {
14471 /* If the new I2 sets the same register that is marked
14472 dead in the note, we do not in general know where to
14473 put the note. One important case we _can_ handle is
14474 when the note comes from I3. */
14475 if (from_insn == i3)
14476 place = i3;
14477 else
14478 break;
14479 }
14480 else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
14481 place = i3;
14482 else if (i2 != 0 && next_nonnote_nondebug_insn (i2) == i3
14483 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14484 place = i2;
14485 else if ((rtx_equal_p (XEXP (note, 0), elim_i2)
14486 && !(i2mod
14487 && reg_overlap_mentioned_p (XEXP (note, 0),
14488 i2mod_old_rhs)))
14489 || rtx_equal_p (XEXP (note, 0), elim_i1)
14490 || rtx_equal_p (XEXP (note, 0), elim_i0))
14491 break;
14492 tem_insn = i3;
14493 }
14494
14495 if (place == 0)
14496 {
14497 basic_block bb = this_basic_block;
14498
14499 for (tem_insn = PREV_INSN (tem_insn); place == 0; tem_insn = PREV_INSN (tem_insn))
14500 {
14501 if (!NONDEBUG_INSN_P (tem_insn))
14502 {
14503 if (tem_insn == BB_HEAD (bb))
14504 break;
14505 continue;
14506 }
14507
14508 /* If the register is being set at TEM_INSN, see if that is all
14509 TEM_INSN is doing. If so, delete TEM_INSN. Otherwise, make this
14510 into a REG_UNUSED note instead. Don't delete sets to
14511 global register vars. */
14512 if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER
14513 || !global_regs[REGNO (XEXP (note, 0))])
14514 && reg_set_p (XEXP (note, 0), PATTERN (tem_insn)))
14515 {
14516 rtx set = single_set (tem_insn);
14517 rtx inner_dest = 0;
14518 rtx_insn *cc0_setter = NULL;
14519
14520 if (set != 0)
14521 for (inner_dest = SET_DEST (set);
14522 (GET_CODE (inner_dest) == STRICT_LOW_PART
14523 || GET_CODE (inner_dest) == SUBREG
14524 || GET_CODE (inner_dest) == ZERO_EXTRACT);
14525 inner_dest = XEXP (inner_dest, 0))
14526 ;
14527
14528 /* Verify that it was the set, and not a clobber that
14529 modified the register.
14530
14531 CC0 targets must be careful to maintain setter/user
14532 pairs. If we cannot delete the setter due to side
14533 effects, mark the user with an UNUSED note instead
14534 of deleting it. */
14535
14536 if (set != 0 && ! side_effects_p (SET_SRC (set))
14537 && rtx_equal_p (XEXP (note, 0), inner_dest)
14538 && (!HAVE_cc0
14539 || (! reg_mentioned_p (cc0_rtx, SET_SRC (set))
14540 || ((cc0_setter = prev_cc0_setter (tem_insn)) != NULL
14541 && sets_cc0_p (PATTERN (cc0_setter)) > 0))))
14542 {
14543 /* Move the notes and links of TEM_INSN elsewhere.
14544 This might delete other dead insns recursively.
14545 First set the pattern to something that won't use
14546 any register. */
14547 rtx old_notes = REG_NOTES (tem_insn);
14548
14549 PATTERN (tem_insn) = pc_rtx;
14550 REG_NOTES (tem_insn) = NULL;
14551
14552 distribute_notes (old_notes, tem_insn, tem_insn, NULL,
14553 NULL_RTX, NULL_RTX, NULL_RTX);
14554 distribute_links (LOG_LINKS (tem_insn));
14555
14556 unsigned int regno = REGNO (XEXP (note, 0));
14557 reg_stat_type *rsp = &reg_stat[regno];
14558 if (rsp->last_set == tem_insn)
14559 record_value_for_reg (XEXP (note, 0), NULL, NULL_RTX);
14560
14561 SET_INSN_DELETED (tem_insn);
14562 if (tem_insn == i2)
14563 i2 = NULL;
14564
14565 /* Delete the setter too. */
14566 if (cc0_setter)
14567 {
14568 PATTERN (cc0_setter) = pc_rtx;
14569 old_notes = REG_NOTES (cc0_setter);
14570 REG_NOTES (cc0_setter) = NULL;
14571
14572 distribute_notes (old_notes, cc0_setter,
14573 cc0_setter, NULL,
14574 NULL_RTX, NULL_RTX, NULL_RTX);
14575 distribute_links (LOG_LINKS (cc0_setter));
14576
14577 SET_INSN_DELETED (cc0_setter);
14578 if (cc0_setter == i2)
14579 i2 = NULL;
14580 }
14581 }
14582 else
14583 {
14584 PUT_REG_NOTE_KIND (note, REG_UNUSED);
14585
14586 /* If there isn't already a REG_UNUSED note, put one
14587 here. Do not place a REG_DEAD note, even if
14588 the register is also used here; that would not
14589 match the algorithm used in lifetime analysis
14590 and can cause the consistency check in the
14591 scheduler to fail. */
14592 if (! find_regno_note (tem_insn, REG_UNUSED,
14593 REGNO (XEXP (note, 0))))
14594 place = tem_insn;
14595 break;
14596 }
14597 }
14598 else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem_insn))
14599 || (CALL_P (tem_insn)
14600 && find_reg_fusage (tem_insn, USE, XEXP (note, 0))))
14601 {
14602 place = tem_insn;
14603
14604 /* If we are doing a 3->2 combination, and we have a
14605 register which formerly died in i3 and was not used
14606 by i2, which now no longer dies in i3 and is used in
14607 i2 but does not die in i2, and place is between i2
14608 and i3, then we may need to move a link from place to
14609 i2. */
14610 if (i2 && DF_INSN_LUID (place) > DF_INSN_LUID (i2)
14611 && from_insn
14612 && DF_INSN_LUID (from_insn) > DF_INSN_LUID (i2)
14613 && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14614 {
14615 struct insn_link *links = LOG_LINKS (place);
14616 LOG_LINKS (place) = NULL;
14617 distribute_links (links);
14618 }
14619 break;
14620 }
14621
14622 if (tem_insn == BB_HEAD (bb))
14623 break;
14624 }
14625
14626 }
14627
14628 /* If the register is set or already dead at PLACE, we needn't do
14629 anything with this note if it is still a REG_DEAD note.
14630 We check here if it is set at all, not if is it totally replaced,
14631 which is what `dead_or_set_p' checks, so also check for it being
14632 set partially. */
14633
14634 if (place && REG_NOTE_KIND (note) == REG_DEAD)
14635 {
14636 unsigned int regno = REGNO (XEXP (note, 0));
14637 reg_stat_type *rsp = &reg_stat[regno];
14638
14639 if (dead_or_set_p (place, XEXP (note, 0))
14640 || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
14641 {
14642 /* Unless the register previously died in PLACE, clear
14643 last_death. [I no longer understand why this is
14644 being done.] */
14645 if (rsp->last_death != place)
14646 rsp->last_death = 0;
14647 place = 0;
14648 }
14649 else
14650 rsp->last_death = place;
14651
14652 /* If this is a death note for a hard reg that is occupying
14653 multiple registers, ensure that we are still using all
14654 parts of the object. If we find a piece of the object
14655 that is unused, we must arrange for an appropriate REG_DEAD
14656 note to be added for it. However, we can't just emit a USE
14657 and tag the note to it, since the register might actually
14658 be dead; so we recurse, and the recursive call then finds
14659 the previous insn that used this register. */

              if (place && REG_NREGS (XEXP (note, 0)) > 1)
                {
                  unsigned int endregno = END_REGNO (XEXP (note, 0));
                  bool all_used = true;
                  unsigned int i;

                  for (i = regno; i < endregno; i++)
                    if ((! refers_to_regno_p (i, PATTERN (place))
                         && ! find_regno_fusage (place, USE, i))
                        || dead_or_set_regno_p (place, i))
                      {
                        all_used = false;
                        break;
                      }

                  if (! all_used)
                    {
                      /* Put only REG_DEAD notes for pieces that are
                         not already dead or set.  */

                      for (i = regno; i < endregno;
                           i += hard_regno_nregs (i, reg_raw_mode[i]))
                        {
                          rtx piece = regno_reg_rtx[i];
                          basic_block bb = this_basic_block;

                          if (! dead_or_set_p (place, piece)
                              && ! reg_bitfield_target_p (piece,
                                                          PATTERN (place)))
                            {
                              rtx new_note = alloc_reg_note (REG_DEAD, piece,
                                                             NULL_RTX);

                              distribute_notes (new_note, place, place,
                                                NULL, NULL_RTX, NULL_RTX,
                                                NULL_RTX);
                            }
                          else if (! refers_to_regno_p (i, PATTERN (place))
                                   && ! find_regno_fusage (place, USE, i))
                            for (tem_insn = PREV_INSN (place); ;
                                 tem_insn = PREV_INSN (tem_insn))
                              {
                                if (!NONDEBUG_INSN_P (tem_insn))
                                  {
                                    if (tem_insn == BB_HEAD (bb))
                                      break;
                                    continue;
                                  }
                                if (dead_or_set_p (tem_insn, piece)
                                    || reg_bitfield_target_p (piece,
                                                              PATTERN (tem_insn)))
                                  {
                                    add_reg_note (tem_insn, REG_UNUSED, piece);
                                    break;
                                  }
                              }
                        }

                      place = 0;
                    }
                }
            }
          break;

        default:
          /* Any other notes should not be present at this point in the
             compilation.  */
          gcc_unreachable ();
        }

      if (place)
        {
          XEXP (note, 1) = REG_NOTES (place);
          REG_NOTES (place) = note;

          /* Set added_notes_insn to the earliest insn we added a note to.  */
          if (added_notes_insn == 0
              || DF_INSN_LUID (added_notes_insn) > DF_INSN_LUID (place))
            added_notes_insn = place;
        }

      if (place2)
        {
          add_shallow_copy_of_reg_note (place2, note);

          /* Set added_notes_insn to the earliest insn we added a note to.  */
          if (added_notes_insn == 0
              || DF_INSN_LUID (added_notes_insn) > DF_INSN_LUID (place2))
            added_notes_insn = place2;
        }
    }
}

/* Similarly to above, distribute the LOG_LINKS that used to be present on
   I3, I2, and I1 to new locations.  This is also called to add a link
   pointing at I3 when I3's destination is changed.  */
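/* A sketch of the idea (register numbers invented for the example):
   if insn A was (set (reg 64) ...) and insn B, which carried the
   LOG_LINK to A, has just been combined away, the link must move
   forward to the next insn in the block that uses (reg 64) before
   it is set again, if any such insn exists.  */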

static void
distribute_links (struct insn_link *links)
{
  struct insn_link *link, *next_link;

  for (link = links; link; link = next_link)
    {
      rtx_insn *place = 0;
      rtx_insn *insn;
      rtx set, reg;

      next_link = link->next;

      /* If the insn that this link points to is a NOTE, ignore it.  */
      if (NOTE_P (link->insn))
        continue;

      set = 0;
      rtx pat = PATTERN (link->insn);
      if (GET_CODE (pat) == SET)
        set = pat;
      else if (GET_CODE (pat) == PARALLEL)
        {
          int i;
          for (i = 0; i < XVECLEN (pat, 0); i++)
            {
              set = XVECEXP (pat, 0, i);
              if (GET_CODE (set) != SET)
                continue;

              reg = SET_DEST (set);
              while (GET_CODE (reg) == ZERO_EXTRACT
                     || GET_CODE (reg) == STRICT_LOW_PART
                     || GET_CODE (reg) == SUBREG)
                reg = XEXP (reg, 0);

              if (!REG_P (reg))
                continue;

              if (REGNO (reg) == link->regno)
                break;
            }
          if (i == XVECLEN (pat, 0))
            continue;
        }
      else
        continue;

      reg = SET_DEST (set);

      while (GET_CODE (reg) == ZERO_EXTRACT
             || GET_CODE (reg) == STRICT_LOW_PART
             || GET_CODE (reg) == SUBREG)
        reg = XEXP (reg, 0);

      if (reg == pc_rtx)
        continue;

      /* A LOG_LINK is defined as being placed on the first insn that uses
         a register and points to the insn that sets the register.  Start
         searching at the next insn after the target of the link and stop
         when we reach a set of the register or the end of the basic block.

         Note that this correctly handles the link that used to point from
         I3 to I2.  Also note that not much searching is typically done here
         since most links don't point very far away.  */
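      /* For example (an invented sequence): after
           A: (set (reg 70) ...)   <- link->insn
           B: (set (reg 71) (plus (reg 70) (const_int 1)))
         the scan below places the link on B, the first user.  If instead
         some insn re-set (reg 70) before any use, the scan would stop
         there and the link would simply be dropped.  */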

      for (insn = NEXT_INSN (link->insn);
           (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
                     || BB_HEAD (this_basic_block->next_bb) != insn));
           insn = NEXT_INSN (insn))
        if (DEBUG_INSN_P (insn))
          continue;
        else if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
          {
            if (reg_referenced_p (reg, PATTERN (insn)))
              place = insn;
            break;
          }
        else if (CALL_P (insn)
                 && find_reg_fusage (insn, USE, reg))
          {
            place = insn;
            break;
          }
        else if (INSN_P (insn) && reg_set_p (reg, insn))
          break;

      /* If we found a place to put the link, place it there unless there
         is already a link to the same insn as LINK at that point.  */

      if (place)
        {
          struct insn_link *link2;

          FOR_EACH_LOG_LINK (link2, place)
            if (link2->insn == link->insn && link2->regno == link->regno)
              break;

          if (link2 == NULL)
            {
              link->next = LOG_LINKS (place);
              LOG_LINKS (place) = link;

              /* Set added_links_insn to the earliest insn we added a
                 link to.  */
              if (added_links_insn == 0
                  || DF_INSN_LUID (added_links_insn) > DF_INSN_LUID (place))
                added_links_insn = place;
            }
        }
    }
}

/* Check for any register or memory mentioned in EQUIV that is not
   mentioned in EXPR.  This is used to restrict EQUIV to "specializations"
   of EXPR where some registers may have been replaced by constants.  */
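/* For instance (shapes invented for illustration): with
   EXPR = (plus (reg 64) (reg 65)) and
   EQUIV = (plus (reg 64) (const_int 8)), every REG or MEM in EQUIV
   also appears in EXPR, so this returns false and EQUIV counts as a
   specialization.  With EQUIV = (plus (reg 66) (const_int 8)) it
   returns true, since (reg 66) is unmentioned in EXPR.  */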

static bool
unmentioned_reg_p (rtx equiv, rtx expr)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, equiv, NONCONST)
    {
      const_rtx x = *iter;
      if ((REG_P (x) || MEM_P (x))
          && !reg_mentioned_p (x, expr))
        return true;
    }
  return false;
}

DEBUG_FUNCTION void
dump_combine_stats (FILE *file)
{
  fprintf
    (file,
     ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
     combine_attempts, combine_merges, combine_extras, combine_successes);
}

void
dump_combine_total_stats (FILE *file)
{
  fprintf
    (file,
     "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
     total_attempts, total_merges, total_extras, total_successes);
}

/* Try combining insns through substitution.  */
static unsigned int
rest_of_handle_combine (void)
{
  int rebuild_jump_labels_after_combine;

  df_set_flags (DF_LR_RUN_DCE + DF_DEFER_INSN_RESCAN);
  df_note_add_problem ();
  df_analyze ();

  regstat_init_n_sets_and_refs ();
  reg_n_sets_max = max_reg_num ();

  rebuild_jump_labels_after_combine
    = combine_instructions (get_insns (), max_reg_num ());

  /* Combining insns may have turned an indirect jump into a
     direct jump.  Rebuild the JUMP_LABEL fields of jumping
     instructions.  */
  if (rebuild_jump_labels_after_combine)
    {
      if (dom_info_available_p (CDI_DOMINATORS))
        free_dominance_info (CDI_DOMINATORS);
      timevar_push (TV_JUMP);
      rebuild_jump_labels (get_insns ());
      cleanup_cfg (0);
      timevar_pop (TV_JUMP);
    }

  regstat_free_n_sets_and_refs ();
  return 0;
}

namespace {

const pass_data pass_data_combine =
{
  RTL_PASS, /* type */
  "combine", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_COMBINE, /* tv_id */
  PROP_cfglayout, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_combine : public rtl_opt_pass
{
public:
  pass_combine (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_combine, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return (optimize > 0); }
  virtual unsigned int execute (function *)
    {
      return rest_of_handle_combine ();
    }

}; // class pass_combine

} // anon namespace

rtl_opt_pass *
make_pass_combine (gcc::context *ctxt)
{
  return new pass_combine (ctxt);
}
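
/* Usage note: the pass manager instantiates the combine pass through
   this factory; in GCC's tree it is referenced from passes.def via
   NEXT_PASS (pass_combine), and the gate () hook above keeps the pass
   disabled at -O0.  */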