/* Optimize by combining instructions for GNU compiler.
   Copyright (C) 1987-2018 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This module is essentially the "combiner" phase of the U. of Arizona
   Portable Optimizer, but redone to work on our list-structured
   representation for RTL instead of their string representation.

   The LOG_LINKS of each insn identify the most recent assignment
   to each REG used in the insn.  It is a list of previous insns,
   each of which contains a SET for a REG that is used in this insn
   and not used or set in between.  LOG_LINKs never cross basic blocks.
   They were set up by the preceding pass (lifetime analysis).

   We try to combine each pair of insns joined by a logical link.
   We also try to combine triplets of insns A, B and C when C has
   a link back to B and B has a link back to A.  Likewise for a
   small number of quadruplets of insns A, B, C and D for which
   there's a high likelihood of success.

   LOG_LINKS does not have links for uses of CC0.  They don't
   need to, because the insn that sets CC0 is always immediately
   before the insn that tests it.  So we always regard a branch
   insn as having a logical link to the preceding insn.  The same is true
   for an insn explicitly using CC0.

   We check (with modified_between_p) to avoid combining in such a way
   as to move a computation to a place where its value would be different.

   Combination is done by mathematically substituting the previous
   insn(s) values for the regs they set into the expressions in
   the later insns that refer to these regs.  If the result is a valid insn
   for our target machine, according to the machine description,
   we install it, delete the earlier insns, and update the data flow
   information (LOG_LINKS and REG_NOTES) for what we did.

   There are a few exceptions where the dataflow information isn't
   completely updated (however this is only a local issue since it is
   regenerated before the next pass that uses it):

   - reg_live_length is not updated
   - reg_n_refs is not adjusted in the rare case when a register is
     no longer required in a computation
   - there are extremely rare cases (see distribute_notes) when a
     REG_DEAD note is lost
   - a LOG_LINKS entry that refers to an insn with multiple SETs may be
     removed because there is no way to know which register it was
     linking

   To simplify substitution, we combine only when the earlier insn(s)
   consist of only a single assignment.  To simplify updating afterward,
   we never combine when a subroutine call appears in the middle.

   Since we do not represent assignments to CC0 explicitly except when that
   is all an insn does, there is no LOG_LINKS entry in an insn that uses
   the condition code for the insn that set the condition code.
   Fortunately, these two insns must be consecutive.
   Therefore, every JUMP_INSN is taken to have an implicit logical link
   to the preceding insn.  This is not quite right, since non-jumps can
   also use the condition code; but in practice such insns would not
   combine anyway.  */
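
/* As an illustrative sketch of the substitution described above (the
   register numbers are hypothetical), a typical two-insn combination
   substitutes the SET_SRC of I2 into I3:

	I2: (set (reg:SI 100) (ashift:SI (reg:SI 99) (const_int 2)))
	I3: (set (reg:SI 101) (plus:SI (reg:SI 100) (reg:SI 98)))

   becomes, when (reg:SI 100) dies in I3 and the combined pattern matches
   an insn in the machine description:

	I3: (set (reg:SI 101)
		 (plus:SI (ashift:SI (reg:SI 99) (const_int 2))
			  (reg:SI 98)))

   with I2 deleted and the LOG_LINKS and REG_NOTES updated accordingly.  */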

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "cfghooks.h"
#include "predict.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "optabs.h"
#include "regs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "cgraph.h"
#include "stor-layout.h"
#include "cfgrtl.h"
#include "cfgcleanup.h"
/* Include expr.h after insn-config.h so we get HAVE_conditional_move.  */
#include "explow.h"
#include "insn-attr.h"
#include "rtlhooks-def.h"
#include "params.h"
#include "tree-pass.h"
#include "valtrack.h"
#include "rtl-iter.h"
#include "print-rtl.h"

/* Number of attempts to combine instructions in this function.  */

static int combine_attempts;

/* Number of attempts that got as far as substitution in this function.  */

static int combine_merges;

/* Number of instructions combined with added SETs in this function.  */

static int combine_extras;

/* Number of instructions combined in this function.  */

static int combine_successes;

/* Totals over entire compilation.  */

static int total_attempts, total_merges, total_extras, total_successes;

/* combine_instructions may try to replace the right hand side of the
   second instruction with the value of an associated REG_EQUAL note
   before throwing it at try_combine.  That is problematic when there
   is a REG_DEAD note for a register used in the old right hand side
   and can cause distribute_notes to do wrong things.  This is the
   second instruction if it has been so modified, null otherwise.  */

static rtx_insn *i2mod;

/* When I2MOD is nonnull, this is a copy of the old right hand side.  */

static rtx i2mod_old_rhs;

/* When I2MOD is nonnull, this is a copy of the new right hand side.  */

static rtx i2mod_new_rhs;

struct reg_stat_type {
  /* Record last point of death of (hard or pseudo) register n.  */
  rtx_insn *last_death;

  /* Record last point of modification of (hard or pseudo) register n.  */
  rtx_insn *last_set;

  /* The next group of fields allows the recording of the last value assigned
     to (hard or pseudo) register n.  We use this information to see if an
     operation being processed is redundant given a prior operation performed
     on the register.  For example, an `and' with a constant is redundant if
     all the zero bits are already known to be turned off.

     We use an approach similar to that used by cse, but change it in the
     following ways:

     (1) We do not want to reinitialize at each label.
     (2) It is useful, but not critical, to know the actual value assigned
	 to a register.  Often just its form is helpful.

     Therefore, we maintain the following fields:

     last_set_value		the last value assigned
     last_set_label		records the value of label_tick when the
				register was assigned
     last_set_table_tick	records the value of label_tick when a
				value using the register is assigned
     last_set_invalid		set to nonzero when it is not valid
				to use the value of this register in some
				register's value

     To understand the usage of these tables, it is important to understand
     the distinction between the value in last_set_value being valid and
     the register being validly contained in some other expression in the
     table.

     (The next two parameters are out of date).

     reg_stat[i].last_set_value is valid if it is nonzero, and either
     reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick.

     Register I may validly appear in any expression returned for the value
     of another register if reg_n_sets[i] is 1.  It may also appear in the
     value for register J if reg_stat[j].last_set_invalid is zero, or
     reg_stat[i].last_set_label < reg_stat[j].last_set_label.

     If an expression is found in the table containing a register which may
     not validly appear in an expression, the register is replaced by
     something that won't match, (clobber (const_int 0)).  */

  /* Record last value assigned to (hard or pseudo) register n.  */

  rtx last_set_value;

  /* Record the value of label_tick when an expression involving register n
     is placed in last_set_value.  */

  int last_set_table_tick;

  /* Record the value of label_tick when the value for register n is placed in
     last_set_value.  */

  int last_set_label;

  /* These fields are maintained in parallel with last_set_value and are
     used to store the mode in which the register was last set, the bits
     that were known to be zero when it was last set, and the number of
     sign bit copies it was known to have when it was last set.  */

  unsigned HOST_WIDE_INT last_set_nonzero_bits;
  char last_set_sign_bit_copies;
  ENUM_BITFIELD(machine_mode) last_set_mode : 8;

  /* Set nonzero if references to register n in expressions should not be
     used.  last_set_invalid is set nonzero when this register is being
     assigned to and last_set_table_tick == label_tick.  */

  char last_set_invalid;

  /* Some registers that are set more than once and used in more than one
     basic block are nevertheless always set in similar ways.  For example,
     a QImode register may be loaded from memory in two places on a machine
     where byte loads zero extend.

     We record in the following fields if a register has some leading bits
     that are always equal to the sign bit, and what we know about the
     nonzero bits of a register, specifically which bits are known to be
     zero.

     If an entry is zero, it means that we don't know anything special.  */

  unsigned char sign_bit_copies;

  unsigned HOST_WIDE_INT nonzero_bits;

  /* Record the value of the label_tick when the last truncation
     happened.  The field truncated_to_mode is only valid if
     truncation_label == label_tick.  */

  int truncation_label;

  /* Record the last truncation seen for this register.  If truncation
     is not a nop to this mode we might be able to save an explicit
     truncation if we know that value already contains a truncated
     value.  */

  ENUM_BITFIELD(machine_mode) truncated_to_mode : 8;
};
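
/* A hypothetical example of how these fields pay off: if every set of
   (reg:SI 100) in the current extended basic block is a byte load on a
   machine where byte loads zero extend, reg_stat[100].nonzero_bits is
   0xff, so a later (and:SI (reg:SI 100) (const_int 255)) is known to be
   redundant and can be simplified to (reg:SI 100) itself.  */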


static vec<reg_stat_type> reg_stat;

/* One plus the highest pseudo for which we track REG_N_SETS.
   regstat_init_n_sets_and_refs allocates the array for REG_N_SETS just once,
   but during combine_split_insns new pseudos can be created.  As we don't have
   updated DF information in that case, it is hard to initialize the array
   after growing.  The combiner only cares about REG_N_SETS (regno) == 1,
   so instead of growing the arrays, just assume all newly created pseudos
   during combine might be set multiple times.  */

static unsigned int reg_n_sets_max;

/* Record the luid of the last insn that invalidated memory
   (anything that writes memory, and subroutine calls, but not pushes).  */

static int mem_last_set;

/* Record the luid of the last CALL_INSN
   so we can tell whether a potential combination crosses any calls.  */

static int last_call_luid;

/* When `subst' is called, this is the insn that is being modified
   (by combining in a previous insn).  The PATTERN of this insn
   is still the old pattern partially modified and it should not be
   looked at, but this may be used to examine the successors of the insn
   to judge whether a simplification is valid.  */

static rtx_insn *subst_insn;

/* This is the lowest LUID that `subst' is currently dealing with.
   get_last_value will not return a value if the register was set at or
   after this LUID.  If not for this mechanism, we could get confused if
   I2 or I1 in try_combine were an insn that used the old value of a register
   to obtain a new value.  In that case, we might erroneously get the
   new value of the register when we wanted the old one.  */

static int subst_low_luid;

/* This contains any hard registers that are used in newpat; reg_dead_at_p
   must consider all these registers to be always live.  */

static HARD_REG_SET newpat_used_regs;

/* This is an insn to which a LOG_LINKS entry has been added.  If this
   insn is earlier than I2 or I3, combine should rescan starting at
   that location.  */

static rtx_insn *added_links_insn;

/* And similarly, for notes.  */

static rtx_insn *added_notes_insn;

/* Basic block in which we are performing combines.  */
static basic_block this_basic_block;
static bool optimize_this_for_speed_p;


/* Length of the currently allocated uid_insn_cost array.  */

static int max_uid_known;

/* The following array records the insn_cost for every insn
   in the instruction stream.  */

static int *uid_insn_cost;

/* The following array records the LOG_LINKS for every insn in the
   instruction stream as struct insn_link pointers.  */

struct insn_link {
  rtx_insn *insn;
  unsigned int regno;
  struct insn_link *next;
};

static struct insn_link **uid_log_links;

static inline int
insn_uid_check (const_rtx insn)
{
  int uid = INSN_UID (insn);
  gcc_checking_assert (uid <= max_uid_known);
  return uid;
}

#define INSN_COST(INSN)		(uid_insn_cost[insn_uid_check (INSN)])
#define LOG_LINKS(INSN)		(uid_log_links[insn_uid_check (INSN)])

#define FOR_EACH_LOG_LINK(L, INSN)				\
  for ((L) = LOG_LINKS (INSN); (L); (L) = (L)->next)
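
/* A minimal usage sketch of the iterator above (use_insn and dest are
   hypothetical locals):

     struct insn_link *link;
     FOR_EACH_LOG_LINK (link, use_insn)
       if (link->regno == REGNO (dest))
	 break;

   walks the defs recorded for use_insn; link is null if none matched.  */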

/* Links for LOG_LINKS are allocated from this obstack.  */

static struct obstack insn_link_obstack;

/* Allocate a link.  */

static inline struct insn_link *
alloc_insn_link (rtx_insn *insn, unsigned int regno, struct insn_link *next)
{
  struct insn_link *l
    = (struct insn_link *) obstack_alloc (&insn_link_obstack,
					  sizeof (struct insn_link));
  l->insn = insn;
  l->regno = regno;
  l->next = next;
  return l;
}

/* Incremented for each basic block.  */

static int label_tick;

/* Reset to label_tick for each extended basic block in scanning order.  */

static int label_tick_ebb_start;

/* Mode used to compute significance in reg_stat[].nonzero_bits.  It is the
   largest integer mode that can fit in HOST_BITS_PER_WIDE_INT.  */

static scalar_int_mode nonzero_bits_mode;

/* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can
   be safely used.  It is zero while computing them and after combine has
   completed.  This former test prevents propagating values based on
   previously set values, which can be incorrect if a variable is modified
   in a loop.  */

static int nonzero_sign_valid;


/* Record one modification to rtl structure
   to be undone by storing old_contents into *where.  */

enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE, UNDO_LINKS };

struct undo
{
  struct undo *next;
  enum undo_kind kind;
  union { rtx r; int i; machine_mode m; struct insn_link *l; } old_contents;
  union { rtx *r; int *i; struct insn_link **l; } where;
};

/* Record a bunch of changes to be undone, up to MAX_UNDO of them.
   num_undo says how many are currently recorded.

   other_insn is nonzero if we have modified some other insn in the process
   of working on subst_insn.  It must be verified too.  */

struct undobuf
{
  struct undo *undos;
  struct undo *frees;
  rtx_insn *other_insn;
};

static struct undobuf undobuf;

/* Number of times the pseudo being substituted for
   was found and replaced.  */

static int n_occurrences;

static rtx reg_nonzero_bits_for_combine (const_rtx, scalar_int_mode,
					 scalar_int_mode,
					 unsigned HOST_WIDE_INT *);
static rtx reg_num_sign_bit_copies_for_combine (const_rtx, scalar_int_mode,
						scalar_int_mode,
						unsigned int *);
static void do_SUBST (rtx *, rtx);
static void do_SUBST_INT (int *, int);
static void init_reg_last (void);
static void setup_incoming_promotions (rtx_insn *);
static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *);
static int cant_combine_insn_p (rtx_insn *);
static int can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
			  rtx_insn *, rtx_insn *, rtx *, rtx *);
static int combinable_i3pat (rtx_insn *, rtx *, rtx, rtx, rtx, int, int, rtx *);
static int contains_muldiv (rtx);
static rtx_insn *try_combine (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
			      int *, rtx_insn *);
static void undo_all (void);
static void undo_commit (void);
static rtx *find_split_point (rtx *, rtx_insn *, bool);
static rtx subst (rtx, rtx, rtx, int, int, int);
static rtx combine_simplify_rtx (rtx, machine_mode, int, int);
static rtx simplify_if_then_else (rtx);
static rtx simplify_set (rtx);
static rtx simplify_logical (rtx);
static rtx expand_compound_operation (rtx);
static const_rtx expand_field_assignment (const_rtx);
static rtx make_extraction (machine_mode, rtx, HOST_WIDE_INT,
			    rtx, unsigned HOST_WIDE_INT, int, int, int);
static int get_pos_from_mask (unsigned HOST_WIDE_INT,
			      unsigned HOST_WIDE_INT *);
static rtx canon_reg_for_combine (rtx, rtx);
static rtx force_int_to_mode (rtx, scalar_int_mode, scalar_int_mode,
			      scalar_int_mode, unsigned HOST_WIDE_INT, int);
static rtx force_to_mode (rtx, machine_mode,
			  unsigned HOST_WIDE_INT, int);
static rtx if_then_else_cond (rtx, rtx *, rtx *);
static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
static int rtx_equal_for_field_assignment_p (rtx, rtx, bool = false);
static rtx make_field_assignment (rtx);
static rtx apply_distributive_law (rtx);
static rtx distribute_and_simplify_rtx (rtx, int);
static rtx simplify_and_const_int_1 (scalar_int_mode, rtx,
				     unsigned HOST_WIDE_INT);
static rtx simplify_and_const_int (rtx, scalar_int_mode, rtx,
				   unsigned HOST_WIDE_INT);
static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
			    HOST_WIDE_INT, machine_mode, int *);
static rtx simplify_shift_const_1 (enum rtx_code, machine_mode, rtx, int);
static rtx simplify_shift_const (rtx, enum rtx_code, machine_mode, rtx,
				 int);
static int recog_for_combine (rtx *, rtx_insn *, rtx *);
static rtx gen_lowpart_for_combine (machine_mode, rtx);
static enum rtx_code simplify_compare_const (enum rtx_code, machine_mode,
					     rtx, rtx *);
static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
static void update_table_tick (rtx);
static void record_value_for_reg (rtx, rtx_insn *, rtx);
static void check_promoted_subreg (rtx_insn *, rtx);
static void record_dead_and_set_regs_1 (rtx, const_rtx, void *);
static void record_dead_and_set_regs (rtx_insn *);
static int get_last_value_validate (rtx *, rtx_insn *, int, int);
static rtx get_last_value (const_rtx);
static void reg_dead_at_p_1 (rtx, const_rtx, void *);
static int reg_dead_at_p (rtx, rtx_insn *);
static void move_deaths (rtx, rtx, int, rtx_insn *, rtx *);
static int reg_bitfield_target_p (rtx, rtx);
static void distribute_notes (rtx, rtx_insn *, rtx_insn *, rtx_insn *, rtx, rtx, rtx);
static void distribute_links (struct insn_link *);
static void mark_used_regs_combine (rtx);
static void record_promoted_value (rtx_insn *, rtx);
static bool unmentioned_reg_p (rtx, rtx);
static void record_truncated_values (rtx *, void *);
static bool reg_truncated_to_mode (machine_mode, const_rtx);
static rtx gen_lowpart_or_truncate (machine_mode, rtx);


/* It is not safe to use ordinary gen_lowpart in combine.
   See comments in gen_lowpart_for_combine.  */
#undef RTL_HOOKS_GEN_LOWPART
#define RTL_HOOKS_GEN_LOWPART              gen_lowpart_for_combine

/* Our implementation of gen_lowpart never emits a new pseudo.  */
#undef RTL_HOOKS_GEN_LOWPART_NO_EMIT
#define RTL_HOOKS_GEN_LOWPART_NO_EMIT      gen_lowpart_for_combine

#undef RTL_HOOKS_REG_NONZERO_REG_BITS
#define RTL_HOOKS_REG_NONZERO_REG_BITS     reg_nonzero_bits_for_combine

#undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES
#define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES  reg_num_sign_bit_copies_for_combine

#undef RTL_HOOKS_REG_TRUNCATED_TO_MODE
#define RTL_HOOKS_REG_TRUNCATED_TO_MODE    reg_truncated_to_mode

static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER;


/* Convenience wrapper for the canonicalize_comparison target hook.
   Target hooks cannot use enum rtx_code.  */
static inline void
target_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1,
				bool op0_preserve_value)
{
  int code_int = (int) *code;
  targetm.canonicalize_comparison (&code_int, op0, op1, op0_preserve_value);
  *code = (enum rtx_code) code_int;
}

/* Try to split PATTERN found in INSN.  This returns NULL_RTX if
   PATTERN cannot be split.  Otherwise, it returns an insn sequence.
   This is a wrapper around split_insns which ensures that the
   reg_stat vector is made larger if the splitter creates a new
   register.  */

static rtx_insn *
combine_split_insns (rtx pattern, rtx_insn *insn)
{
  rtx_insn *ret;
  unsigned int nregs;

  ret = split_insns (pattern, insn);
  nregs = max_reg_num ();
  if (nregs > reg_stat.length ())
    reg_stat.safe_grow_cleared (nregs);
  return ret;
}

/* This is used by find_single_use to locate an rtx in LOC that
   contains exactly one use of DEST, which is typically either a REG
   or CC0.  It returns a pointer to the innermost rtx expression
   containing DEST.  Appearances of DEST that are being used to
   totally replace it are not counted.  */

static rtx *
find_single_use_1 (rtx dest, rtx *loc)
{
  rtx x = *loc;
  enum rtx_code code = GET_CODE (x);
  rtx *result = NULL;
  rtx *this_result;
  int i;
  const char *fmt;

  switch (code)
    {
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
    CASE_CONST_ANY:
    case CLOBBER:
      return 0;

    case SET:
      /* If the destination is anything other than CC0, PC, a REG or a SUBREG
	 of a REG that occupies all of the REG, the insn uses DEST if
	 it is mentioned in the destination or the source.  Otherwise, we
	 just need to check the source.  */
      if (GET_CODE (SET_DEST (x)) != CC0
	  && GET_CODE (SET_DEST (x)) != PC
	  && !REG_P (SET_DEST (x))
	  && ! (GET_CODE (SET_DEST (x)) == SUBREG
		&& REG_P (SUBREG_REG (SET_DEST (x)))
		&& !read_modify_subreg_p (SET_DEST (x))))
	break;

      return find_single_use_1 (dest, &SET_SRC (x));

    case MEM:
    case SUBREG:
      return find_single_use_1 (dest, &XEXP (x, 0));

    default:
      break;
    }

  /* If it wasn't one of the common cases above, check each expression and
     vector of this code.  Look for a unique usage of DEST.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (dest == XEXP (x, i)
	      || (REG_P (dest) && REG_P (XEXP (x, i))
		  && REGNO (dest) == REGNO (XEXP (x, i))))
	    this_result = loc;
	  else
	    this_result = find_single_use_1 (dest, &XEXP (x, i));

	  if (result == NULL)
	    result = this_result;
	  else if (this_result)
	    /* Duplicate usage.  */
	    return NULL;
	}
      else if (fmt[i] == 'E')
	{
	  int j;

	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    {
	      if (XVECEXP (x, i, j) == dest
		  || (REG_P (dest)
		      && REG_P (XVECEXP (x, i, j))
		      && REGNO (XVECEXP (x, i, j)) == REGNO (dest)))
		this_result = loc;
	      else
		this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));

	      if (result == NULL)
		result = this_result;
	      else if (this_result)
		return NULL;
	    }
	}
    }

  return result;
}


/* See if DEST, produced in INSN, is used only a single time in the
   sequel.  If so, return a pointer to the innermost rtx expression in which
   it is used.

   If PLOC is nonzero, *PLOC is set to the insn containing the single use.

   If DEST is cc0_rtx, we look only at the next insn.  In that case, we don't
   care about REG_DEAD notes or LOG_LINKS.

   Otherwise, we find the single use by finding an insn that has a
   LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST.  If DEST is
   only referenced once in that insn, we know that it must be the first
   and last insn referencing DEST.  */

static rtx *
find_single_use (rtx dest, rtx_insn *insn, rtx_insn **ploc)
{
  basic_block bb;
  rtx_insn *next;
  rtx *result;
  struct insn_link *link;

  if (dest == cc0_rtx)
    {
      next = NEXT_INSN (insn);
      if (next == 0
	  || (!NONJUMP_INSN_P (next) && !JUMP_P (next)))
	return 0;

      result = find_single_use_1 (dest, &PATTERN (next));
      if (result && ploc)
	*ploc = next;
      return result;
    }

  if (!REG_P (dest))
    return 0;

  bb = BLOCK_FOR_INSN (insn);
  for (next = NEXT_INSN (insn);
       next && BLOCK_FOR_INSN (next) == bb;
       next = NEXT_INSN (next))
    if (NONDEBUG_INSN_P (next) && dead_or_set_p (next, dest))
      {
	FOR_EACH_LOG_LINK (link, next)
	  if (link->insn == insn && link->regno == REGNO (dest))
	    break;

	if (link)
	  {
	    result = find_single_use_1 (dest, &PATTERN (next));
	    if (ploc)
	      *ploc = next;
	    return result;
	  }
      }

  return 0;
}

/* Substitute NEWVAL, an rtx expression, into INTO, a place in some
   insn.  The substitution can be undone by undo_all.  If INTO is already
   set to NEWVAL, do not record this change.  Because computing NEWVAL might
   also call SUBST, we have to compute it before we put anything into
   the undo table.  */

static void
do_SUBST (rtx *into, rtx newval)
{
  struct undo *buf;
  rtx oldval = *into;

  if (oldval == newval)
    return;

  /* We'd like to catch as many invalid transformations here as
     possible.  Unfortunately, there are way too many mode changes
     that are perfectly valid, so we'd waste too much effort for
     little gain doing the checks here.  Focus on catching invalid
     transformations involving integer constants.  */
  if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT
      && CONST_INT_P (newval))
    {
      /* Sanity check that we're replacing oldval with a CONST_INT
	 that is a valid sign-extension for the original mode.  */
      gcc_assert (INTVAL (newval)
		  == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));

      /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
	 CONST_INT is not valid, because after the replacement, the
	 original mode would be gone.  Unfortunately, we can't tell
	 when do_SUBST is called to replace the operand thereof, so we
	 perform this test on oldval instead, checking whether an
	 invalid replacement took place before we got here.  */
      gcc_assert (!(GET_CODE (oldval) == SUBREG
		    && CONST_INT_P (SUBREG_REG (oldval))));
      gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
		    && CONST_INT_P (XEXP (oldval, 0))));
    }

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_RTX;
  buf->where.r = into;
  buf->old_contents.r = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST(INTO, NEWVAL)	do_SUBST (&(INTO), (NEWVAL))
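
/* A minimal usage sketch (newpat and newsrc are hypothetical): a caller
   tentatively rewrites an insn with

     SUBST (SET_SRC (newpat), newsrc);

   and, if the result fails to match (recog_for_combine) or is judged too
   expensive, calls undo_all to restore every recorded *where from
   old_contents; undo_commit instead makes the changes permanent.  */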

/* Similar to SUBST, but NEWVAL is an int expression.  Note that substitution
   for the value of a HOST_WIDE_INT value (including CONST_INT) is
   not safe.  */

static void
do_SUBST_INT (int *into, int newval)
{
  struct undo *buf;
  int oldval = *into;

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_INT;
  buf->where.i = into;
  buf->old_contents.i = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_INT(INTO, NEWVAL)  do_SUBST_INT (&(INTO), (NEWVAL))

/* Similar to SUBST, but just substitute the mode.  This is used when
   changing the mode of a pseudo-register, so that any other
   references to the entry in the regno_reg_rtx array will change as
   well.  */

static void
do_SUBST_MODE (rtx *into, machine_mode newval)
{
  struct undo *buf;
  machine_mode oldval = GET_MODE (*into);

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_MODE;
  buf->where.r = into;
  buf->old_contents.m = oldval;
  adjust_reg_mode (*into, newval);

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_MODE(INTO, NEWVAL)  do_SUBST_MODE (&(INTO), (NEWVAL))

/* Similar to SUBST, but NEWVAL is a LOG_LINKS expression.  */

static void
do_SUBST_LINK (struct insn_link **into, struct insn_link *newval)
{
  struct undo *buf;
  struct insn_link *oldval = *into;

  if (oldval == newval)
    return;

  if (undobuf.frees)
    buf = undobuf.frees, undobuf.frees = buf->next;
  else
    buf = XNEW (struct undo);

  buf->kind = UNDO_LINKS;
  buf->where.l = into;
  buf->old_contents.l = oldval;
  *into = newval;

  buf->next = undobuf.undos, undobuf.undos = buf;
}

#define SUBST_LINK(oldval, newval) do_SUBST_LINK (&oldval, newval)

/* Subroutine of try_combine.  Determine whether the replacement patterns
   NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to insn_cost
   than the original sequence I0, I1, I2, I3 and undobuf.other_insn.  Note
   that I0, I1 and/or NEWI2PAT may be NULL_RTX.  Similarly, NEWOTHERPAT and
   undobuf.other_insn may also both be NULL_RTX.  Return false if the cost
   of all the instructions can be estimated and the replacements are more
   expensive than the original sequence.  */

static bool
combine_validate_cost (rtx_insn *i0, rtx_insn *i1, rtx_insn *i2, rtx_insn *i3,
		       rtx newpat, rtx newi2pat, rtx newotherpat)
{
  int i0_cost, i1_cost, i2_cost, i3_cost;
  int new_i2_cost, new_i3_cost;
  int old_cost, new_cost;

  /* Look up the original insn_costs.  */
  i2_cost = INSN_COST (i2);
  i3_cost = INSN_COST (i3);

  if (i1)
    {
      i1_cost = INSN_COST (i1);
      if (i0)
	{
	  i0_cost = INSN_COST (i0);
	  old_cost = (i0_cost > 0 && i1_cost > 0 && i2_cost > 0 && i3_cost > 0
		      ? i0_cost + i1_cost + i2_cost + i3_cost : 0);
	}
      else
	{
	  old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0
		      ? i1_cost + i2_cost + i3_cost : 0);
	  i0_cost = 0;
	}
    }
  else
    {
      old_cost = (i2_cost > 0 && i3_cost > 0) ? i2_cost + i3_cost : 0;
      i1_cost = i0_cost = 0;
    }

  /* If we have split a PARALLEL I2 to I1,I2, we have counted its cost twice;
     correct that.  */
  if (old_cost && i1 && INSN_UID (i1) == INSN_UID (i2))
    old_cost -= i1_cost;

  /* Calculate the replacement insn_costs.  */
  rtx tmp = PATTERN (i3);
  PATTERN (i3) = newpat;
  int tmpi = INSN_CODE (i3);
  INSN_CODE (i3) = -1;
  new_i3_cost = insn_cost (i3, optimize_this_for_speed_p);
  PATTERN (i3) = tmp;
  INSN_CODE (i3) = tmpi;
  if (newi2pat)
    {
      tmp = PATTERN (i2);
      PATTERN (i2) = newi2pat;
      tmpi = INSN_CODE (i2);
      INSN_CODE (i2) = -1;
      new_i2_cost = insn_cost (i2, optimize_this_for_speed_p);
      PATTERN (i2) = tmp;
      INSN_CODE (i2) = tmpi;
      new_cost = (new_i2_cost > 0 && new_i3_cost > 0)
		 ? new_i2_cost + new_i3_cost : 0;
    }
  else
    {
      new_cost = new_i3_cost;
      new_i2_cost = 0;
    }

  if (undobuf.other_insn)
    {
      int old_other_cost, new_other_cost;

      old_other_cost = INSN_COST (undobuf.other_insn);
      tmp = PATTERN (undobuf.other_insn);
      PATTERN (undobuf.other_insn) = newotherpat;
      tmpi = INSN_CODE (undobuf.other_insn);
      INSN_CODE (undobuf.other_insn) = -1;
      new_other_cost = insn_cost (undobuf.other_insn,
				  optimize_this_for_speed_p);
      PATTERN (undobuf.other_insn) = tmp;
      INSN_CODE (undobuf.other_insn) = tmpi;
      if (old_other_cost > 0 && new_other_cost > 0)
	{
	  old_cost += old_other_cost;
	  new_cost += new_other_cost;
	}
      else
	old_cost = 0;
    }

  /* Disallow this combination if both new_cost and old_cost are greater than
     zero, and new_cost is greater than old_cost.  */
  int reject = old_cost > 0 && new_cost > old_cost;

  if (dump_file)
    {
      fprintf (dump_file, "%s combination of insns ",
	       reject ? "rejecting" : "allowing");
      if (i0)
	fprintf (dump_file, "%d, ", INSN_UID (i0));
      if (i1 && INSN_UID (i1) != INSN_UID (i2))
	fprintf (dump_file, "%d, ", INSN_UID (i1));
      fprintf (dump_file, "%d and %d\n", INSN_UID (i2), INSN_UID (i3));

      fprintf (dump_file, "original costs ");
      if (i0)
	fprintf (dump_file, "%d + ", i0_cost);
      if (i1 && INSN_UID (i1) != INSN_UID (i2))
	fprintf (dump_file, "%d + ", i1_cost);
      fprintf (dump_file, "%d + %d = %d\n", i2_cost, i3_cost, old_cost);

      if (newi2pat)
	fprintf (dump_file, "replacement costs %d + %d = %d\n",
		 new_i2_cost, new_i3_cost, new_cost);
      else
	fprintf (dump_file, "replacement cost %d\n", new_cost);
    }

  if (reject)
    return false;

  /* Update the uid_insn_cost array with the replacement costs.  */
  INSN_COST (i2) = new_i2_cost;
  INSN_COST (i3) = new_i3_cost;
  if (i1)
    {
      INSN_COST (i1) = 0;
      if (i0)
	INSN_COST (i0) = 0;
    }

  return true;
}


/* Delete any insns that copy a register to itself.
   Return true if the CFG was changed.  */

static bool
delete_noop_moves (void)
{
  rtx_insn *insn, *next;
  basic_block bb;

  bool edges_deleted = false;

  FOR_EACH_BB_FN (bb, cfun)
    {
      for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
	{
	  next = NEXT_INSN (insn);
	  if (INSN_P (insn) && noop_move_p (insn))
	    {
	      if (dump_file)
		fprintf (dump_file, "deleting noop move %d\n", INSN_UID (insn));

	      edges_deleted |= delete_insn_and_edges (insn);
	    }
	}
    }

  return edges_deleted;
}


/* Return false if we do not want to (or cannot) combine DEF.  */
static bool
can_combine_def_p (df_ref def)
{
  /* Do not consider if it is pre/post modification in MEM.  */
  if (DF_REF_FLAGS (def) & DF_REF_PRE_POST_MODIFY)
    return false;

  unsigned int regno = DF_REF_REGNO (def);

  /* Do not combine frame pointer adjustments.  */
  if ((regno == FRAME_POINTER_REGNUM
       && (!reload_completed || frame_pointer_needed))
      || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
	  && regno == HARD_FRAME_POINTER_REGNUM
	  && (!reload_completed || frame_pointer_needed))
      || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
	  && regno == ARG_POINTER_REGNUM && fixed_regs[regno]))
    return false;

  return true;
}

/* Return false if we do not want to (or cannot) combine USE.  */
static bool
can_combine_use_p (df_ref use)
{
  /* Do not consider the usage of the stack pointer by function call.  */
  if (DF_REF_FLAGS (use) & DF_REF_CALL_STACK_USAGE)
    return false;

  return true;
}

/* Fill in log links field for all insns.  */

static void
create_log_links (void)
{
  basic_block bb;
  rtx_insn **next_use;
  rtx_insn *insn;
  df_ref def, use;

  next_use = XCNEWVEC (rtx_insn *, max_reg_num ());

  /* Pass through each block from the end, recording the uses of each
     register and establishing log links when def is encountered.
     Note that we do not clear next_use array in order to save time,
     so we have to test whether the use is in the same basic block as def.

     There are a few cases below when we do not consider the definition or
     usage -- these are taken over from what the original flow.c did.
     Don't ask me why it is done this way; I don't know and if it works,
     I don't want to know.  */

  FOR_EACH_BB_FN (bb, cfun)
    {
      FOR_BB_INSNS_REVERSE (bb, insn)
	{
	  if (!NONDEBUG_INSN_P (insn))
	    continue;

	  /* Log links are created only once.  */
	  gcc_assert (!LOG_LINKS (insn));

	  FOR_EACH_INSN_DEF (def, insn)
	    {
	      unsigned int regno = DF_REF_REGNO (def);
	      rtx_insn *use_insn;

	      if (!next_use[regno])
		continue;

	      if (!can_combine_def_p (def))
		continue;

	      use_insn = next_use[regno];
	      next_use[regno] = NULL;

	      if (BLOCK_FOR_INSN (use_insn) != bb)
		continue;

	      /* flow.c claimed:

		 We don't build a LOG_LINK for hard registers contained
		 in ASM_OPERANDs.  If these registers get replaced,
		 we might wind up changing the semantics of the insn,
		 even if reload can make what appear to be valid
		 assignments later.  */
	      if (regno < FIRST_PSEUDO_REGISTER
		  && asm_noperands (PATTERN (use_insn)) >= 0)
		continue;

	      /* Don't add duplicate links between instructions.  */
	      struct insn_link *links;
	      FOR_EACH_LOG_LINK (links, use_insn)
		if (insn == links->insn && regno == links->regno)
		  break;

	      if (!links)
		LOG_LINKS (use_insn)
		  = alloc_insn_link (insn, regno, LOG_LINKS (use_insn));
	    }

	  FOR_EACH_INSN_USE (use, insn)
	    if (can_combine_use_p (use))
	      next_use[DF_REF_REGNO (use)] = insn;
	}
    }

  free (next_use);
}
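
/* As a hypothetical illustration of the structure built above: given

     insn 10: (set (reg:SI 100) ...)
     insn 11: ... no use or set of (reg:SI 100) ...
     insn 12: (set ... (... (reg:SI 100) ...))

   within one basic block, LOG_LINKS (insn 12) gains an entry with
   insn == insn 10 and regno == 100, which is exactly what try_combine
   later follows when attempting to merge insn 10 into insn 12.  */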

/* Walk the LOG_LINKS of insn B to see if we find a reference to A.  Return
   true if we found a LOG_LINK that proves that A feeds B.  This only works
   if there are no instructions between A and B which could have a link
   depending on A, since in that case we would not record a link for B.
   We also check the implicit dependency created by a cc0 setter/user
   pair.  */

static bool
insn_a_feeds_b (rtx_insn *a, rtx_insn *b)
{
  struct insn_link *links;
  FOR_EACH_LOG_LINK (links, b)
    if (links->insn == a)
      return true;
  if (HAVE_cc0 && sets_cc0_p (a))
    return true;
  return false;
}

/* Main entry point for combiner.  F is the first insn of the function.
   NREGS is the first unused pseudo-reg number.

   Return nonzero if the CFG was changed (e.g. if the combiner has
   turned an indirect jump instruction into a direct jump).  */
static int
combine_instructions (rtx_insn *f, unsigned int nregs)
{
  rtx_insn *insn, *next;
  rtx_insn *prev;
  struct insn_link *links, *nextlinks;
  rtx_insn *first;
  basic_block last_bb;

  int new_direct_jump_p = 0;

  for (first = f; first && !NONDEBUG_INSN_P (first); )
    first = NEXT_INSN (first);
  if (!first)
    return 0;

  combine_attempts = 0;
  combine_merges = 0;
  combine_extras = 0;
  combine_successes = 0;

  rtl_hooks = combine_rtl_hooks;

  reg_stat.safe_grow_cleared (nregs);

  init_recog_no_volatile ();

  /* Allocate array for insn info.  */
  max_uid_known = get_max_uid ();
  uid_log_links = XCNEWVEC (struct insn_link *, max_uid_known + 1);
  uid_insn_cost = XCNEWVEC (int, max_uid_known + 1);
  gcc_obstack_init (&insn_link_obstack);

  nonzero_bits_mode = int_mode_for_size (HOST_BITS_PER_WIDE_INT, 0).require ();

  /* Don't use reg_stat[].nonzero_bits when computing it.  This can cause
     problems when, for example, we have j <<= 1 in a loop.  */

  nonzero_sign_valid = 0;
  label_tick = label_tick_ebb_start = 1;

  /* Scan all SETs and see if we can deduce anything about what
     bits are known to be zero for some registers and how many copies
     of the sign bit are known to exist for those registers.

     Also set any known values so that we can use it while searching
     for what bits are known to be set.  */

  setup_incoming_promotions (first);
  /* Allow the entry block and the first block to fall into the same EBB.
     Conceptually the incoming promotions are assigned to the entry block.  */
  last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);

  create_log_links ();
  FOR_EACH_BB_FN (this_basic_block, cfun)
    {
      optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
      last_call_luid = 0;
      mem_last_set = -1;

      label_tick++;
      if (!single_pred_p (this_basic_block)
	  || single_pred (this_basic_block) != last_bb)
	label_tick_ebb_start = label_tick;
      last_bb = this_basic_block;

      FOR_BB_INSNS (this_basic_block, insn)
	if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
	  {
	    rtx links;

	    subst_low_luid = DF_INSN_LUID (insn);
	    subst_insn = insn;

	    note_stores (PATTERN (insn), set_nonzero_bits_and_sign_copies,
			 insn);
	    record_dead_and_set_regs (insn);

	    if (AUTO_INC_DEC)
	      for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
		if (REG_NOTE_KIND (links) == REG_INC)
		  set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX,
						    insn);

	    /* Record the current insn_cost of this instruction.  */
	    if (NONJUMP_INSN_P (insn))
	      INSN_COST (insn) = insn_cost (insn, optimize_this_for_speed_p);
	    if (dump_file)
	      {
		fprintf (dump_file, "insn_cost %d for ", INSN_COST (insn));
		dump_insn_slim (dump_file, insn);
	      }
	  }
    }

  nonzero_sign_valid = 1;

  /* Now scan all the insns in forward order.  */
  label_tick = label_tick_ebb_start = 1;
  init_reg_last ();
  setup_incoming_promotions (first);
  last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
  int max_combine = PARAM_VALUE (PARAM_MAX_COMBINE_INSNS);

  FOR_EACH_BB_FN (this_basic_block, cfun)
    {
      rtx_insn *last_combined_insn = NULL;

      /* Ignore instruction combination in basic blocks that are going to
	 be removed as unreachable anyway.  See PR82386.  */
      if (EDGE_COUNT (this_basic_block->preds) == 0)
	continue;

      optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
      last_call_luid = 0;
      mem_last_set = -1;

      label_tick++;
      if (!single_pred_p (this_basic_block)
	  || single_pred (this_basic_block) != last_bb)
	label_tick_ebb_start = label_tick;
      last_bb = this_basic_block;

      rtl_profile_for_bb (this_basic_block);
      for (insn = BB_HEAD (this_basic_block);
	   insn != NEXT_INSN (BB_END (this_basic_block));
	   insn = next ? next : NEXT_INSN (insn))
	{
	  next = 0;
	  if (!NONDEBUG_INSN_P (insn))
	    continue;

	  while (last_combined_insn
		 && (!NONDEBUG_INSN_P (last_combined_insn)
		     || last_combined_insn->deleted ()))
	    last_combined_insn = PREV_INSN (last_combined_insn);
	  if (last_combined_insn == NULL_RTX
	      || BLOCK_FOR_INSN (last_combined_insn) != this_basic_block
	      || DF_INSN_LUID (last_combined_insn) <= DF_INSN_LUID (insn))
	    last_combined_insn = insn;

	  /* See if we know about function return values before this
	     insn based upon SUBREG flags.  */
	  check_promoted_subreg (insn, PATTERN (insn));

	  /* See if we can find hardregs and subreg of pseudos in
	     narrower modes.  This could help turning TRUNCATEs
	     into SUBREGs.  */
	  note_uses (&PATTERN (insn), record_truncated_values, NULL);

	  /* Try this insn with each insn it links back to.  */

	  FOR_EACH_LOG_LINK (links, insn)
	    if ((next = try_combine (insn, links->insn, NULL,
				     NULL, &new_direct_jump_p,
				     last_combined_insn)) != 0)
	      {
		statistics_counter_event (cfun, "two-insn combine", 1);
		goto retry;
	      }

	  /* Try each sequence of three linked insns ending with this one.  */

	  if (max_combine >= 3)
	    FOR_EACH_LOG_LINK (links, insn)
	      {
		rtx_insn *link = links->insn;

		/* If the linked insn has been replaced by a note, then there
		   is no point in pursuing this chain any further.  */
		if (NOTE_P (link))
		  continue;

		FOR_EACH_LOG_LINK (nextlinks, link)
		  if ((next = try_combine (insn, link, nextlinks->insn,
					   NULL, &new_direct_jump_p,
					   last_combined_insn)) != 0)
		    {
		      statistics_counter_event (cfun, "three-insn combine", 1);
		      goto retry;
		    }
	      }

	  /* Try to combine a jump insn that uses CC0
	     with a preceding insn that sets CC0, and maybe with its
	     logical predecessor as well.
	     This is how we make decrement-and-branch insns.
	     We need this special code because data flow connections
	     via CC0 do not get entered in LOG_LINKS.  */

	  if (HAVE_cc0
	      && JUMP_P (insn)
	      && (prev = prev_nonnote_insn (insn)) != 0
	      && NONJUMP_INSN_P (prev)
	      && sets_cc0_p (PATTERN (prev)))
	    {
	      if ((next = try_combine (insn, prev, NULL, NULL,
				       &new_direct_jump_p,
				       last_combined_insn)) != 0)
		goto retry;

	      FOR_EACH_LOG_LINK (nextlinks, prev)
		if ((next = try_combine (insn, prev, nextlinks->insn,
					 NULL, &new_direct_jump_p,
					 last_combined_insn)) != 0)
		  goto retry;
	    }

	  /* Do the same for an insn that explicitly references CC0.  */
	  if (HAVE_cc0 && NONJUMP_INSN_P (insn)
	      && (prev = prev_nonnote_insn (insn)) != 0
	      && NONJUMP_INSN_P (prev)
	      && sets_cc0_p (PATTERN (prev))
	      && GET_CODE (PATTERN (insn)) == SET
	      && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
	    {
	      if ((next = try_combine (insn, prev, NULL, NULL,
				       &new_direct_jump_p,
				       last_combined_insn)) != 0)
		goto retry;

	      FOR_EACH_LOG_LINK (nextlinks, prev)
		if ((next = try_combine (insn, prev, nextlinks->insn,
					 NULL, &new_direct_jump_p,
					 last_combined_insn)) != 0)
		  goto retry;
	    }

	  /* Finally, see if any of the insns that this insn links to
	     explicitly references CC0.  If so, try this insn, that insn,
	     and its predecessor if it sets CC0.  */
	  if (HAVE_cc0)
	    {
	      FOR_EACH_LOG_LINK (links, insn)
		if (NONJUMP_INSN_P (links->insn)
		    && GET_CODE (PATTERN (links->insn)) == SET
		    && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (links->insn)))
		    && (prev = prev_nonnote_insn (links->insn)) != 0
		    && NONJUMP_INSN_P (prev)
		    && sets_cc0_p (PATTERN (prev))
		    && (next = try_combine (insn, links->insn,
					    prev, NULL, &new_direct_jump_p,
					    last_combined_insn)) != 0)
		  goto retry;
	    }

	  /* Try combining an insn with two different insns whose results it
	     uses.  */
	  if (max_combine >= 3)
	    FOR_EACH_LOG_LINK (links, insn)
	      for (nextlinks = links->next; nextlinks;
		   nextlinks = nextlinks->next)
		if ((next = try_combine (insn, links->insn,
					 nextlinks->insn, NULL,
					 &new_direct_jump_p,
					 last_combined_insn)) != 0)
		  {
		    statistics_counter_event (cfun, "three-insn combine", 1);
		    goto retry;
		  }

	  /* Try four-instruction combinations.  */
	  if (max_combine >= 4)
	    FOR_EACH_LOG_LINK (links, insn)
	      {
		struct insn_link *next1;
		rtx_insn *link = links->insn;

		/* If the linked insn has been replaced by a note, then there
		   is no point in pursuing this chain any further.  */
		if (NOTE_P (link))
		  continue;

		FOR_EACH_LOG_LINK (next1, link)
		  {
		    rtx_insn *link1 = next1->insn;
		    if (NOTE_P (link1))
		      continue;
		    /* I0 -> I1 -> I2 -> I3.  */
		    FOR_EACH_LOG_LINK (nextlinks, link1)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun, "four-insn combine", 1);
			  goto retry;
			}
		    /* I0, I1 -> I2, I2 -> I3.  */
		    for (nextlinks = next1->next; nextlinks;
			 nextlinks = nextlinks->next)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun, "four-insn combine", 1);
			  goto retry;
			}
		  }

		for (next1 = links->next; next1; next1 = next1->next)
		  {
		    rtx_insn *link1 = next1->insn;
		    if (NOTE_P (link1))
		      continue;
		    /* I0 -> I2; I1, I2 -> I3.  */
		    FOR_EACH_LOG_LINK (nextlinks, link)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun, "four-insn combine", 1);
			  goto retry;
			}
		    /* I0 -> I1; I1, I2 -> I3.  */
		    FOR_EACH_LOG_LINK (nextlinks, link1)
		      if ((next = try_combine (insn, link, link1,
					       nextlinks->insn,
					       &new_direct_jump_p,
					       last_combined_insn)) != 0)
			{
			  statistics_counter_event (cfun, "four-insn combine", 1);
			  goto retry;
			}
		  }
	      }

	  /* Try this insn with each REG_EQUAL note it links back to.  */
	  FOR_EACH_LOG_LINK (links, insn)
	    {
	      rtx set, note;
	      rtx_insn *temp = links->insn;
	      if ((set = single_set (temp)) != 0
		  && (note = find_reg_equal_equiv_note (temp)) != 0
		  && (note = XEXP (note, 0), GET_CODE (note)) != EXPR_LIST
		  /* Avoid using a register that may have already been
		     marked dead by an earlier instruction.  */
		  && ! unmentioned_reg_p (note, SET_SRC (set))
		  && (GET_MODE (note) == VOIDmode
		      ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set)))
		      : (GET_MODE (SET_DEST (set)) == GET_MODE (note)
			 && (GET_CODE (SET_DEST (set)) != ZERO_EXTRACT
			     || (GET_MODE (XEXP (SET_DEST (set), 0))
				 == GET_MODE (note))))))
		{
		  /* Temporarily replace the set's source with the
		     contents of the REG_EQUAL note.  The insn will
		     be deleted or recognized by try_combine.  */
		  rtx orig_src = SET_SRC (set);
		  rtx orig_dest = SET_DEST (set);
		  if (GET_CODE (SET_DEST (set)) == ZERO_EXTRACT)
		    SET_DEST (set) = XEXP (SET_DEST (set), 0);
		  SET_SRC (set) = note;
		  i2mod = temp;
		  i2mod_old_rhs = copy_rtx (orig_src);
		  i2mod_new_rhs = copy_rtx (note);
		  next = try_combine (insn, i2mod, NULL, NULL,
				      &new_direct_jump_p,
				      last_combined_insn);
		  i2mod = NULL;
		  if (next)
		    {
		      statistics_counter_event (cfun, "insn-with-note combine", 1);
		      goto retry;
		    }
		  SET_SRC (set) = orig_src;
		  SET_DEST (set) = orig_dest;
		}
	    }

	  if (!NOTE_P (insn))
	    record_dead_and_set_regs (insn);

	retry:
	  ;
	}
    }

  default_rtl_profile ();
  clear_bb_flags ();
  new_direct_jump_p |= purge_all_dead_edges ();
  new_direct_jump_p |= delete_noop_moves ();

  /* Clean up.  */
  obstack_free (&insn_link_obstack, NULL);
  free (uid_log_links);
  free (uid_insn_cost);
  reg_stat.release ();

  {
    struct undo *undo, *next;
    for (undo = undobuf.frees; undo; undo = next)
      {
	next = undo->next;
	free (undo);
      }
    undobuf.frees = 0;
  }

  total_attempts += combine_attempts;
  total_merges += combine_merges;
  total_extras += combine_extras;
  total_successes += combine_successes;

  nonzero_sign_valid = 0;
  rtl_hooks = general_rtl_hooks;

  /* Make recognizer allow volatile MEMs again.  */
  init_recog ();

  return new_direct_jump_p;
}

/* Wipe the last_xxx fields of reg_stat in preparation for another pass.  */

static void
init_reg_last (void)
{
  unsigned int i;
  reg_stat_type *p;

  FOR_EACH_VEC_ELT (reg_stat, i, p)
    memset (p, 0, offsetof (reg_stat_type, sign_bit_copies));
}

/* Set up any promoted values for incoming argument registers.  */

static void
setup_incoming_promotions (rtx_insn *first)
{
  tree arg;
  bool strictly_local = false;

  for (arg = DECL_ARGUMENTS (current_function_decl); arg;
       arg = DECL_CHAIN (arg))
    {
      rtx x, reg = DECL_INCOMING_RTL (arg);
      int uns1, uns3;
      machine_mode mode1, mode2, mode3, mode4;

      /* Only continue if the incoming argument is in a register.  */
      if (!REG_P (reg))
	continue;

      /* Determine, if possible, whether all call sites of the current
	 function lie within the current compilation unit.  (This does
	 take into account the exporting of a function via taking its
	 address, and so forth.)  */
      strictly_local = cgraph_node::local_info (current_function_decl)->local;

      /* The mode and signedness of the argument before any promotions happen
	 (equal to the mode of the pseudo holding it at that stage).  */
      mode1 = TYPE_MODE (TREE_TYPE (arg));
      uns1 = TYPE_UNSIGNED (TREE_TYPE (arg));

      /* The mode and signedness of the argument after any source language and
	 TARGET_PROMOTE_PROTOTYPES-driven promotions.  */
      mode2 = TYPE_MODE (DECL_ARG_TYPE (arg));
      uns3 = TYPE_UNSIGNED (DECL_ARG_TYPE (arg));

      /* The mode and signedness of the argument as it is actually passed,
	 see assign_parm_setup_reg in function.c.  */
      mode3 = promote_function_mode (TREE_TYPE (arg), mode1, &uns3,
				     TREE_TYPE (cfun->decl), 0);

      /* The mode of the register in which the argument is being passed.  */
      mode4 = GET_MODE (reg);

      /* Eliminate sign extensions in the callee when:
	 (a) A mode promotion has occurred;  */
      if (mode1 == mode3)
	continue;
      /* (b) The mode of the register is the same as the mode of
	     the argument as it is passed;  */
      if (mode3 != mode4)
	continue;
      /* (c) There's no language level extension;  */
      if (mode1 == mode2)
	;
      /* (c.1) All callers are from the current compilation unit.  If that's
	 the case we don't have to rely on an ABI, we only have to know
	 what we're generating right now, and we know that we will do the
	 mode1 to mode2 promotion with the given sign.  */
      else if (!strictly_local)
	continue;
      /* (c.2) The combination of the two promotions is useful.  This is
	 true when the signs match, or if the first promotion is unsigned.
	 In the latter case, (sign_extend (zero_extend x)) is the same as
	 (zero_extend (zero_extend x)), so make sure to force UNS3 true.  */
      else if (uns1)
	uns3 = true;
      else if (uns3)
	continue;

      /* Record that the value was promoted from mode1 to mode3,
	 so that any sign extension at the head of the current
	 function may be eliminated.  */
      x = gen_rtx_CLOBBER (mode1, const0_rtx);
      x = gen_rtx_fmt_e ((uns3 ? ZERO_EXTEND : SIGN_EXTEND), mode3, x);
      record_value_for_reg (reg, first, x);
    }
}

/* If MODE has a precision lower than PREC and SRC is a non-negative constant
   that would appear negative in MODE, sign-extend SRC for use in nonzero_bits
   because some machines (maybe most) will actually do the sign-extension and
   this is the conservative approach.

   ??? For 2.5, try to tighten up the MD files in this regard instead of this
   kludge.  */

static rtx
sign_extend_short_imm (rtx src, machine_mode mode, unsigned int prec)
{
  scalar_int_mode int_mode;
  if (CONST_INT_P (src)
      && is_a <scalar_int_mode> (mode, &int_mode)
      && GET_MODE_PRECISION (int_mode) < prec
      && INTVAL (src) > 0
      && val_signbit_known_set_p (int_mode, INTVAL (src)))
    src = GEN_INT (INTVAL (src) | ~GET_MODE_MASK (int_mode));

  return src;
}
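
/* A worked example of the transformation above: for QImode (8-bit
   precision) and PREC == 32, the constant 0x80 has its QImode sign bit
   set, so it becomes 0x80 | ~0xff, i.e. a value whose low byte is 0x80
   and whose remaining bits are all ones -- what a sign-extending load
   of that immediate would actually produce.  */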
1678
1679 /* Update RSP for pseudo-register X from INSN's REG_EQUAL note (if one exists)
1680 and SET. */
1681
1682 static void
update_rsp_from_reg_equal(reg_stat_type * rsp,rtx_insn * insn,const_rtx set,rtx x)1683 update_rsp_from_reg_equal (reg_stat_type *rsp, rtx_insn *insn, const_rtx set,
1684 rtx x)
1685 {
1686 rtx reg_equal_note = insn ? find_reg_equal_equiv_note (insn) : NULL_RTX;
1687 unsigned HOST_WIDE_INT bits = 0;
1688 rtx reg_equal = NULL, src = SET_SRC (set);
1689 unsigned int num = 0;
1690
1691 if (reg_equal_note)
1692 reg_equal = XEXP (reg_equal_note, 0);
1693
1694 if (SHORT_IMMEDIATES_SIGN_EXTEND)
1695 {
1696 src = sign_extend_short_imm (src, GET_MODE (x), BITS_PER_WORD);
1697 if (reg_equal)
1698 reg_equal = sign_extend_short_imm (reg_equal, GET_MODE (x), BITS_PER_WORD);
1699 }
1700
1701 /* Don't call nonzero_bits if it cannot change anything. */
1702 if (rsp->nonzero_bits != HOST_WIDE_INT_M1U)
1703 {
1704 bits = nonzero_bits (src, nonzero_bits_mode);
1705 if (reg_equal && bits)
1706 bits &= nonzero_bits (reg_equal, nonzero_bits_mode);
1707 rsp->nonzero_bits |= bits;
1708 }
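
  /* For instance: if SRC is (and:SI x (const_int 255)), nonzero_bits
     reports 0xff; a REG_EQUAL note of (const_int 5) narrows that to
     0x5, since both descriptions of the value must hold.  (Illustrative
     operands only.)  */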

  /* Don't call num_sign_bit_copies if it cannot change anything.  */
  if (rsp->sign_bit_copies != 1)
    {
      num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x));
      if (reg_equal && maybe_ne (num, GET_MODE_PRECISION (GET_MODE (x))))
        {
          unsigned int numeq = num_sign_bit_copies (reg_equal, GET_MODE (x));
          if (num == 0 || numeq > num)
            num = numeq;
        }
      if (rsp->sign_bit_copies == 0 || num < rsp->sign_bit_copies)
        rsp->sign_bit_copies = num;
    }
}

/* Called via note_stores.  If X is a pseudo that is narrower than
   HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero.

   If we are setting only a portion of X and we can't figure out what
   portion, assume all bits will be used since we don't know what will
   be happening.

   Similarly, set how many bits of X are known to be copies of the sign bit
   at all locations in the function.  This is the smallest number implied
   by any set of X.  */

static void
set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data)
{
  rtx_insn *insn = (rtx_insn *) data;
  scalar_int_mode mode;

  if (REG_P (x)
      && REGNO (x) >= FIRST_PSEUDO_REGISTER
      /* If this register is undefined at the start of the function, we
         can't say what its contents were.  */
      && ! REGNO_REG_SET_P
           (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), REGNO (x))
      && is_a <scalar_int_mode> (GET_MODE (x), &mode)
      && HWI_COMPUTABLE_MODE_P (mode))
    {
      reg_stat_type *rsp = &reg_stat[REGNO (x)];

      if (set == 0 || GET_CODE (set) == CLOBBER)
        {
          rsp->nonzero_bits = GET_MODE_MASK (mode);
          rsp->sign_bit_copies = 1;
          return;
        }

      /* If this register is being initialized using itself, and the
         register is uninitialized in this basic block, and there are
         no LOG_LINKS which set the register, then part of the
         register is uninitialized.  In that case we can't assume
         anything about the number of nonzero bits.

         ??? We could do better if we checked this in
         reg_{nonzero_bits,num_sign_bit_copies}_for_combine.  Then we
         could avoid making assumptions about the insn which initially
         sets the register, while still using the information in other
         insns.  We would have to be careful to check every insn
         involved in the combination.  */

      if (insn
          && reg_referenced_p (x, PATTERN (insn))
          && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn)),
                               REGNO (x)))
        {
          struct insn_link *link;

          FOR_EACH_LOG_LINK (link, insn)
            if (dead_or_set_p (link->insn, x))
              break;
          if (!link)
            {
              rsp->nonzero_bits = GET_MODE_MASK (mode);
              rsp->sign_bit_copies = 1;
              return;
            }
        }

      /* If this is a complex assignment, see if we can convert it into a
         simple assignment.  */
      set = expand_field_assignment (set);

      /* If this is a simple assignment, or we have a paradoxical SUBREG,
         set what we know about X.  */

      if (SET_DEST (set) == x
          || (paradoxical_subreg_p (SET_DEST (set))
              && SUBREG_REG (SET_DEST (set)) == x))
        update_rsp_from_reg_equal (rsp, insn, set, x);
      else
        {
          rsp->nonzero_bits = GET_MODE_MASK (mode);
          rsp->sign_bit_copies = 1;
        }
    }
}

/* See if INSN can be combined into I3.  PRED, PRED2, SUCC and SUCC2 are
   optionally insns that were previously combined into I3 or that will be
   combined into the merger of INSN and I3.  The order is PRED, PRED2,
   INSN, SUCC, SUCC2, I3.

   Return 0 if the combination is not allowed for any reason.

   If the combination is allowed, *PDEST will be set to the single
   destination of INSN and *PSRC to the single source, and this function
   will return 1.  */

static int
can_combine_p (rtx_insn *insn, rtx_insn *i3, rtx_insn *pred ATTRIBUTE_UNUSED,
               rtx_insn *pred2 ATTRIBUTE_UNUSED, rtx_insn *succ,
               rtx_insn *succ2, rtx *pdest, rtx *psrc)
{
  int i;
  const_rtx set = 0;
  rtx src, dest;
  rtx_insn *p;
  rtx link;
  bool all_adjacent = true;
  int (*is_volatile_p) (const_rtx);

  if (succ)
    {
      if (succ2)
        {
          if (next_active_insn (succ2) != i3)
            all_adjacent = false;
          if (next_active_insn (succ) != succ2)
            all_adjacent = false;
        }
      else if (next_active_insn (succ) != i3)
        all_adjacent = false;
      if (next_active_insn (insn) != succ)
        all_adjacent = false;
    }
  else if (next_active_insn (insn) != i3)
    all_adjacent = false;

  /* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0,
     or a PARALLEL consisting of such a SET and CLOBBERs.

     If INSN has CLOBBER parallel parts, ignore them for our processing.
     By definition, these happen during the execution of the insn.  When it
     is merged with another insn, all bets are off.  If they are, in fact,
     needed and aren't also supplied in I3, they may be added by
     recog_for_combine.  Otherwise, it won't match.

     We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
     note.

     Get the source and destination of INSN.  If more than one, can't
     combine.  */

  if (GET_CODE (PATTERN (insn)) == SET)
    set = PATTERN (insn);
  else if (GET_CODE (PATTERN (insn)) == PARALLEL
           && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
    {
      for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
        {
          rtx elt = XVECEXP (PATTERN (insn), 0, i);

          switch (GET_CODE (elt))
            {
            /* This is important to combine floating point insns
               for the SH4 port.  */
            case USE:
              /* Combining an isolated USE doesn't make sense.
                 We depend here on combinable_i3pat to reject them.  */
              /* The code below this loop only verifies that the inputs of
                 the SET in INSN do not change.  We call reg_set_between_p
                 to verify that the REG in the USE does not change between
                 I3 and INSN.
                 If the USE in INSN was for a pseudo register, the matching
                 insn pattern will likely match any register; combining this
                 with any other USE would only be safe if we knew that the
                 used registers have identical values, or if there was
                 something to tell them apart, e.g. different modes.  For
                 now, we forgo such complicated tests and simply disallow
                 combining of USES of pseudo registers with any other USE.  */
              if (REG_P (XEXP (elt, 0))
                  && GET_CODE (PATTERN (i3)) == PARALLEL)
                {
                  rtx i3pat = PATTERN (i3);
                  int i = XVECLEN (i3pat, 0) - 1;
                  unsigned int regno = REGNO (XEXP (elt, 0));

                  do
                    {
                      rtx i3elt = XVECEXP (i3pat, 0, i);

                      if (GET_CODE (i3elt) == USE
                          && REG_P (XEXP (i3elt, 0))
                          && (REGNO (XEXP (i3elt, 0)) == regno
                              ? reg_set_between_p (XEXP (elt, 0),
                                                   PREV_INSN (insn), i3)
                              : regno >= FIRST_PSEUDO_REGISTER))
                        return 0;
                    }
                  while (--i >= 0);
                }
              break;

            /* We can ignore CLOBBERs.  */
            case CLOBBER:
              break;

            case SET:
              /* Ignore SETs whose result isn't used but not those that
                 have side-effects.  */
              if (find_reg_note (insn, REG_UNUSED, SET_DEST (elt))
                  && insn_nothrow_p (insn)
                  && !side_effects_p (elt))
                break;

              /* If we have already found a SET, this is a second one and
                 so we cannot combine with this insn.  */
              if (set)
                return 0;

              set = elt;
              break;

            default:
              /* Anything else means we can't combine.  */
              return 0;
            }
        }

      if (set == 0
          /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
             so don't do anything with it.  */
          || GET_CODE (SET_SRC (set)) == ASM_OPERANDS)
        return 0;
    }
  else
    return 0;

  if (set == 0)
    return 0;

  /* The simplification in expand_field_assignment may call back to
     get_last_value, so set safe guard here.  */
  subst_low_luid = DF_INSN_LUID (insn);

  set = expand_field_assignment (set);
  src = SET_SRC (set), dest = SET_DEST (set);

  /* Do not eliminate a user-specified register if it is in an asm input,
     because we may break the register asm usage defined in the GCC manual
     if we allow this.  Be aware that this may cover more cases than we
     expect, but it should be harmless.  */
  if (REG_P (dest) && REG_USERVAR_P (dest) && HARD_REGISTER_P (dest)
      && extract_asm_operands (PATTERN (i3)))
    return 0;

  /* Don't eliminate a store in the stack pointer.  */
  if (dest == stack_pointer_rtx
      /* Don't combine with an insn that sets a register to itself if it has
         a REG_EQUAL note.  This may be part of a LIBCALL sequence.  */
      || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX))
      /* Can't merge an ASM_OPERANDS.  */
      || GET_CODE (src) == ASM_OPERANDS
      /* Can't merge a function call.  */
      || GET_CODE (src) == CALL
      /* Don't eliminate a function call argument.  */
      || (CALL_P (i3)
          && (find_reg_fusage (i3, USE, dest)
              || (REG_P (dest)
                  && REGNO (dest) < FIRST_PSEUDO_REGISTER
                  && global_regs[REGNO (dest)])))
      /* Don't substitute into an incremented register.  */
      || FIND_REG_INC_NOTE (i3, dest)
      || (succ && FIND_REG_INC_NOTE (succ, dest))
      || (succ2 && FIND_REG_INC_NOTE (succ2, dest))
      /* Don't substitute into a non-local goto, this confuses CFG.  */
      || (JUMP_P (i3) && find_reg_note (i3, REG_NON_LOCAL_GOTO, NULL_RTX))
      /* Make sure that DEST is not used after INSN but before SUCC, or
         after SUCC and before SUCC2, or after SUCC2 but before I3.  */
      || (!all_adjacent
          && ((succ2
               && (reg_used_between_p (dest, succ2, i3)
                   || reg_used_between_p (dest, succ, succ2)))
              || (!succ2 && succ && reg_used_between_p (dest, succ, i3))
              || (!succ2 && !succ && reg_used_between_p (dest, insn, i3))
              || (succ
                  /* SUCC and SUCC2 can be split halves from a PARALLEL; in
                     that case SUCC is not in the insn stream, so use SUCC2
                     instead for this test.  */
                  && reg_used_between_p (dest, insn,
                                         succ2
                                         && INSN_UID (succ) == INSN_UID (succ2)
                                         ? succ2 : succ))))
      /* Make sure that the value that is to be substituted for the register
         does not use any registers whose values alter in between.  However,
         if the insns are adjacent, a use can't cross a set even though we
         think it might (this can happen for a sequence of insns each setting
         the same destination; last_set of that register might point to
         a NOTE).  If INSN has a REG_EQUIV note, the register is always
         equivalent to the memory so the substitution is valid even if there
         are intervening stores.  Also, don't move a volatile asm or
         UNSPEC_VOLATILE across any other insns.  */
      || (! all_adjacent
          && (((!MEM_P (src)
                || ! find_reg_note (insn, REG_EQUIV, src))
               && modified_between_p (src, insn, i3))
              || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
              || GET_CODE (src) == UNSPEC_VOLATILE))
      /* Don't combine across a CALL_INSN, because that would possibly
         change whether the life span of some REGs crosses calls or not,
         and it is a pain to update that information.
         Exception: if source is a constant, moving it later can't hurt.
         Accept that as a special case.  */
      || (DF_INSN_LUID (insn) < last_call_luid && ! CONSTANT_P (src)))
    return 0;

  /* DEST must either be a REG or CC0.  */
  if (REG_P (dest))
    {
      /* If register alignment is being enforced for multi-word items in all
         cases except for parameters, it is possible to have a register copy
         insn referencing a hard register that is not allowed to contain the
         mode being copied and which would not be valid as an operand of most
         insns.  Eliminate this problem by not combining with such an insn.

         Also, on some machines we don't want to extend the life of a hard
         register.  */

      if (REG_P (src)
          && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
               && !targetm.hard_regno_mode_ok (REGNO (dest), GET_MODE (dest)))
              /* Don't extend the life of a hard register unless it is
                 user variable (if we have few registers) or it can't
                 fit into the desired register (meaning something special
                 is going on).
                 Also avoid substituting a return register into I3, because
                 reload can't handle a conflict with constraints of other
                 inputs.  */
              || (REGNO (src) < FIRST_PSEUDO_REGISTER
                  && !targetm.hard_regno_mode_ok (REGNO (src),
                                                  GET_MODE (src)))))
        return 0;
    }
  else if (GET_CODE (dest) != CC0)
    return 0;

  if (GET_CODE (PATTERN (i3)) == PARALLEL)
    for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
      if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER)
        {
          rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0);

          /* If the clobber represents an earlyclobber operand, we must not
             substitute an expression containing the clobbered register.
             As we do not analyze the constraint strings here, we have to
             make the conservative assumption.  However, if the register is
             a fixed hard reg, the clobber cannot represent any operand;
             we leave it up to the machine description to either accept or
             reject use-and-clobber patterns.  */
          if (!REG_P (reg)
              || REGNO (reg) >= FIRST_PSEUDO_REGISTER
              || !fixed_regs[REGNO (reg)])
            if (reg_overlap_mentioned_p (reg, src))
              return 0;
        }

  /* If INSN contains anything volatile, or is an `asm' (whether volatile
     or not), reject, unless nothing volatile comes between it and I3.  */

  if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src))
    {
      /* Make sure neither succ nor succ2 contains a volatile reference.  */
      if (succ2 != 0 && volatile_refs_p (PATTERN (succ2)))
        return 0;
      if (succ != 0 && volatile_refs_p (PATTERN (succ)))
        return 0;
      /* We'll check insns between INSN and I3 below.  */
    }

  /* If INSN is an asm, and DEST is a hard register, reject, since it has
     to be an explicit register variable, and was chosen for a reason.  */

  if (GET_CODE (src) == ASM_OPERANDS
      && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
    return 0;

  /* If INSN contains volatile references (specifically volatile MEMs),
     we cannot combine across any other volatile references.
     Even if INSN doesn't contain volatile references, any intervening
     volatile insn might affect machine state.  */

  is_volatile_p = volatile_refs_p (PATTERN (insn))
                  ? volatile_refs_p
                  : volatile_insn_p;

  for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
    if (INSN_P (p) && p != succ && p != succ2 && is_volatile_p (PATTERN (p)))
      return 0;

  /* If INSN contains an autoincrement or autodecrement, make sure that
     register is not used between there and I3, and not already used in
     I3 either.  Neither must it be used in PRED or SUCC, if they exist.
     Also insist that I3 not be a jump; if it were one
     and the incremented register were spilled, we would lose.  */

  if (AUTO_INC_DEC)
    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
      if (REG_NOTE_KIND (link) == REG_INC
          && (JUMP_P (i3)
              || reg_used_between_p (XEXP (link, 0), insn, i3)
              || (pred != NULL_RTX
                  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred)))
              || (pred2 != NULL_RTX
                  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred2)))
              || (succ != NULL_RTX
                  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ)))
              || (succ2 != NULL_RTX
                  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ2)))
              || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
        return 0;

  /* Don't combine an insn that follows a CC0-setting insn.
     An insn that uses CC0 must not be separated from the one that sets it.
     We do, however, allow I2 to follow a CC0-setting insn if that insn
     is passed as I1; in that case it will be deleted also.
     We also allow combining in this case if all the insns are adjacent
     because that would leave the two CC0 insns adjacent as well.
     It would be more logical to test whether CC0 occurs inside I1 or I2,
     but that would be much slower, and this ought to be equivalent.  */

  if (HAVE_cc0)
    {
      p = prev_nonnote_insn (insn);
      if (p && p != pred && NONJUMP_INSN_P (p) && sets_cc0_p (PATTERN (p))
          && ! all_adjacent)
        return 0;
    }

  /* If we get here, we have passed all the tests and the combination is
     to be allowed.  */

  *pdest = dest;
  *psrc = src;

  return 1;
}

/* LOC is the location within I3 that contains its pattern or the component
   of a PARALLEL of the pattern.  We validate that it is valid for combining.

   One problem is if I3 modifies its output, as opposed to replacing it
   entirely, we can't allow the output to contain I2DEST, I1DEST or I0DEST as
   doing so would produce an insn that is not equivalent to the original insns.

   Consider:

         (set (reg:DI 101) (reg:DI 100))
         (set (subreg:SI (reg:DI 101) 0) <foo>)

   This is NOT equivalent to:

         (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
                    (set (reg:DI 101) (reg:DI 100))])

   Not only does this modify 100 (in which case it might still be valid
   if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.

   We can also run into a problem if I2 sets a register that I1
   uses and I1 gets directly substituted into I3 (not via I2).  In that
   case, we would be getting the wrong value of I2DEST into I3, so we
   must reject the combination.  This case occurs when I2 and I1 both
   feed into I3, rather than when I1 feeds into I2, which feeds into I3.
   If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source
   of a SET must prevent combination from occurring.  The same situation
   can occur for I0, in which case I0_NOT_IN_SRC is set.

   Before doing the above check, we first try to expand a field assignment
   into a set of logical operations.

   If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which
   we place a register that is both set and used within I3.  If more than one
   such register is detected, we fail.

   Return 1 if the combination is valid, zero otherwise.  */

static int
combinable_i3pat (rtx_insn *i3, rtx *loc, rtx i2dest, rtx i1dest, rtx i0dest,
                  int i1_not_in_src, int i0_not_in_src, rtx *pi3dest_killed)
{
  rtx x = *loc;

  if (GET_CODE (x) == SET)
    {
      rtx set = x;
      rtx dest = SET_DEST (set);
      rtx src = SET_SRC (set);
      rtx inner_dest = dest;
      rtx subdest;

      while (GET_CODE (inner_dest) == STRICT_LOW_PART
             || GET_CODE (inner_dest) == SUBREG
             || GET_CODE (inner_dest) == ZERO_EXTRACT)
        inner_dest = XEXP (inner_dest, 0);

      /* Check for the case where I3 modifies its output, as discussed
         above.  We don't want to prevent pseudos from being combined
         into the address of a MEM, so only prevent the combination if
         i1 or i2 set the same MEM.  */
      if ((inner_dest != dest &&
           (!MEM_P (inner_dest)
            || rtx_equal_p (i2dest, inner_dest)
            || (i1dest && rtx_equal_p (i1dest, inner_dest))
            || (i0dest && rtx_equal_p (i0dest, inner_dest)))
           && (reg_overlap_mentioned_p (i2dest, inner_dest)
               || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))
               || (i0dest && reg_overlap_mentioned_p (i0dest, inner_dest))))

          /* This is the same test done in can_combine_p except we can't test
             all_adjacent; we don't have to, since this instruction will stay
             in place, thus we are not considering increasing the lifetime of
             INNER_DEST.

             Also, if this insn sets a function argument, combining it with
             something that might need a spill could clobber a previous
             function argument; the all_adjacent test in can_combine_p also
             checks this; here, we do a more specific test for this case.  */

          || (REG_P (inner_dest)
              && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
              && !targetm.hard_regno_mode_ok (REGNO (inner_dest),
                                              GET_MODE (inner_dest)))
          || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src))
          || (i0_not_in_src && reg_overlap_mentioned_p (i0dest, src)))
        return 0;

      /* If DEST is used in I3, it is being killed in this insn, so
         record that for later.  We have to consider paradoxical
         subregs here, since they kill the whole register, but we
         ignore partial subregs, STRICT_LOW_PART, etc.
         Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
         STACK_POINTER_REGNUM, since these are always considered to be
         live.  Similarly for ARG_POINTER_REGNUM if it is fixed.  */
      subdest = dest;
      if (GET_CODE (subdest) == SUBREG && !partial_subreg_p (subdest))
        subdest = SUBREG_REG (subdest);
      if (pi3dest_killed
          && REG_P (subdest)
          && reg_referenced_p (subdest, PATTERN (i3))
          && REGNO (subdest) != FRAME_POINTER_REGNUM
          && (HARD_FRAME_POINTER_IS_FRAME_POINTER
              || REGNO (subdest) != HARD_FRAME_POINTER_REGNUM)
          && (FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM
              || (REGNO (subdest) != ARG_POINTER_REGNUM
                  || ! fixed_regs [REGNO (subdest)]))
          && REGNO (subdest) != STACK_POINTER_REGNUM)
        {
          if (*pi3dest_killed)
            return 0;

          *pi3dest_killed = subdest;
        }
    }

  else if (GET_CODE (x) == PARALLEL)
    {
      int i;

      for (i = 0; i < XVECLEN (x, 0); i++)
        if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest, i0dest,
                                i1_not_in_src, i0_not_in_src, pi3dest_killed))
          return 0;
    }

  return 1;
}

/* Return 1 if X is an arithmetic expression that contains a multiplication
   or a division.  We don't count multiplications by powers of two here.  */
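
/* For example, (plus:SI (mult:SI (reg:SI 100) (reg:SI 101)) (reg:SI 102))
   contains a real multiplication, while (mult:SI (reg:SI 100) (const_int 8))
   does not count, since a multiplication by a power of two is expected to
   become a cheap shift.  (Illustrative RTL.)  */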

static int
contains_muldiv (rtx x)
{
  switch (GET_CODE (x))
    {
    case MOD:  case DIV:  case UMOD:  case UDIV:
      return 1;

    case MULT:
      return ! (CONST_INT_P (XEXP (x, 1))
                && pow2p_hwi (UINTVAL (XEXP (x, 1))));
    default:
      if (BINARY_P (x))
        return contains_muldiv (XEXP (x, 0))
               || contains_muldiv (XEXP (x, 1));

      if (UNARY_P (x))
        return contains_muldiv (XEXP (x, 0));

      return 0;
    }
}

/* Determine whether INSN can be used in a combination.  Return nonzero if
   not.  This is used in try_combine to detect early some cases where we
   can't perform combinations.  */

static int
cant_combine_insn_p (rtx_insn *insn)
{
  rtx set;
  rtx src, dest;

  /* If this isn't really an insn, we can't do anything.
     This can occur when flow deletes an insn that it has merged into an
     auto-increment address.  */
  if (!NONDEBUG_INSN_P (insn))
    return 1;

  /* Never combine loads and stores involving hard regs that are likely
     to be spilled.  The register allocator can usually handle such
     reg-reg moves by tying.  If we allow the combiner to make
     substitutions of likely-spilled regs, reload might die.
     As an exception, we allow combinations involving fixed regs; these are
     not available to the register allocator so there's no risk involved.  */

  set = single_set (insn);
  if (! set)
    return 0;
  src = SET_SRC (set);
  dest = SET_DEST (set);
  if (GET_CODE (src) == SUBREG)
    src = SUBREG_REG (src);
  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);
  if (REG_P (src) && REG_P (dest)
      && ((HARD_REGISTER_P (src)
           && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src))
           && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (src))))
          || (HARD_REGISTER_P (dest)
              && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dest))
              && targetm.class_likely_spilled_p (REGNO_REG_CLASS
                                                 (REGNO (dest))))))
    return 1;

  return 0;
}

struct likely_spilled_retval_info
{
  unsigned regno, nregs;
  unsigned mask;
};

/* Called via note_stores by likely_spilled_retval_p.  Remove from info->mask
   hard registers that are known to be written to / clobbered in full.  */
static void
likely_spilled_retval_1 (rtx x, const_rtx set, void *data)
{
  struct likely_spilled_retval_info *const info =
    (struct likely_spilled_retval_info *) data;
  unsigned regno, nregs;
  unsigned new_mask;

  if (!REG_P (XEXP (set, 0)))
    return;
  regno = REGNO (x);
  if (regno >= info->regno + info->nregs)
    return;
  nregs = REG_NREGS (x);
  if (regno + nregs <= info->regno)
    return;
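  /* Build a mask with one bit for each hard register written by this
     store; e.g. (an illustration) nregs == 4 yields (2U << 3) - 1 == 0xf.
     It is then shifted to line up with INFO's register range.  */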
  new_mask = (2U << (nregs - 1)) - 1;
  if (regno < info->regno)
    new_mask >>= info->regno - regno;
  else
    new_mask <<= regno - info->regno;
  info->mask &= ~new_mask;
}

/* Return nonzero iff part of the return value is live during INSN, and
   it is likely spilled.  This can happen when more than one insn is needed
   to copy the return value, e.g. when we consider combining into the
   second copy insn for a complex value.  */

static int
likely_spilled_retval_p (rtx_insn *insn)
{
  rtx_insn *use = BB_END (this_basic_block);
  rtx reg;
  rtx_insn *p;
  unsigned regno, nregs;
  /* We assume here that no machine mode needs more than
     32 hard registers when the value overlaps with a register
     for which TARGET_FUNCTION_VALUE_REGNO_P is true.  */
  unsigned mask;
  struct likely_spilled_retval_info info;

  if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use)
    return 0;
  reg = XEXP (PATTERN (use), 0);
  if (!REG_P (reg) || !targetm.calls.function_value_regno_p (REGNO (reg)))
    return 0;
  regno = REGNO (reg);
  nregs = REG_NREGS (reg);
  if (nregs == 1)
    return 0;
  mask = (2U << (nregs - 1)) - 1;

  /* Disregard parts of the return value that are set later.  */
  info.regno = regno;
  info.nregs = nregs;
  info.mask = mask;
  for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p))
    if (INSN_P (p))
      note_stores (PATTERN (p), likely_spilled_retval_1, &info);
  mask = info.mask;

  /* Check if any of the (probably) live return value registers is
     likely spilled.  */
  nregs--;
  do
    {
      if ((mask & 1 << nregs)
          && targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno + nregs)))
        return 1;
    } while (nregs--);
  return 0;
}

/* Adjust INSN after we made a change to its destination.

   Changing the destination can invalidate notes that say something about
   the results of the insn and a LOG_LINK pointing to the insn.  */

static void
adjust_for_new_dest (rtx_insn *insn)
{
  /* For notes, be conservative and simply remove them.  */
  remove_reg_equal_equiv_notes (insn);

  /* The new insn will have a destination that was previously the destination
     of an insn just above it.  Call distribute_links to make a LOG_LINK from
     the next use of that destination.  */

  rtx set = single_set (insn);
  gcc_assert (set);

  rtx reg = SET_DEST (set);

  while (GET_CODE (reg) == ZERO_EXTRACT
         || GET_CODE (reg) == STRICT_LOW_PART
         || GET_CODE (reg) == SUBREG)
    reg = XEXP (reg, 0);
  gcc_assert (REG_P (reg));

  distribute_links (alloc_insn_link (insn, REGNO (reg), NULL));

  df_insn_rescan (insn);
}

/* Return TRUE if combine can reuse reg X in mode MODE.
   ADDED_SETS is nonzero if the original set is still required.  */
static bool
can_change_dest_mode (rtx x, int added_sets, machine_mode mode)
{
  unsigned int regno;

  if (!REG_P (x))
    return false;

  /* Don't change between modes with different underlying register sizes,
     since this could lead to invalid subregs.  */
  if (maybe_ne (REGMODE_NATURAL_SIZE (mode),
                REGMODE_NATURAL_SIZE (GET_MODE (x))))
    return false;

  regno = REGNO (x);
  /* Allow hard registers if the new mode is legal, and occupies no more
     registers than the old mode.  */
  if (regno < FIRST_PSEUDO_REGISTER)
    return (targetm.hard_regno_mode_ok (regno, mode)
            && REG_NREGS (x) >= hard_regno_nregs (regno, mode));

  /* Or a pseudo that is only used once.  */
  return (regno < reg_n_sets_max
          && REG_N_SETS (regno) == 1
          && !added_sets
          && !REG_USERVAR_P (x));
}


/* Check whether X, the destination of a set, refers to part of
   the register specified by REG.  */
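/* For example, with REG == (reg:SI 100), destinations such as
   (subreg:HI (reg:SI 100) 0) or (strict_low_part (subreg:HI (reg:SI 100) 0))
   satisfy this predicate, while (reg:SI 100) itself does not.
   (Illustrative RTL.)  */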

static bool
reg_subword_p (rtx x, rtx reg)
{
  /* Check that reg is an integer mode register.  */
  if (!REG_P (reg) || GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT)
    return false;

  if (GET_CODE (x) == STRICT_LOW_PART
      || GET_CODE (x) == ZERO_EXTRACT)
    x = XEXP (x, 0);

  return GET_CODE (x) == SUBREG
         && SUBREG_REG (x) == reg
         && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT;
}

/* Delete the unconditional jump INSN and adjust the CFG correspondingly.
   Note that the INSN should be deleted *after* removing dead edges, so
   that the kept edge is the fallthrough edge for a (set (pc) (pc))
   but not for a (set (pc) (label_ref FOO)).  */

static void
update_cfg_for_uncondjump (rtx_insn *insn)
{
  basic_block bb = BLOCK_FOR_INSN (insn);
  gcc_assert (BB_END (bb) == insn);

  purge_dead_edges (bb);

  delete_insn (insn);
  if (EDGE_COUNT (bb->succs) == 1)
    {
      rtx_insn *insn;

      single_succ_edge (bb)->flags |= EDGE_FALLTHRU;

      /* Remove barriers from the footer if there are any.  */
      for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
        if (BARRIER_P (insn))
          {
            if (PREV_INSN (insn))
              SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
            else
              BB_FOOTER (bb) = NEXT_INSN (insn);
            if (NEXT_INSN (insn))
              SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
          }
        else if (LABEL_P (insn))
          break;
    }
}

/* Return whether PAT is a PARALLEL of exactly N register SETs followed
   by an arbitrary number of CLOBBERs.  */
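/* For example (N == 2), a match is
     (parallel [(set (reg:SI 100) (...))
                (set (reg:CC 17) (...))
                (clobber (scratch:SI))])
   while a PARALLEL whose first two elements are not register SETs, or
   whose trailing elements include a CLOBBER of (const_int 0), is
   rejected.  (Illustrative RTL.)  */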
static bool
is_parallel_of_n_reg_sets (rtx pat, int n)
{
  if (GET_CODE (pat) != PARALLEL)
    return false;

  int len = XVECLEN (pat, 0);
  if (len < n)
    return false;

  int i;
  for (i = 0; i < n; i++)
    if (GET_CODE (XVECEXP (pat, 0, i)) != SET
        || !REG_P (SET_DEST (XVECEXP (pat, 0, i))))
      return false;
  for ( ; i < len; i++)
    if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER
        || XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
      return false;

  return true;
}

/* Return whether INSN, a PARALLEL of N register SETs (and maybe some
   CLOBBERs), can be split into individual SETs in that order, without
   changing semantics.  */
static bool
can_split_parallel_of_n_reg_sets (rtx_insn *insn, int n)
{
  if (!insn_nothrow_p (insn))
    return false;

  rtx pat = PATTERN (insn);

  int i, j;
  for (i = 0; i < n; i++)
    {
      if (side_effects_p (SET_SRC (XVECEXP (pat, 0, i))))
        return false;

      rtx reg = SET_DEST (XVECEXP (pat, 0, i));

      for (j = i + 1; j < n; j++)
        if (reg_referenced_p (reg, XVECEXP (pat, 0, j)))
          return false;
    }

  return true;
}

/* Try to combine the insns I0, I1 and I2 into I3.
   Here I0, I1 and I2 appear earlier than I3.
   I0 and I1 can be zero; then we combine just I2 into I3, or I1 and I2 into
   I3.

   If we are combining more than two insns and the resulting insn is not
   recognized, try splitting it into two insns.  If that happens, I2 and I3
   are retained and I1/I0 are pseudo-deleted by turning them into a NOTE.
   Otherwise, I0, I1 and I2 are pseudo-deleted.

   Return 0 if the combination does not work.  Then nothing is changed.
   If we did the combination, return the insn at which combine should
   resume scanning.

   Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a
   new direct jump instruction.

   LAST_COMBINED_INSN is either I3, or some insn after I3 that was
   the I3 passed to an earlier try_combine within the same basic
   block.  */

static rtx_insn *
try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0,
             int *new_direct_jump_p, rtx_insn *last_combined_insn)
{
  /* New patterns for I3 and I2, respectively.  */
  rtx newpat, newi2pat = 0;
  rtvec newpat_vec_with_clobbers = 0;
  int substed_i2 = 0, substed_i1 = 0, substed_i0 = 0;
  /* Indicates need to preserve SET in I0, I1 or I2 in I3 if it is not
     dead.  */
  int added_sets_0, added_sets_1, added_sets_2;
  /* Total number of SETs to put into I3.  */
  int total_sets;
  /* Nonzero if I2's or I1's body now appears in I3.  */
  int i2_is_used = 0, i1_is_used = 0;
  /* INSN_CODEs for new I3, new I2, and user of condition code.  */
  int insn_code_number, i2_code_number = 0, other_code_number = 0;
  /* Contains I3 if the destination of I3 is used in its source, which means
     that the old life of I3 is being killed.  If that usage is placed into
     I2 and not in I3, a REG_DEAD note must be made.  */
  rtx i3dest_killed = 0;
  /* SET_DEST and SET_SRC of I2, I1 and I0.  */
  rtx i2dest = 0, i2src = 0, i1dest = 0, i1src = 0, i0dest = 0, i0src = 0;
  /* Copy of SET_SRC of I1 and I0, if needed.  */
  rtx i1src_copy = 0, i0src_copy = 0, i0src_copy2 = 0;
  /* Set if I2DEST was reused as a scratch register.  */
  bool i2scratch = false;
  /* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases.  */
  rtx i0pat = 0, i1pat = 0, i2pat = 0;
  /* Indicates if I2DEST or I1DEST is in I2SRC or I1SRC.  */
  int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0;
  int i0dest_in_i0src = 0, i1dest_in_i0src = 0, i2dest_in_i0src = 0;
  int i2dest_killed = 0, i1dest_killed = 0, i0dest_killed = 0;
  int i1_feeds_i2_n = 0, i0_feeds_i2_n = 0, i0_feeds_i1_n = 0;
  /* Notes that must be added to REG_NOTES in I3 and I2.  */
  rtx new_i3_notes, new_i2_notes;
  /* Notes that we substituted I3 into I2 instead of the normal case.  */
  int i3_subst_into_i2 = 0;
  /* Notes that I1, I2 or I3 is a MULT operation.  */
  int have_mult = 0;
  int swap_i2i3 = 0;
  int split_i2i3 = 0;
  int changed_i3_dest = 0;

  int maxreg;
  rtx_insn *temp_insn;
  rtx temp_expr;
  struct insn_link *link;
  rtx other_pat = 0;
  rtx new_other_notes;
  int i;
  scalar_int_mode dest_mode, temp_mode;

  /* Immediately return if any of I0, I1 or I2 are the same insn (I3 can
     never be).  */
  if (i1 == i2 || i0 == i2 || (i0 && i0 == i1))
    return 0;

  /* Only try four-insn combinations when there's high likelihood of
     success.  Look for simple insns, such as loads of constants or
     binary operations involving a constant.  */
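  /* As an illustration: a constant load such as
     (set (reg:SI 100) (const_int 42)) immediately counts as two "good"
     insns and stops the scan; a binary operation with a constant
     operand, e.g. (set (reg:SI 101) (plus:SI (reg:SI 100) (const_int 1))),
     counts as one; shifts by a non-constant amount are tallied
     separately in NSHIFT.  */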
  if (i0)
    {
      int i;
      int ngood = 0;
      int nshift = 0;
      rtx set0, set3;

      if (!flag_expensive_optimizations)
        return 0;

      for (i = 0; i < 4; i++)
        {
          rtx_insn *insn = i == 0 ? i0 : i == 1 ? i1 : i == 2 ? i2 : i3;
          rtx set = single_set (insn);
          rtx src;
          if (!set)
            continue;
          src = SET_SRC (set);
          if (CONSTANT_P (src))
            {
              ngood += 2;
              break;
            }
          else if (BINARY_P (src) && CONSTANT_P (XEXP (src, 1)))
            ngood++;
          else if (GET_CODE (src) == ASHIFT || GET_CODE (src) == ASHIFTRT
                   || GET_CODE (src) == LSHIFTRT)
            nshift++;
        }

      /* If I0 loads a memory and I3 sets the same memory, then I1 and I2
         are likely manipulating its value.  Ideally we'll be able to combine
         all four insns into a bitfield insertion of some kind.

         Note the source in I0 might be inside a sign/zero extension and the
         memory modes in I0 and I3 might be different.  So extract the address
         from the destination of I3 and search for it in the source of I0.

         In the event that there's a match but the source/dest do not actually
         refer to the same memory, the worst that happens is we try some
         combinations that we wouldn't have otherwise.  */
      if ((set0 = single_set (i0))
          /* Ensure the source of SET0 is a MEM, possibly buried inside
             an extension.  */
          && (GET_CODE (SET_SRC (set0)) == MEM
              || ((GET_CODE (SET_SRC (set0)) == ZERO_EXTEND
                   || GET_CODE (SET_SRC (set0)) == SIGN_EXTEND)
                  && GET_CODE (XEXP (SET_SRC (set0), 0)) == MEM))
          && (set3 = single_set (i3))
          /* Ensure the destination of SET3 is a MEM.  */
          && GET_CODE (SET_DEST (set3)) == MEM
          /* Would it be better to extract the base address for the MEM
             in SET3 and look for that?  I don't have cases where it matters
             but I could envision such cases.  */
          && rtx_referenced_p (XEXP (SET_DEST (set3), 0), SET_SRC (set0)))
        ngood += 2;

      if (ngood < 2 && nshift < 2)
        return 0;
    }

  /* Exit early if one of the insns involved can't be used for
     combinations.  */
  if (CALL_P (i2)
      || (i1 && CALL_P (i1))
      || (i0 && CALL_P (i0))
      || cant_combine_insn_p (i3)
      || cant_combine_insn_p (i2)
      || (i1 && cant_combine_insn_p (i1))
      || (i0 && cant_combine_insn_p (i0))
      || likely_spilled_retval_p (i3))
    return 0;

  combine_attempts++;
  undobuf.other_insn = 0;

  /* Reset the hard register usage information.  */
  CLEAR_HARD_REG_SET (newpat_used_regs);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      if (i0)
        fprintf (dump_file, "\nTrying %d, %d, %d -> %d:\n",
                 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
      else if (i1)
        fprintf (dump_file, "\nTrying %d, %d -> %d:\n",
                 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
      else
        fprintf (dump_file, "\nTrying %d -> %d:\n",
                 INSN_UID (i2), INSN_UID (i3));

      if (i0)
        dump_insn_slim (dump_file, i0);
      if (i1)
        dump_insn_slim (dump_file, i1);
      dump_insn_slim (dump_file, i2);
      dump_insn_slim (dump_file, i3);
    }

  /* If multiple insns feed into one of I2 or I3, they can be in any
     order.  To simplify the code below, reorder them in sequence.  */
  if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i2))
    std::swap (i0, i2);
  if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i1))
    std::swap (i0, i1);
  if (i1 && DF_INSN_LUID (i1) > DF_INSN_LUID (i2))
    std::swap (i1, i2);

  added_links_insn = 0;
  added_notes_insn = 0;

  /* First check for one important special case that the code below will
     not handle.  Namely, the case where I1 is zero, I2 is a PARALLEL
     and I3 is a SET whose SET_SRC is a SET_DEST in I2.  In that case,
     we may be able to replace that destination with the destination of I3.
     This occurs in the common code where we compute both a quotient and
     remainder into a structure, in which case we want to do the computation
     directly into the structure to avoid register-register copies.

     Note that this case handles both multiple sets in I2 and also cases
     where I2 has a number of CLOBBERs inside the PARALLEL.

     We make very conservative checks below and only try to handle the
     most common cases of this.  For example, we only handle the case
     where I2 and I3 are adjacent to avoid making difficult register
     usage tests.  */

  if (i1 == 0 && NONJUMP_INSN_P (i3) && GET_CODE (PATTERN (i3)) == SET
      && REG_P (SET_SRC (PATTERN (i3)))
      && REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
      && find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3)))
      && GET_CODE (PATTERN (i2)) == PARALLEL
      && ! side_effects_p (SET_DEST (PATTERN (i3)))
      /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
         below would need to check what is inside (and reg_overlap_mentioned_p
         doesn't support those codes anyway).  Don't allow those destinations;
         the resulting insn isn't likely to be recognized anyway.  */
      && GET_CODE (SET_DEST (PATTERN (i3))) != ZERO_EXTRACT
      && GET_CODE (SET_DEST (PATTERN (i3))) != STRICT_LOW_PART
      && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3)),
                                    SET_DEST (PATTERN (i3)))
      && next_active_insn (i2) == i3)
    {
      rtx p2 = PATTERN (i2);

      /* Make sure that the destination of I3,
         which we are going to substitute into one output of I2,
         is not used within another output of I2.  We must avoid making this:
         (parallel [(set (mem (reg 69)) ...)
                    (set (reg 69) ...)])
         which is not well-defined as to order of actions.
         (Besides, reload can't handle output reloads for this.)

         The problem can also happen if the dest of I3 is a memory ref,
         if another dest in I2 is an indirect memory ref.

         Neither can this PARALLEL be an asm.  We usually do not allow
         combining an asm (see can_combine_p), so do not do so here
         either.  */
      bool ok = true;
      for (i = 0; ok && i < XVECLEN (p2, 0); i++)
        {
          if ((GET_CODE (XVECEXP (p2, 0, i)) == SET
               || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER)
              && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3)),
                                          SET_DEST (XVECEXP (p2, 0, i))))
            ok = false;
          else if (GET_CODE (XVECEXP (p2, 0, i)) == SET
                   && GET_CODE (SET_SRC (XVECEXP (p2, 0, i))) == ASM_OPERANDS)
            ok = false;
        }

      if (ok)
        for (i = 0; i < XVECLEN (p2, 0); i++)
          if (GET_CODE (XVECEXP (p2, 0, i)) == SET
              && SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3)))
            {
              combine_merges++;

              subst_insn = i3;
              subst_low_luid = DF_INSN_LUID (i2);

              added_sets_2 = added_sets_1 = added_sets_0 = 0;
              i2src = SET_SRC (XVECEXP (p2, 0, i));
              i2dest = SET_DEST (XVECEXP (p2, 0, i));
              i2dest_killed = dead_or_set_p (i2, i2dest);

              /* Replace the dest in I2 with our dest and make the resulting
                 insn the new pattern for I3.  Then skip to where we validate
                 the pattern.  Everything was set up above.  */
              SUBST (SET_DEST (XVECEXP (p2, 0, i)), SET_DEST (PATTERN (i3)));
              newpat = p2;
              i3_subst_into_i2 = 1;
              goto validate_replacement;
            }
    }

  /* If I2 is setting a pseudo to a constant and I3 is setting some
     sub-part of it to another constant, merge them by making a new
     constant.  */
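  /* For instance (an illustrative little-endian sketch): if I2 is
     (set (reg:SI 100) (const_int 0x12345678)) and I3 is
     (set (subreg:HI (reg:SI 100) 0) (const_int 0x1234)), the two fold
     into a single load of the constant 0x12341234.  */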
  if (i1 == 0
      && (temp_expr = single_set (i2)) != 0
      && is_a <scalar_int_mode> (GET_MODE (SET_DEST (temp_expr)), &temp_mode)
      && CONST_SCALAR_INT_P (SET_SRC (temp_expr))
      && GET_CODE (PATTERN (i3)) == SET
      && CONST_SCALAR_INT_P (SET_SRC (PATTERN (i3)))
      && reg_subword_p (SET_DEST (PATTERN (i3)), SET_DEST (temp_expr)))
    {
      rtx dest = SET_DEST (PATTERN (i3));
      rtx temp_dest = SET_DEST (temp_expr);
      int offset = -1;
      int width = 0;

      if (GET_CODE (dest) == ZERO_EXTRACT)
        {
          if (CONST_INT_P (XEXP (dest, 1))
              && CONST_INT_P (XEXP (dest, 2))
              && is_a <scalar_int_mode> (GET_MODE (XEXP (dest, 0)),
                                         &dest_mode))
            {
              width = INTVAL (XEXP (dest, 1));
              offset = INTVAL (XEXP (dest, 2));
              dest = XEXP (dest, 0);
              if (BITS_BIG_ENDIAN)
                offset = GET_MODE_PRECISION (dest_mode) - width - offset;
            }
        }
      else
        {
          if (GET_CODE (dest) == STRICT_LOW_PART)
            dest = XEXP (dest, 0);
          if (is_a <scalar_int_mode> (GET_MODE (dest), &dest_mode))
            {
              width = GET_MODE_PRECISION (dest_mode);
              offset = 0;
            }
        }

      if (offset >= 0)
        {
          /* If this is the low part, we're done.  */
          if (subreg_lowpart_p (dest))
            ;
          /* Handle the case where inner is twice the size of outer.  */
          else if (GET_MODE_PRECISION (temp_mode)
                   == 2 * GET_MODE_PRECISION (dest_mode))
            offset += GET_MODE_PRECISION (dest_mode);
          /* Otherwise give up for now.  */
          else
            offset = -1;
        }

      if (offset >= 0)
        {
          rtx inner = SET_SRC (PATTERN (i3));
          rtx outer = SET_SRC (temp_expr);

          wide_int o = wi::insert (rtx_mode_t (outer, temp_mode),
                                   rtx_mode_t (inner, dest_mode),
                                   offset, width);

          combine_merges++;
          subst_insn = i3;
          subst_low_luid = DF_INSN_LUID (i2);
          added_sets_2 = added_sets_1 = added_sets_0 = 0;
          i2dest = temp_dest;
          i2dest_killed = dead_or_set_p (i2, i2dest);

          /* Replace the source in I2 with the new constant and make the
             resulting insn the new pattern for I3.  Then skip to where we
             validate the pattern.  Everything was set up above.  */
          SUBST (SET_SRC (temp_expr),
                 immed_wide_int_const (o, temp_mode));

          newpat = PATTERN (i2);

          /* The dest of I3 has been replaced with the dest of I2.  */
          changed_i3_dest = 1;
          goto validate_replacement;
        }
    }

  /* If we have no I1 and I2 looks like:
         (parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
                    (set Y OP)])
     make up a dummy I1 that is
         (set Y OP)
     and change I2 to be
         (set (reg:CC X) (compare:CC Y (const_int 0)))

     (We can ignore any trailing CLOBBERs.)

     This undoes a previous combination and allows us to match a branch-and-
     decrement insn.  */

  if (!HAVE_cc0 && i1 == 0
      && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
      && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 0))))
          == MODE_CC)
      && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2), 0, 0))) == COMPARE
      && XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 1) == const0_rtx
      && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0),
                      SET_SRC (XVECEXP (PATTERN (i2), 0, 1)))
      && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
      && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
    {
      /* We make I1 with the same INSN_UID as I2.  This gives it
         the same DF_INSN_LUID for value tracking.  Our fake I1 will
         never appear in the insn stream so giving it the same INSN_UID
         as I2 will not cause a problem.  */

      i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
                         XVECEXP (PATTERN (i2), 0, 1), INSN_LOCATION (i2),
                         -1, NULL_RTX);
      INSN_UID (i1) = INSN_UID (i2);

      SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0));
      SUBST (XEXP (SET_SRC (PATTERN (i2)), 0),
             SET_DEST (PATTERN (i1)));
      unsigned int regno = REGNO (SET_DEST (PATTERN (i1)));
      SUBST_LINK (LOG_LINKS (i2),
                  alloc_insn_link (i1, regno, LOG_LINKS (i2)));
    }

  /* If I2 is a PARALLEL of two SETs of REGs (and perhaps some CLOBBERs),
     make those two SETs separate I1 and I2 insns, and make an I0 that is
     the original I1.  */
  if (!HAVE_cc0 && i0 == 0
      && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
      && can_split_parallel_of_n_reg_sets (i2, 2)
      && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
      && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3)
      && !reg_set_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
      && !reg_set_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
    {
      /* If there is no I1, there is no I0 either.  */
      i0 = i1;

      /* We make I1 with the same INSN_UID as I2.  This gives it
         the same DF_INSN_LUID for value tracking.  Our fake I1 will
         never appear in the insn stream so giving it the same INSN_UID
         as I2 will not cause a problem.  */

      i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
                         XVECEXP (PATTERN (i2), 0, 0), INSN_LOCATION (i2),
                         -1, NULL_RTX);
      INSN_UID (i1) = INSN_UID (i2);

      SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 1));
    }

  /* Verify that I2 and maybe I1 and I0 can be combined into I3.  */
  if (!can_combine_p (i2, i3, i0, i1, NULL, NULL, &i2dest, &i2src))
    {
      if (dump_file)
        fprintf (dump_file, "Can't combine i2 into i3\n");
      undo_all ();
      return 0;
    }
  if (i1 && !can_combine_p (i1, i3, i0, NULL, i2, NULL, &i1dest, &i1src))
    {
      if (dump_file)
        fprintf (dump_file, "Can't combine i1 into i3\n");
      undo_all ();
      return 0;
    }
  if (i0 && !can_combine_p (i0, i3, NULL, NULL, i1, i2, &i0dest, &i0src))
    {
      if (dump_file)
        fprintf (dump_file, "Can't combine i0 into i3\n");
      undo_all ();
      return 0;
    }

  /* Record whether I2DEST is used in I2SRC and similarly for the other
     cases.  Knowing this will help in register status updating below.  */
  i2dest_in_i2src = reg_overlap_mentioned_p (i2dest, i2src);
  i1dest_in_i1src = i1 && reg_overlap_mentioned_p (i1dest, i1src);
  i2dest_in_i1src = i1 && reg_overlap_mentioned_p (i2dest, i1src);
  i0dest_in_i0src = i0 && reg_overlap_mentioned_p (i0dest, i0src);
  i1dest_in_i0src = i0 && reg_overlap_mentioned_p (i1dest, i0src);
  i2dest_in_i0src = i0 && reg_overlap_mentioned_p (i2dest, i0src);
  i2dest_killed = dead_or_set_p (i2, i2dest);
  i1dest_killed = i1 && dead_or_set_p (i1, i1dest);
  i0dest_killed = i0 && dead_or_set_p (i0, i0dest);

  /* For the earlier insns, determine which of the subsequent ones they
     feed.  */
  i1_feeds_i2_n = i1 && insn_a_feeds_b (i1, i2);
  i0_feeds_i1_n = i0 && insn_a_feeds_b (i0, i1);
  i0_feeds_i2_n = (i0 && (!i0_feeds_i1_n ? insn_a_feeds_b (i0, i2)
                          : (!reg_overlap_mentioned_p (i1dest, i0dest)
                             && reg_overlap_mentioned_p (i0dest, i2src))));

  /* Ensure that I3's pattern can be the destination of combines.  */
  if (! combinable_i3pat (i3, &PATTERN (i3), i2dest, i1dest, i0dest,
                          i1 && i2dest_in_i1src && !i1_feeds_i2_n,
                          i0 && ((i2dest_in_i0src && !i0_feeds_i2_n)
                                 || (i1dest_in_i0src && !i0_feeds_i1_n)),
                          &i3dest_killed))
    {
      undo_all ();
      return 0;
    }

  /* See if any of the insns is a MULT operation.  Unless one is, we will
     reject a combination that is, since it must be slower.  Be conservative
     here.  */
  if (GET_CODE (i2src) == MULT
      || (i1 != 0 && GET_CODE (i1src) == MULT)
      || (i0 != 0 && GET_CODE (i0src) == MULT)
      || (GET_CODE (PATTERN (i3)) == SET
          && GET_CODE (SET_SRC (PATTERN (i3))) == MULT))
    have_mult = 1;

  /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
     We used to do this EXCEPT in one case: I3 has a post-inc in an
     output operand.  However, that exception can give rise to insns like
         mov r3,(r3)+
     which is a famous insn on the PDP-11 where the value of r3 used as the
     source was model-dependent.  Avoid this sort of thing.  */

#if 0
  if (!(GET_CODE (PATTERN (i3)) == SET
        && REG_P (SET_SRC (PATTERN (i3)))
        && MEM_P (SET_DEST (PATTERN (i3)))
        && (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC
            || GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC)))
    /* It's not the exception.  */
#endif
  if (AUTO_INC_DEC)
    {
      rtx link;
      for (link = REG_NOTES (i3); link; link = XEXP (link, 1))
        if (REG_NOTE_KIND (link) == REG_INC
            && (reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i2))
                || (i1 != 0
                    && reg_overlap_mentioned_p (XEXP (link, 0),
                                                PATTERN (i1)))))
          {
            undo_all ();
            return 0;
          }
    }

  /* See if the SETs in I1 or I2 need to be kept around in the merged
     instruction: whenever the value set there is still needed past I3.
     For the SET in I2, this is easy: we see if I2DEST dies or is set in I3.

     For the SET in I1, we have two cases: if I1 and I2 independently feed
     into I3, the set in I1 needs to be kept around unless I1DEST dies
     or is set in I3.  Otherwise (if I1 feeds I2 which feeds I3), the set
     in I1 needs to be kept around unless I1DEST dies or is set in either
     I2 or I3.  The same considerations apply to I0.  */

  added_sets_2 = !dead_or_set_p (i3, i2dest);

  if (i1)
    added_sets_1 = !(dead_or_set_p (i3, i1dest)
                     || (i1_feeds_i2_n && dead_or_set_p (i2, i1dest)));
  else
    added_sets_1 = 0;

  if (i0)
    added_sets_0 = !(dead_or_set_p (i3, i0dest)
                     || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest))
                     || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
                         && dead_or_set_p (i2, i0dest)));
  else
    added_sets_0 = 0;

  /* We are about to copy insns for the case where they need to be kept
     around.  Check that they can be copied in the merged instruction.  */

  if (targetm.cannot_copy_insn_p
      && ((added_sets_2 && targetm.cannot_copy_insn_p (i2))
          || (i1 && added_sets_1 && targetm.cannot_copy_insn_p (i1))
          || (i0 && added_sets_0 && targetm.cannot_copy_insn_p (i0))))
    {
      undo_all ();
      return 0;
    }

  /* If the set in I2 needs to be kept around, we must make a copy of
     PATTERN (I2), so that when we substitute I1SRC for I1DEST in
     PATTERN (I2), we are only substituting for the original I1DEST, not into
     an already-substituted copy.  This also prevents making self-referential
     rtx.  If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
     I2DEST.  */

  if (added_sets_2)
    {
      if (GET_CODE (PATTERN (i2)) == PARALLEL)
        i2pat = gen_rtx_SET (i2dest, copy_rtx (i2src));
      else
        i2pat = copy_rtx (PATTERN (i2));
    }

  if (added_sets_1)
    {
      if (GET_CODE (PATTERN (i1)) == PARALLEL)
        i1pat = gen_rtx_SET (i1dest, copy_rtx (i1src));
      else
        i1pat = copy_rtx (PATTERN (i1));
    }

  if (added_sets_0)
    {
      if (GET_CODE (PATTERN (i0)) == PARALLEL)
        i0pat = gen_rtx_SET (i0dest, copy_rtx (i0src));
      else
        i0pat = copy_rtx (PATTERN (i0));
    }

  combine_merges++;

  /* Substitute in the latest insn for the regs set by the earlier ones.  */

  maxreg = max_reg_num ();

  subst_insn = i3;

  /* Many machines that don't use CC0 have insns that can both perform an
     arithmetic operation and set the condition code.  These operations will
     be represented as a PARALLEL with the first element of the vector
     being a COMPARE of an arithmetic operation with the constant zero.
     The second element of the vector will set some pseudo to the result
     of the same arithmetic operation.  If we simplify the COMPARE, we won't
     match such a pattern and so will generate an extra insn.  Here we test
     for this case, where both the comparison and the operation result are
     needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
     I2SRC.  Later we will make the PARALLEL that contains I2.  */
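
  /* Sketch: with I2 (set (reg:SI 100) (plus:SI (reg:SI 101) (const_int -1)))
     and I3 (set (reg:CCZ 17) (compare:CCZ (reg:SI 100) (const_int 0))),
     the goal is a single PARALLEL performing the decrement and setting
     the condition code at once.  (Illustrative modes and registers.)  */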
3224
3225 if (!HAVE_cc0 && i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET
3226 && GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE
3227 && CONST_INT_P (XEXP (SET_SRC (PATTERN (i3)), 1))
3228 && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest))
3229 {
3230 rtx newpat_dest;
3231 rtx *cc_use_loc = NULL;
3232 rtx_insn *cc_use_insn = NULL;
3233 rtx op0 = i2src, op1 = XEXP (SET_SRC (PATTERN (i3)), 1);
3234 machine_mode compare_mode, orig_compare_mode;
3235 enum rtx_code compare_code = UNKNOWN, orig_compare_code = UNKNOWN;
3236 scalar_int_mode mode;
3237
3238 newpat = PATTERN (i3);
3239 newpat_dest = SET_DEST (newpat);
3240 compare_mode = orig_compare_mode = GET_MODE (newpat_dest);
3241
3242 if (undobuf.other_insn == 0
3243 && (cc_use_loc = find_single_use (SET_DEST (newpat), i3,
3244 &cc_use_insn)))
3245 {
3246 compare_code = orig_compare_code = GET_CODE (*cc_use_loc);
3247 if (is_a <scalar_int_mode> (GET_MODE (i2dest), &mode))
3248 compare_code = simplify_compare_const (compare_code, mode,
3249 op0, &op1);
3250 target_canonicalize_comparison (&compare_code, &op0, &op1, 1);
3251 }
3252
3253 /* Do the rest only if op1 is const0_rtx, which may be the
3254 result of simplification. */
3255 if (op1 == const0_rtx)
3256 {
3257 /* If a single use of the CC is found, prepare to modify it
3258 when SELECT_CC_MODE returns a new CC-class mode, or when
3259 the above simplify_compare_const() returned a new comparison
3260 operator. undobuf.other_insn is assigned the CC use insn
3261 when modifying it. */
3262 if (cc_use_loc)
3263 {
3264 #ifdef SELECT_CC_MODE
3265 machine_mode new_mode
3266 = SELECT_CC_MODE (compare_code, op0, op1);
3267 if (new_mode != orig_compare_mode
3268 && can_change_dest_mode (SET_DEST (newpat),
3269 added_sets_2, new_mode))
3270 {
3271 unsigned int regno = REGNO (newpat_dest);
3272 compare_mode = new_mode;
3273 if (regno < FIRST_PSEUDO_REGISTER)
3274 newpat_dest = gen_rtx_REG (compare_mode, regno);
3275 else
3276 {
3277 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
3278 newpat_dest = regno_reg_rtx[regno];
3279 }
3280 }
3281 #endif
3282 /* Cases for modifying the CC-using comparison. */
3283 if (compare_code != orig_compare_code
3284 /* ??? Do we need to verify the zero rtx? */
3285 && XEXP (*cc_use_loc, 1) == const0_rtx)
3286 {
3287 /* Replace cc_use_loc with entire new RTX. */
3288 SUBST (*cc_use_loc,
3289 gen_rtx_fmt_ee (compare_code, compare_mode,
3290 newpat_dest, const0_rtx));
3291 undobuf.other_insn = cc_use_insn;
3292 }
3293 else if (compare_mode != orig_compare_mode)
3294 {
3295 /* Just replace the CC reg with a new mode. */
3296 SUBST (XEXP (*cc_use_loc, 0), newpat_dest);
3297 undobuf.other_insn = cc_use_insn;
3298 }
3299 }
3300
3301 /* Now we modify the current newpat:
3302 First, SET_DEST(newpat) is updated if the CC mode has been
3303 altered. For targets without SELECT_CC_MODE, this should be
3304 optimized away. */
3305 if (compare_mode != orig_compare_mode)
3306 SUBST (SET_DEST (newpat), newpat_dest);
3307 /* This is always done to propagate i2src into newpat. */
3308 SUBST (SET_SRC (newpat),
3309 gen_rtx_COMPARE (compare_mode, op0, op1));
3310 /* Create new version of i2pat if needed; the below PARALLEL
3311 creation needs this to work correctly. */
3312 if (! rtx_equal_p (i2src, op0))
3313 i2pat = gen_rtx_SET (i2dest, op0);
3314 i2_is_used = 1;
3315 }
3316 }
3317
3318 if (i2_is_used == 0)
3319 {
3320 /* It is possible that the source of I2 or I1 may be performing
3321 an unneeded operation, such as a ZERO_EXTEND of something
3322 that is known to have the high part zero. Handle that case
3323 by letting subst look at the inner insns.
3324
3325 Another way to do this would be to have a function that tries
3326 to simplify a single insn instead of merging two or more
3327 insns. We don't do this because of the potential of infinite
3328 loops and because of the potential extra memory required.
3329 However, doing it the way we are is a bit of a kludge and
3330 doesn't catch all cases.
3331
3332 But only do this if -fexpensive-optimizations since it slows
3333 things down and doesn't usually win.
3334
3335 This is not done in the COMPARE case above because the
3336 unmodified I2PAT is used in the PARALLEL and so a pattern
3337 with a modified I2SRC would not match. */
3338
3339 if (flag_expensive_optimizations)
3340 {
3341 /* Pass pc_rtx so no substitutions are done, just
3342 simplifications. */
3343 if (i1)
3344 {
3345 subst_low_luid = DF_INSN_LUID (i1);
3346 i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0, 0);
3347 }
3348
3349 subst_low_luid = DF_INSN_LUID (i2);
3350 i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0, 0);
3351 }
3352
3353 n_occurrences = 0; /* `subst' counts here */
3354 subst_low_luid = DF_INSN_LUID (i2);
3355
3356 /* If I1 feeds into I2 and I1DEST is in I1SRC, we need to make a unique
3357 copy of I2SRC each time we substitute it, in order to avoid creating
3358 self-referential RTL when we will be substituting I1SRC for I1DEST
3359 later. Likewise if I0 feeds into I2, either directly or indirectly
3360 through I1, and I0DEST is in I0SRC. */
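/* E.g. if I1 is (set (reg:SI 100) (plus:SI (reg:SI 100) (const_int 4)))
   and feeds I2, reusing one shared copy of I2SRC here and then
   substituting I1SRC for reg 100 inside it would splice I1SRC into
   itself. (An illustrative, made-up example.) */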
3361 newpat = subst (PATTERN (i3), i2dest, i2src, 0, 0,
3362 (i1_feeds_i2_n && i1dest_in_i1src)
3363 || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3364 && i0dest_in_i0src));
3365 substed_i2 = 1;
3366
3367 /* Record whether I2's body now appears within I3's body. */
3368 i2_is_used = n_occurrences;
3369 }
3370
3371 /* If we already got a failure, don't try to do more. Otherwise, try to
3372 substitute I1 if we have it. */
3373
3374 if (i1 && GET_CODE (newpat) != CLOBBER)
3375 {
3376 /* Check that an autoincrement side-effect on I1 has not been lost.
3377 This happens if I1DEST is mentioned in I2 and dies there, and
3378 has disappeared from the new pattern. */
3379 if ((FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3380 && i1_feeds_i2_n
3381 && dead_or_set_p (i2, i1dest)
3382 && !reg_overlap_mentioned_p (i1dest, newpat))
3383 /* Before we can do this substitution, we must redo the test done
3384 above (see detailed comments there) that ensures I1DEST isn't
3385 mentioned in any SETs in NEWPAT that are field assignments. */
3386 || !combinable_i3pat (NULL, &newpat, i1dest, NULL_RTX, NULL_RTX,
3387 0, 0, 0))
3388 {
3389 undo_all ();
3390 return 0;
3391 }
3392
3393 n_occurrences = 0;
3394 subst_low_luid = DF_INSN_LUID (i1);
3395
3396 /* If the following substitution will modify I1SRC, make a copy of it
3397 for the case where it is substituted for I1DEST in I2PAT later. */
3398 if (added_sets_2 && i1_feeds_i2_n)
3399 i1src_copy = copy_rtx (i1src);
3400
3401 /* If I0 feeds into I1 and I0DEST is in I0SRC, we need to make a unique
3402 copy of I1SRC each time we substitute it, in order to avoid creating
3403 self-referential RTL when we will be substituting I0SRC for I0DEST
3404 later. */
3405 newpat = subst (newpat, i1dest, i1src, 0, 0,
3406 i0_feeds_i1_n && i0dest_in_i0src);
3407 substed_i1 = 1;
3408
3409 /* Record whether I1's body now appears within I3's body. */
3410 i1_is_used = n_occurrences;
3411 }
3412
3413 /* Likewise for I0 if we have it. */
3414
3415 if (i0 && GET_CODE (newpat) != CLOBBER)
3416 {
3417 if ((FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3418 && ((i0_feeds_i2_n && dead_or_set_p (i2, i0dest))
3419 || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest)))
3420 && !reg_overlap_mentioned_p (i0dest, newpat))
3421 || !combinable_i3pat (NULL, &newpat, i0dest, NULL_RTX, NULL_RTX,
3422 0, 0, 0))
3423 {
3424 undo_all ();
3425 return 0;
3426 }
3427
3428 /* If the following substitution will modify I0SRC, make a copy of it
3429 for the case where it is substituted for I0DEST in I1PAT later. */
3430 if (added_sets_1 && i0_feeds_i1_n)
3431 i0src_copy = copy_rtx (i0src);
3432 /* And a copy for I0DEST in I2PAT substitution. */
3433 if (added_sets_2 && ((i0_feeds_i1_n && i1_feeds_i2_n)
3434 || (i0_feeds_i2_n)))
3435 i0src_copy2 = copy_rtx (i0src);
3436
3437 n_occurrences = 0;
3438 subst_low_luid = DF_INSN_LUID (i0);
3439 newpat = subst (newpat, i0dest, i0src, 0, 0, 0);
3440 substed_i0 = 1;
3441 }
3442
3443 /* Fail if an autoincrement side-effect has been duplicated. Be careful
3444 to count all the ways that I2SRC and I1SRC can be used. */
3445 if ((FIND_REG_INC_NOTE (i2, NULL_RTX) != 0
3446 && i2_is_used + added_sets_2 > 1)
3447 || (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3448 && (i1_is_used + added_sets_1 + (added_sets_2 && i1_feeds_i2_n)
3449 > 1))
3450 || (i0 != 0 && FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3451 && (n_occurrences + added_sets_0
3452 + (added_sets_1 && i0_feeds_i1_n)
3453 + (added_sets_2 && i0_feeds_i2_n)
3454 > 1))
3455 /* Fail if we tried to make a new register. */
3456 || max_reg_num () != maxreg
3457 /* Fail if we couldn't do something and have a CLOBBER. */
3458 || GET_CODE (newpat) == CLOBBER
3459 /* Fail if this new pattern is a MULT and we didn't have one before
3460 at the outer level. */
3461 || (GET_CODE (newpat) == SET && GET_CODE (SET_SRC (newpat)) == MULT
3462 && ! have_mult))
3463 {
3464 undo_all ();
3465 return 0;
3466 }
3467
3468 /* If the actions of the earlier insns must be kept
3469 in addition to substituting them into the latest one,
3470 we must make a new PARALLEL for the latest insn
3471 to hold the additional SETs. */
3472
3473 if (added_sets_0 || added_sets_1 || added_sets_2)
3474 {
3475 int extra_sets = added_sets_0 + added_sets_1 + added_sets_2;
3476 combine_extras++;
3477
3478 if (GET_CODE (newpat) == PARALLEL)
3479 {
3480 rtvec old = XVEC (newpat, 0);
3481 total_sets = XVECLEN (newpat, 0) + extra_sets;
3482 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3483 memcpy (XVEC (newpat, 0)->elem, &old->elem[0],
3484 sizeof (old->elem[0]) * old->num_elem);
3485 }
3486 else
3487 {
3488 rtx old = newpat;
3489 total_sets = 1 + extra_sets;
3490 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3491 XVECEXP (newpat, 0, 0) = old;
3492 }
3493
3494 if (added_sets_0)
3495 XVECEXP (newpat, 0, --total_sets) = i0pat;
3496
3497 if (added_sets_1)
3498 {
3499 rtx t = i1pat;
3500 if (i0_feeds_i1_n)
3501 t = subst (t, i0dest, i0src_copy ? i0src_copy : i0src, 0, 0, 0);
3502
3503 XVECEXP (newpat, 0, --total_sets) = t;
3504 }
3505 if (added_sets_2)
3506 {
3507 rtx t = i2pat;
3508 if (i1_feeds_i2_n)
3509 t = subst (t, i1dest, i1src_copy ? i1src_copy : i1src, 0, 0,
3510 i0_feeds_i1_n && i0dest_in_i0src);
3511 if ((i0_feeds_i1_n && i1_feeds_i2_n) || i0_feeds_i2_n)
3512 t = subst (t, i0dest, i0src_copy2 ? i0src_copy2 : i0src, 0, 0, 0);
3513
3514 XVECEXP (newpat, 0, --total_sets) = t;
3515 }
3516 }
3517
3518 validate_replacement:
3519
3520 /* Note which hard regs this insn has as inputs. */
3521 mark_used_regs_combine (newpat);
3522
3523 /* If recog_for_combine fails, it strips existing clobbers. If we'll
3524 consider splitting this pattern, we might need these clobbers. */
3525 if (i1 && GET_CODE (newpat) == PARALLEL
3526 && GET_CODE (XVECEXP (newpat, 0, XVECLEN (newpat, 0) - 1)) == CLOBBER)
3527 {
3528 int len = XVECLEN (newpat, 0);
3529
3530 newpat_vec_with_clobbers = rtvec_alloc (len);
3531 for (i = 0; i < len; i++)
3532 RTVEC_ELT (newpat_vec_with_clobbers, i) = XVECEXP (newpat, 0, i);
3533 }
3534
3535 /* We have recognized nothing yet. */
3536 insn_code_number = -1;
3537
3538 /* See if this is a PARALLEL of two SETs where one SET's destination is
3539 a register that is unused and this isn't marked as an instruction that
3540 might trap in an EH region. In that case, we just need the other SET.
3541 We prefer this over the PARALLEL.
3542
3543 This can occur when simplifying a divmod insn. We *must* test for this
3544 case here because the code below that splits two independent SETs doesn't
3545 handle this case correctly when it updates the register status.
3546
3547 It's pointless doing this if we originally had two sets, one from
3548 i3, and one from i2. Combining then splitting the parallel results
3549 in the original i2 again plus an invalid insn (which we delete).
3550 The net effect is only to move instructions around, which makes
3551 debug info less accurate.
3552
3553 If the remaining SET came from I2 its destination should not be used
3554 between I2 and I3. See PR82024. */
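/* A made-up illustration of the divmod case:
     (parallel [(set (reg:SI 100) (div:SI (reg:SI 102) (reg:SI 103)))
                (set (reg:SI 101) (mod:SI (reg:SI 102) (reg:SI 103)))])
   where I3 carries a REG_UNUSED note for reg 101 reduces to just the
   first SET. */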
3555
3556 if (!(added_sets_2 && i1 == 0)
3557 && is_parallel_of_n_reg_sets (newpat, 2)
3558 && asm_noperands (newpat) < 0)
3559 {
3560 rtx set0 = XVECEXP (newpat, 0, 0);
3561 rtx set1 = XVECEXP (newpat, 0, 1);
3562 rtx oldpat = newpat;
3563
3564 if (((REG_P (SET_DEST (set1))
3565 && find_reg_note (i3, REG_UNUSED, SET_DEST (set1)))
3566 || (GET_CODE (SET_DEST (set1)) == SUBREG
3567 && find_reg_note (i3, REG_UNUSED, SUBREG_REG (SET_DEST (set1)))))
3568 && insn_nothrow_p (i3)
3569 && !side_effects_p (SET_SRC (set1)))
3570 {
3571 newpat = set0;
3572 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3573 }
3574
3575 else if (((REG_P (SET_DEST (set0))
3576 && find_reg_note (i3, REG_UNUSED, SET_DEST (set0)))
3577 || (GET_CODE (SET_DEST (set0)) == SUBREG
3578 && find_reg_note (i3, REG_UNUSED,
3579 SUBREG_REG (SET_DEST (set0)))))
3580 && insn_nothrow_p (i3)
3581 && !side_effects_p (SET_SRC (set0)))
3582 {
3583 rtx dest = SET_DEST (set1);
3584 if (GET_CODE (dest) == SUBREG)
3585 dest = SUBREG_REG (dest);
3586 if (!reg_used_between_p (dest, i2, i3))
3587 {
3588 newpat = set1;
3589 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3590
3591 if (insn_code_number >= 0)
3592 changed_i3_dest = 1;
3593 }
3594 }
3595
3596 if (insn_code_number < 0)
3597 newpat = oldpat;
3598 }
3599
3600 /* Is the result of combination a valid instruction? */
3601 if (insn_code_number < 0)
3602 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3603
3604 /* If we were combining three insns and the result is a simple SET
3605 with no ASM_OPERANDS that wasn't recognized, try to split it into two
3606 insns. There are two ways to do this. It can be split using a
3607 machine-specific method (like when you have an addition of a large
3608 constant) or by combine in the function find_split_point. */
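/* For example (a sketch, not specific to any target): if
     (set (reg:SI 100) (plus:SI (reg:SI 101) (const_int 0x12345)))
   is not a valid insn because the constant is too large, the MD
   splitter may break it into two additions of smaller constants, and
   combine_split_insns returns that two-insn sequence. */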
3609
3610 if (i1 && insn_code_number < 0 && GET_CODE (newpat) == SET
3611 && asm_noperands (newpat) < 0)
3612 {
3613 rtx parallel, *split;
3614 rtx_insn *m_split_insn;
3615
3616 /* See if the MD file can split NEWPAT. If it can't, see if letting it
3617 use I2DEST as a scratch register will help. In the latter case,
3618 convert I2DEST to the mode of the source of NEWPAT if we can. */
3619
3620 m_split_insn = combine_split_insns (newpat, i3);
3621
3622 /* We can only use I2DEST as a scratch reg if it doesn't overlap any
3623 inputs of NEWPAT. */
3624
3625 /* ??? If I2DEST is not safe, and I1DEST exists, then it would be
3626 possible to try that as a scratch reg. This would require adding
3627 more code to make it work though. */
3628
3629 if (m_split_insn == 0 && ! reg_overlap_mentioned_p (i2dest, newpat))
3630 {
3631 machine_mode new_mode = GET_MODE (SET_DEST (newpat));
3632
3633 /* ??? Reusing i2dest without resetting the reg_stat entry for it
3634 (temporarily, until we are committed to this instruction
3635 combination) does not work: for example, any call to nonzero_bits
3636 on the register (from a splitter in the MD file, for example)
3637 will get the old information, which is invalid.
3638
3639 Since nowadays we can create registers during combine just fine,
3640 we should just create a new one here, not reuse i2dest. */
3641
3642 /* First try to split using the original register as a
3643 scratch register. */
3644 parallel = gen_rtx_PARALLEL (VOIDmode,
3645 gen_rtvec (2, newpat,
3646 gen_rtx_CLOBBER (VOIDmode,
3647 i2dest)));
3648 m_split_insn = combine_split_insns (parallel, i3);
3649
3650 /* If that didn't work, try changing the mode of I2DEST if
3651 we can. */
3652 if (m_split_insn == 0
3653 && new_mode != GET_MODE (i2dest)
3654 && new_mode != VOIDmode
3655 && can_change_dest_mode (i2dest, added_sets_2, new_mode))
3656 {
3657 machine_mode old_mode = GET_MODE (i2dest);
3658 rtx ni2dest;
3659
3660 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3661 ni2dest = gen_rtx_REG (new_mode, REGNO (i2dest));
3662 else
3663 {
3664 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], new_mode);
3665 ni2dest = regno_reg_rtx[REGNO (i2dest)];
3666 }
3667
3668 parallel = (gen_rtx_PARALLEL
3669 (VOIDmode,
3670 gen_rtvec (2, newpat,
3671 gen_rtx_CLOBBER (VOIDmode,
3672 ni2dest))));
3673 m_split_insn = combine_split_insns (parallel, i3);
3674
3675 if (m_split_insn == 0
3676 && REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
3677 {
3678 struct undo *buf;
3679
3680 adjust_reg_mode (regno_reg_rtx[REGNO (i2dest)], old_mode);
3681 buf = undobuf.undos;
3682 undobuf.undos = buf->next;
3683 buf->next = undobuf.frees;
3684 undobuf.frees = buf;
3685 }
3686 }
3687
3688 i2scratch = m_split_insn != 0;
3689 }
3690
3691 /* If recog_for_combine has discarded clobbers, try to use them
3692 again for the split. */
3693 if (m_split_insn == 0 && newpat_vec_with_clobbers)
3694 {
3695 parallel = gen_rtx_PARALLEL (VOIDmode, newpat_vec_with_clobbers);
3696 m_split_insn = combine_split_insns (parallel, i3);
3697 }
3698
3699 if (m_split_insn && NEXT_INSN (m_split_insn) == NULL_RTX)
3700 {
3701 rtx m_split_pat = PATTERN (m_split_insn);
3702 insn_code_number = recog_for_combine (&m_split_pat, i3, &new_i3_notes);
3703 if (insn_code_number >= 0)
3704 newpat = m_split_pat;
3705 }
3706 else if (m_split_insn && NEXT_INSN (NEXT_INSN (m_split_insn)) == NULL_RTX
3707 && (next_nonnote_nondebug_insn (i2) == i3
3708 || !modified_between_p (PATTERN (m_split_insn), i2, i3)))
3709 {
3710 rtx i2set, i3set;
3711 rtx newi3pat = PATTERN (NEXT_INSN (m_split_insn));
3712 newi2pat = PATTERN (m_split_insn);
3713
3714 i3set = single_set (NEXT_INSN (m_split_insn));
3715 i2set = single_set (m_split_insn);
3716
3717 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3718
3719 /* If I2 or I3 has multiple SETs, we won't know how to track
3720 register status, so don't use these insns. If I2's destination
3721 is used between I2 and I3, we also can't use these insns. */
3722
3723 if (i2_code_number >= 0 && i2set && i3set
3724 && (next_nonnote_nondebug_insn (i2) == i3
3725 || ! reg_used_between_p (SET_DEST (i2set), i2, i3)))
3726 insn_code_number = recog_for_combine (&newi3pat, i3,
3727 &new_i3_notes);
3728 if (insn_code_number >= 0)
3729 newpat = newi3pat;
3730
3731 /* It is possible that both insns now set the destination of I3.
3732 If so, we must show an extra use of it. */
3733
3734 if (insn_code_number >= 0)
3735 {
3736 rtx new_i3_dest = SET_DEST (i3set);
3737 rtx new_i2_dest = SET_DEST (i2set);
3738
3739 while (GET_CODE (new_i3_dest) == ZERO_EXTRACT
3740 || GET_CODE (new_i3_dest) == STRICT_LOW_PART
3741 || GET_CODE (new_i3_dest) == SUBREG)
3742 new_i3_dest = XEXP (new_i3_dest, 0);
3743
3744 while (GET_CODE (new_i2_dest) == ZERO_EXTRACT
3745 || GET_CODE (new_i2_dest) == STRICT_LOW_PART
3746 || GET_CODE (new_i2_dest) == SUBREG)
3747 new_i2_dest = XEXP (new_i2_dest, 0);
3748
3749 if (REG_P (new_i3_dest)
3750 && REG_P (new_i2_dest)
3751 && REGNO (new_i3_dest) == REGNO (new_i2_dest)
3752 && REGNO (new_i2_dest) < reg_n_sets_max)
3753 INC_REG_N_SETS (REGNO (new_i2_dest), 1);
3754 }
3755 }
3756
3757 /* If we can split it and use I2DEST, go ahead and see if that
3758 helps things be recognized. Verify that none of the registers
3759 are set between I2 and I3. */
3760 if (insn_code_number < 0
3761 && (split = find_split_point (&newpat, i3, false)) != 0
3762 && (!HAVE_cc0 || REG_P (i2dest))
3763 /* We need I2DEST in the proper mode. If it is a hard register
3764 or the only use of a pseudo, we can change its mode.
3765 Make sure we don't change a hard register to have a mode that
3766 isn't valid for it, or change the number of registers. */
3767 && (GET_MODE (*split) == GET_MODE (i2dest)
3768 || GET_MODE (*split) == VOIDmode
3769 || can_change_dest_mode (i2dest, added_sets_2,
3770 GET_MODE (*split)))
3771 && (next_nonnote_nondebug_insn (i2) == i3
3772 || !modified_between_p (*split, i2, i3))
3773 /* We can't overwrite I2DEST if its value is still used by
3774 NEWPAT. */
3775 && ! reg_referenced_p (i2dest, newpat))
3776 {
3777 rtx newdest = i2dest;
3778 enum rtx_code split_code = GET_CODE (*split);
3779 machine_mode split_mode = GET_MODE (*split);
3780 bool subst_done = false;
3781 newi2pat = NULL_RTX;
3782
3783 i2scratch = true;
3784
3785 /* *SPLIT may be part of I2SRC, so make sure we have the
3786 original expression around for later debug processing.
3787 We should not need I2SRC any more in other cases. */
3788 if (MAY_HAVE_DEBUG_BIND_INSNS)
3789 i2src = copy_rtx (i2src);
3790 else
3791 i2src = NULL;
3792
3793 /* Get NEWDEST as a register in the proper mode. We have already
3794 validated that we can do this. */
3795 if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode)
3796 {
3797 if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3798 newdest = gen_rtx_REG (split_mode, REGNO (i2dest));
3799 else
3800 {
3801 SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], split_mode);
3802 newdest = regno_reg_rtx[REGNO (i2dest)];
3803 }
3804 }
3805
3806 /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
3807 an ASHIFT. This can occur if it was inside a PLUS and hence
3808 appeared to be a memory address. This is a kludge. */
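/* E.g. (mult:SI (reg:SI 100) (const_int 8)) becomes
   (ashift:SI (reg:SI 100) (const_int 3)). */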
3809 if (split_code == MULT
3810 && CONST_INT_P (XEXP (*split, 1))
3811 && INTVAL (XEXP (*split, 1)) > 0
3812 && (i = exact_log2 (UINTVAL (XEXP (*split, 1)))) >= 0)
3813 {
3814 rtx i_rtx = gen_int_shift_amount (split_mode, i);
3815 SUBST (*split, gen_rtx_ASHIFT (split_mode,
3816 XEXP (*split, 0), i_rtx));
3817 /* Update split_code because we may not have a multiply
3818 anymore. */
3819 split_code = GET_CODE (*split);
3820 }
3821
3822 /* Similarly for (plus (mult FOO (const_int pow2))). */
3823 if (split_code == PLUS
3824 && GET_CODE (XEXP (*split, 0)) == MULT
3825 && CONST_INT_P (XEXP (XEXP (*split, 0), 1))
3826 && INTVAL (XEXP (XEXP (*split, 0), 1)) > 0
3827 && (i = exact_log2 (UINTVAL (XEXP (XEXP (*split, 0), 1)))) >= 0)
3828 {
3829 rtx nsplit = XEXP (*split, 0);
3830 rtx i_rtx = gen_int_shift_amount (GET_MODE (nsplit), i);
3831 SUBST (XEXP (*split, 0), gen_rtx_ASHIFT (GET_MODE (nsplit),
3832 XEXP (nsplit, 0),
3833 i_rtx));
3834 /* Update split_code because we may not have a multiply
3835 anymore. */
3836 split_code = GET_CODE (*split);
3837 }
3838
3839 #ifdef INSN_SCHEDULING
3840 /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
3841 be written as a ZERO_EXTEND. */
3842 if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
3843 {
3844 /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
3845 what it really is. */
3846 if (load_extend_op (GET_MODE (SUBREG_REG (*split)))
3847 == SIGN_EXTEND)
3848 SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
3849 SUBREG_REG (*split)));
3850 else
3851 SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
3852 SUBREG_REG (*split)));
3853 }
3854 #endif
3855
3856 /* Attempt to split binary operators using arithmetic identities. */
3857 if (BINARY_P (SET_SRC (newpat))
3858 && split_mode == GET_MODE (SET_SRC (newpat))
3859 && ! side_effects_p (SET_SRC (newpat)))
3860 {
3861 rtx setsrc = SET_SRC (newpat);
3862 machine_mode mode = GET_MODE (setsrc);
3863 enum rtx_code code = GET_CODE (setsrc);
3864 rtx src_op0 = XEXP (setsrc, 0);
3865 rtx src_op1 = XEXP (setsrc, 1);
3866
3867 /* Split "X = Y op Y" as "Z = Y; X = Z op Z". */
3868 if (rtx_equal_p (src_op0, src_op1))
3869 {
3870 newi2pat = gen_rtx_SET (newdest, src_op0);
3871 SUBST (XEXP (setsrc, 0), newdest);
3872 SUBST (XEXP (setsrc, 1), newdest);
3873 subst_done = true;
3874 }
3875 /* Split "((P op Q) op R) op S" where op is PLUS or MULT. */
3876 else if ((code == PLUS || code == MULT)
3877 && GET_CODE (src_op0) == code
3878 && GET_CODE (XEXP (src_op0, 0)) == code
3879 && (INTEGRAL_MODE_P (mode)
3880 || (FLOAT_MODE_P (mode)
3881 && flag_unsafe_math_optimizations)))
3882 {
3883 rtx p = XEXP (XEXP (src_op0, 0), 0);
3884 rtx q = XEXP (XEXP (src_op0, 0), 1);
3885 rtx r = XEXP (src_op0, 1);
3886 rtx s = src_op1;
3887
3888 /* Split both "((X op Y) op X) op Y" and
3889 "((X op Y) op Y) op X" as "T op T" where T is
3890 "X op Y". */
3891 if ((rtx_equal_p (p,r) && rtx_equal_p (q,s))
3892 || (rtx_equal_p (p,s) && rtx_equal_p (q,r)))
3893 {
3894 newi2pat = gen_rtx_SET (newdest, XEXP (src_op0, 0));
3895 SUBST (XEXP (setsrc, 0), newdest);
3896 SUBST (XEXP (setsrc, 1), newdest);
3897 subst_done = true;
3898 }
3899 /* Split "((X op X) op Y) op Y)" as "T op T" where
3900 T is "X op Y". */
3901 else if (rtx_equal_p (p,q) && rtx_equal_p (r,s))
3902 {
3903 rtx tmp = simplify_gen_binary (code, mode, p, r);
3904 newi2pat = gen_rtx_SET (newdest, tmp);
3905 SUBST (XEXP (setsrc, 0), newdest);
3906 SUBST (XEXP (setsrc, 1), newdest);
3907 subst_done = true;
3908 }
3909 }
3910 }
3911
3912 if (!subst_done)
3913 {
3914 newi2pat = gen_rtx_SET (newdest, *split);
3915 SUBST (*split, newdest);
3916 }
3917
3918 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3919
3920 /* recog_for_combine might have added CLOBBERs to newi2pat.
3921 Make sure NEWPAT does not depend on the clobbered regs. */
3922 if (GET_CODE (newi2pat) == PARALLEL)
3923 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
3924 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
3925 {
3926 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
3927 if (reg_overlap_mentioned_p (reg, newpat))
3928 {
3929 undo_all ();
3930 return 0;
3931 }
3932 }
3933
3934 /* If the split point was a MULT and we didn't have one before,
3935 don't use one now. */
3936 if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult))
3937 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3938 }
3939 }
3940
3941 /* Check for a case where we loaded from memory in a narrow mode and
3942 then sign extended it, but we need both registers. In that case,
3943 we have a PARALLEL with both loads from the same memory location.
3944 We can split this into a load from memory followed by a register-register
3945 copy. This saves at least one insn, more if register allocation can
3946 eliminate the copy.
3947
3948 We cannot do this if the destination of the first assignment is a
3949 condition code register or cc0. We eliminate this case by making sure
3950 the SET_DEST and SET_SRC have the same mode.
3951
3952 We cannot do this if the destination of the second assignment is
3953 a register that we have already assumed is zero-extended. Similarly
3954 for a SUBREG of such a register. */
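/* An invented example of the shape handled here:
     (parallel [(set (reg:SI 100) (sign_extend:SI (mem:HI (reg:SI 102))))
                (set (reg:HI 101) (mem:HI (reg:SI 102)))])
   becomes a sign-extending load into reg 100 followed by a copy of the
   low part of reg 100 into reg 101. */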
3955
3956 else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
3957 && GET_CODE (newpat) == PARALLEL
3958 && XVECLEN (newpat, 0) == 2
3959 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
3960 && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
3961 && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0)))
3962 == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0))))
3963 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
3964 && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
3965 XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
3966 && !modified_between_p (SET_SRC (XVECEXP (newpat, 0, 1)), i2, i3)
3967 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
3968 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
3969 && ! (temp_expr = SET_DEST (XVECEXP (newpat, 0, 1)),
3970 (REG_P (temp_expr)
3971 && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
3972 && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
3973 BITS_PER_WORD)
3974 && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
3975 HOST_BITS_PER_INT)
3976 && (reg_stat[REGNO (temp_expr)].nonzero_bits
3977 != GET_MODE_MASK (word_mode))))
3978 && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
3979 && (temp_expr = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
3980 (REG_P (temp_expr)
3981 && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
3982 && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
3983 BITS_PER_WORD)
3984 && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
3985 HOST_BITS_PER_INT)
3986 && (reg_stat[REGNO (temp_expr)].nonzero_bits
3987 != GET_MODE_MASK (word_mode)))))
3988 && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
3989 SET_SRC (XVECEXP (newpat, 0, 1)))
3990 && ! find_reg_note (i3, REG_UNUSED,
3991 SET_DEST (XVECEXP (newpat, 0, 0))))
3992 {
3993 rtx ni2dest;
3994
3995 newi2pat = XVECEXP (newpat, 0, 0);
3996 ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
3997 newpat = XVECEXP (newpat, 0, 1);
3998 SUBST (SET_SRC (newpat),
3999 gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest));
4000 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
4001
4002 if (i2_code_number >= 0)
4003 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
4004
4005 if (insn_code_number >= 0)
4006 swap_i2i3 = 1;
4007 }
4008
4009 /* Similarly, check for a case where we have a PARALLEL of two independent
4010 SETs but we started with three insns. In this case, we can do the sets
4011 as two separate insns. This case occurs when some SET allows two
4012 other insns to combine, but the destination of that SET is still live.
4013
4014 Also do this if we started with two insns and (at least) one of the
4015 resulting sets is a noop; this noop will be deleted later.
4016
4017 Also do this if we started with two insns neither of which was a simple
4018 move. */
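/* For instance (registers invented), a merged pattern such as
     (parallel [(set (reg:SI 100) (plus:SI (reg:SI 102) (reg:SI 103)))
                (set (reg:SI 101) (neg:SI (reg:SI 104)))])
   where neither SET references the other's destination can be emitted
   as one insn at I2 and one at I3, subject to the ordering checks
   below. */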
4019
4020 else if (insn_code_number < 0 && asm_noperands (newpat) < 0
4021 && GET_CODE (newpat) == PARALLEL
4022 && XVECLEN (newpat, 0) == 2
4023 && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
4024 && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
4025 && (i1 || set_noop_p (XVECEXP (newpat, 0, 0))
4026 || set_noop_p (XVECEXP (newpat, 0, 1)))
4027 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
4028 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
4029 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
4030 && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
4031 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
4032 XVECEXP (newpat, 0, 0))
4033 && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
4034 XVECEXP (newpat, 0, 1))
4035 && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
4036 && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1)))))
4037 {
4038 rtx set0 = XVECEXP (newpat, 0, 0);
4039 rtx set1 = XVECEXP (newpat, 0, 1);
4040
4041 /* Normally, it doesn't matter which of the two is done first,
4042 but the one that references cc0 can't be the second, and
4043 one which uses any regs/memory set in between i2 and i3 can't
4044 be first. The PARALLEL might also have been pre-existing in i3,
4045 so we need to make sure that we won't wrongly hoist a SET to i2
4046 that would conflict with a death note present in there, or would
4047 have its dest modified between i2 and i3. */
4048 if (!modified_between_p (SET_SRC (set1), i2, i3)
4049 && !(REG_P (SET_DEST (set1))
4050 && find_reg_note (i2, REG_DEAD, SET_DEST (set1)))
4051 && !(GET_CODE (SET_DEST (set1)) == SUBREG
4052 && find_reg_note (i2, REG_DEAD,
4053 SUBREG_REG (SET_DEST (set1))))
4054 && !modified_between_p (SET_DEST (set1), i2, i3)
4055 && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set0))
4056 /* If I3 is a jump, ensure that set0 is a jump so that
4057 we do not create invalid RTL. */
4058 && (!JUMP_P (i3) || SET_DEST (set0) == pc_rtx)
4059 )
4060 {
4061 newi2pat = set1;
4062 newpat = set0;
4063 }
4064 else if (!modified_between_p (SET_SRC (set0), i2, i3)
4065 && !(REG_P (SET_DEST (set0))
4066 && find_reg_note (i2, REG_DEAD, SET_DEST (set0)))
4067 && !(GET_CODE (SET_DEST (set0)) == SUBREG
4068 && find_reg_note (i2, REG_DEAD,
4069 SUBREG_REG (SET_DEST (set0))))
4070 && !modified_between_p (SET_DEST (set0), i2, i3)
4071 && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set1))
4072 /* If I3 is a jump, ensure that set1 is a jump so that
4073 we do not create invalid RTL. */
4074 && (!JUMP_P (i3) || SET_DEST (set1) == pc_rtx)
4075 )
4076 {
4077 newi2pat = set0;
4078 newpat = set1;
4079 }
4080 else
4081 {
4082 undo_all ();
4083 return 0;
4084 }
4085
4086 i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
4087
4088 if (i2_code_number >= 0)
4089 {
4090 /* recog_for_combine might have added CLOBBERs to newi2pat.
4091 Make sure NEWPAT does not depend on the clobbered regs. */
4092 if (GET_CODE (newi2pat) == PARALLEL)
4093 {
4094 for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
4095 if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
4096 {
4097 rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
4098 if (reg_overlap_mentioned_p (reg, newpat))
4099 {
4100 undo_all ();
4101 return 0;
4102 }
4103 }
4104 }
4105
4106 insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
4107
4108 if (insn_code_number >= 0)
4109 split_i2i3 = 1;
4110 }
4111 }
4112
4113 /* If it still isn't recognized, fail and change things back the way they
4114 were. */
4115 if ((insn_code_number < 0
4116 /* Is the result a reasonable ASM_OPERANDS? */
4117 && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2)))
4118 {
4119 undo_all ();
4120 return 0;
4121 }
4122
4123 /* If we had to change another insn, make sure it is valid also. */
4124 if (undobuf.other_insn)
4125 {
4126 CLEAR_HARD_REG_SET (newpat_used_regs);
4127
4128 other_pat = PATTERN (undobuf.other_insn);
4129 other_code_number = recog_for_combine (&other_pat, undobuf.other_insn,
4130 &new_other_notes);
4131
4132 if (other_code_number < 0 && ! check_asm_operands (other_pat))
4133 {
4134 undo_all ();
4135 return 0;
4136 }
4137 }
4138
4139 /* If I2 is the CC0 setter and I3 is the CC0 user then check whether
4140 they are adjacent to each other or not. */
4141 if (HAVE_cc0)
4142 {
4143 rtx_insn *p = prev_nonnote_insn (i3);
4144 if (p && p != i2 && NONJUMP_INSN_P (p) && newi2pat
4145 && sets_cc0_p (newi2pat))
4146 {
4147 undo_all ();
4148 return 0;
4149 }
4150 }
4151
4152 /* Only allow this combination if insn_cost reports that the
4153 replacement instructions are cheaper than the originals. */
4154 if (!combine_validate_cost (i0, i1, i2, i3, newpat, newi2pat, other_pat))
4155 {
4156 undo_all ();
4157 return 0;
4158 }
4159
4160 if (MAY_HAVE_DEBUG_BIND_INSNS)
4161 {
4162 struct undo *undo;
4163
4164 for (undo = undobuf.undos; undo; undo = undo->next)
4165 if (undo->kind == UNDO_MODE)
4166 {
4167 rtx reg = *undo->where.r;
4168 machine_mode new_mode = GET_MODE (reg);
4169 machine_mode old_mode = undo->old_contents.m;
4170
4171 /* Temporarily revert mode back. */
4172 adjust_reg_mode (reg, old_mode);
4173
4174 if (reg == i2dest && i2scratch)
4175 {
4176 /* If we used i2dest as a scratch register with a
4177 different mode, substitute it for the original
4178 i2src while its original mode is temporarily
4179 restored, and then clear i2scratch so that we don't
4180 do it again later. */
4181 propagate_for_debug (i2, last_combined_insn, reg, i2src,
4182 this_basic_block);
4183 i2scratch = false;
4184 /* Put back the new mode. */
4185 adjust_reg_mode (reg, new_mode);
4186 }
4187 else
4188 {
4189 rtx tempreg = gen_raw_REG (old_mode, REGNO (reg));
4190 rtx_insn *first, *last;
4191
4192 if (reg == i2dest)
4193 {
4194 first = i2;
4195 last = last_combined_insn;
4196 }
4197 else
4198 {
4199 first = i3;
4200 last = undobuf.other_insn;
4201 gcc_assert (last);
4202 if (DF_INSN_LUID (last)
4203 < DF_INSN_LUID (last_combined_insn))
4204 last = last_combined_insn;
4205 }
4206
4207 /* We're dealing with a reg that changed mode but not
4208 meaning, so we want to turn it into a subreg for
4209 the new mode. However, because of REG sharing and
4210 because its mode had already changed, we have to do
4211 it in two steps. First, replace any debug uses of
4212 reg, with its original mode temporarily restored,
4213 with this copy we have created; then, replace the
4214 copy with the SUBREG of the original shared reg,
4215 once again changed to the new mode. */
4216 propagate_for_debug (first, last, reg, tempreg,
4217 this_basic_block);
4218 adjust_reg_mode (reg, new_mode);
4219 propagate_for_debug (first, last, tempreg,
4220 lowpart_subreg (old_mode, reg, new_mode),
4221 this_basic_block);
4222 }
4223 }
4224 }
4225
4226 /* If we will be able to accept this, we have made a
4227 change to the destination of I3. This requires us to
4228 do a few adjustments. */
4229
4230 if (changed_i3_dest)
4231 {
4232 PATTERN (i3) = newpat;
4233 adjust_for_new_dest (i3);
4234 }
4235
4236 /* We now know that we can do this combination. Merge the insns and
4237 update the status of registers and LOG_LINKS. */
4238
4239 if (undobuf.other_insn)
4240 {
4241 rtx note, next;
4242
4243 PATTERN (undobuf.other_insn) = other_pat;
4244
4245 /* If any of the notes in OTHER_INSN were REG_DEAD or REG_UNUSED,
4246 ensure that they are still valid. Then add any non-duplicate
4247 notes added by recog_for_combine. */
4248 for (note = REG_NOTES (undobuf.other_insn); note; note = next)
4249 {
4250 next = XEXP (note, 1);
4251
4252 if ((REG_NOTE_KIND (note) == REG_DEAD
4253 && !reg_referenced_p (XEXP (note, 0),
4254 PATTERN (undobuf.other_insn)))
4255 || (REG_NOTE_KIND (note) == REG_UNUSED
4256 && !reg_set_p (XEXP (note, 0),
4257 PATTERN (undobuf.other_insn)))
4258 /* Simply drop the equal note, since it may no longer be valid
4259 for other_insn. It may be possible to record that the CC
4260 register has changed and discard only those notes, but
4261 in practice that is unnecessary complication and doesn't
4262 give any meaningful improvement.
4263
4264 See PR78559. */
4265 || REG_NOTE_KIND (note) == REG_EQUAL
4266 || REG_NOTE_KIND (note) == REG_EQUIV)
4267 remove_note (undobuf.other_insn, note);
4268 }
4269
4270 distribute_notes (new_other_notes, undobuf.other_insn,
4271 undobuf.other_insn, NULL, NULL_RTX, NULL_RTX,
4272 NULL_RTX);
4273 }
4274
4275 if (swap_i2i3)
4276 {
4277 /* I3 now uses what used to be its destination and which is now
4278 I2's destination. This requires us to do a few adjustments. */
4279 PATTERN (i3) = newpat;
4280 adjust_for_new_dest (i3);
4281 }
4282
4283 if (swap_i2i3 || split_i2i3)
4284 {
4285 /* We might need a LOG_LINK from I3 to I2. But then we used to
4286 have one, so we still will.
4287
4288 However, some later insn might be using I2's dest and have
4289 a LOG_LINK pointing at I3. We should change it to point at
4290 I2 instead. */
4291
4292 /* newi2pat is usually a SET here; however, recog_for_combine might
4293 have added some clobbers. */
4294 rtx x = newi2pat;
4295 if (GET_CODE (x) == PARALLEL)
4296 x = XVECEXP (newi2pat, 0, 0);
4297
4298 /* It can only be a SET of a REG or of a SUBREG of a REG. */
4299 unsigned int regno = reg_or_subregno (SET_DEST (x));
4300
4301 bool done = false;
4302 for (rtx_insn *insn = NEXT_INSN (i3);
4303 !done
4304 && insn
4305 && NONDEBUG_INSN_P (insn)
4306 && BLOCK_FOR_INSN (insn) == this_basic_block;
4307 insn = NEXT_INSN (insn))
4308 {
4309 struct insn_link *link;
4310 FOR_EACH_LOG_LINK (link, insn)
4311 if (link->insn == i3 && link->regno == regno)
4312 {
4313 link->insn = i2;
4314 done = true;
4315 break;
4316 }
4317 }
4318 }
4319
4320 {
4321 rtx i3notes, i2notes, i1notes = 0, i0notes = 0;
4322 struct insn_link *i3links, *i2links, *i1links = 0, *i0links = 0;
4323 rtx midnotes = 0;
4324 int from_luid;
4325 /* Compute which registers we expect to eliminate. newi2pat may be setting
4326 either i3dest or i2dest, so we must check it. */
4327 rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
4328 || i2dest_in_i2src || i2dest_in_i1src || i2dest_in_i0src
4329 || !i2dest_killed
4330 ? 0 : i2dest);
4331 /* For i1, we need to compute both local elimination and global
4332 elimination information with respect to newi2pat because i1dest
4333 may be the same as i3dest, in which case newi2pat may be setting
4334 i1dest. Global information is used when distributing REG_DEAD
4335 note for i2 and i3, in which case it does matter if newi2pat sets
4336 i1dest or not.
4337
4338 Local information is used when distributing REG_DEAD note for i1,
4339 in which case it doesn't matter if newi2pat sets i1dest or not.
4340 See PR62151, if we have four insns combination:
4341 i0: r0 <- i0src
4342 i1: r1 <- i1src (using r0)
4343 REG_DEAD (r0)
4344 i2: r0 <- i2src (using r1)
4345 i3: r3 <- i3src (using r0)
4346 ix: using r0
4347 From i1's point of view, r0 is eliminated, no matter if it is set
4348 by newi2pat or not. In other words, REG_DEAD info for r0 in i1
4349 should be discarded.
4350
4351 Note local information only affects cases in forms like "I1->I2->I3",
4352 "I0->I1->I2->I3" or "I0&I1->I2, I2->I3". For other cases like
4353 "I0->I1, I1&I2->I3" or "I1&I2->I3", newi2pat won't set i1dest or
4354 i0dest anyway. */
4355 rtx local_elim_i1 = (i1 == 0 || i1dest_in_i1src || i1dest_in_i0src
4356 || !i1dest_killed
4357 ? 0 : i1dest);
4358 rtx elim_i1 = (local_elim_i1 == 0
4359 || (newi2pat && reg_set_p (i1dest, newi2pat))
4360 ? 0 : i1dest);
4361 /* Same case as i1. */
4362 rtx local_elim_i0 = (i0 == 0 || i0dest_in_i0src || !i0dest_killed
4363 ? 0 : i0dest);
4364 rtx elim_i0 = (local_elim_i0 == 0
4365 || (newi2pat && reg_set_p (i0dest, newi2pat))
4366 ? 0 : i0dest);
4367
4368 /* Get the old REG_NOTES and LOG_LINKS from all our insns and
4369 clear them. */
4370 i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
4371 i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
4372 if (i1)
4373 i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);
4374 if (i0)
4375 i0notes = REG_NOTES (i0), i0links = LOG_LINKS (i0);
4376
4377 /* Ensure that we do not have something that should not be shared but
4378 occurs multiple times in the new insns. Check this by first
4379 resetting all the `used' flags and then copying anything that is shared. */
4380
4381 reset_used_flags (i3notes);
4382 reset_used_flags (i2notes);
4383 reset_used_flags (i1notes);
4384 reset_used_flags (i0notes);
4385 reset_used_flags (newpat);
4386 reset_used_flags (newi2pat);
4387 if (undobuf.other_insn)
4388 reset_used_flags (PATTERN (undobuf.other_insn));
4389
4390 i3notes = copy_rtx_if_shared (i3notes);
4391 i2notes = copy_rtx_if_shared (i2notes);
4392 i1notes = copy_rtx_if_shared (i1notes);
4393 i0notes = copy_rtx_if_shared (i0notes);
4394 newpat = copy_rtx_if_shared (newpat);
4395 newi2pat = copy_rtx_if_shared (newi2pat);
4396 if (undobuf.other_insn)
4397 reset_used_flags (PATTERN (undobuf.other_insn));
4398
4399 INSN_CODE (i3) = insn_code_number;
4400 PATTERN (i3) = newpat;
4401
4402 if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3))
4403 {
4404 for (rtx link = CALL_INSN_FUNCTION_USAGE (i3); link;
4405 link = XEXP (link, 1))
4406 {
4407 if (substed_i2)
4408 {
4409 /* I2SRC must still be meaningful at this point. Some
4410 splitting operations can invalidate I2SRC, but those
4411 operations do not apply to calls. */
4412 gcc_assert (i2src);
4413 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4414 i2dest, i2src);
4415 }
4416 if (substed_i1)
4417 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4418 i1dest, i1src);
4419 if (substed_i0)
4420 XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4421 i0dest, i0src);
4422 }
4423 }
4424
4425 if (undobuf.other_insn)
4426 INSN_CODE (undobuf.other_insn) = other_code_number;
4427
4428 /* We had one special case above where I2 had more than one set and
4429 we replaced a destination of one of those sets with the destination
4430 of I3. In that case, we have to update LOG_LINKS of insns later
4431 in this basic block. Note that this (expensive) case is rare.
4432
4433 Also, in this case, we must pretend that all REG_NOTEs for I2
4434 actually came from I3, so that REG_UNUSED notes from I2 will be
4435 properly handled. */
4436
4437 if (i3_subst_into_i2)
4438 {
4439 for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
4440 if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET
4441 || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER)
4442 && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i)))
4443 && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
4444 && ! find_reg_note (i2, REG_UNUSED,
4445 SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
4446 for (temp_insn = NEXT_INSN (i2);
4447 temp_insn
4448 && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
4449 || BB_HEAD (this_basic_block) != temp_insn);
4450 temp_insn = NEXT_INSN (temp_insn))
4451 if (temp_insn != i3 && NONDEBUG_INSN_P (temp_insn))
4452 FOR_EACH_LOG_LINK (link, temp_insn)
4453 if (link->insn == i2)
4454 link->insn = i3;
4455
4456 if (i3notes)
4457 {
4458 rtx link = i3notes;
4459 while (XEXP (link, 1))
4460 link = XEXP (link, 1);
4461 XEXP (link, 1) = i2notes;
4462 }
4463 else
4464 i3notes = i2notes;
4465 i2notes = 0;
4466 }
4467
4468 LOG_LINKS (i3) = NULL;
4469 REG_NOTES (i3) = 0;
4470 LOG_LINKS (i2) = NULL;
4471 REG_NOTES (i2) = 0;
4472
4473 if (newi2pat)
4474 {
4475 if (MAY_HAVE_DEBUG_BIND_INSNS && i2scratch)
4476 propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4477 this_basic_block);
4478 INSN_CODE (i2) = i2_code_number;
4479 PATTERN (i2) = newi2pat;
4480 }
4481 else
4482 {
4483 if (MAY_HAVE_DEBUG_BIND_INSNS && i2src)
4484 propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4485 this_basic_block);
4486 SET_INSN_DELETED (i2);
4487 }
4488
4489 if (i1)
4490 {
4491 LOG_LINKS (i1) = NULL;
4492 REG_NOTES (i1) = 0;
4493 if (MAY_HAVE_DEBUG_BIND_INSNS)
4494 propagate_for_debug (i1, last_combined_insn, i1dest, i1src,
4495 this_basic_block);
4496 SET_INSN_DELETED (i1);
4497 }
4498
4499 if (i0)
4500 {
4501 LOG_LINKS (i0) = NULL;
4502 REG_NOTES (i0) = 0;
4503 if (MAY_HAVE_DEBUG_BIND_INSNS)
4504 propagate_for_debug (i0, last_combined_insn, i0dest, i0src,
4505 this_basic_block);
4506 SET_INSN_DELETED (i0);
4507 }
4508
4509 /* Get death notes for everything that is now used in either I3 or
4510 I2 and used to die in a previous insn. If we built two new
4511 patterns, move from I1 to I2 then I2 to I3 so that we get the
4512 proper movement on registers that I2 modifies. */
4513
4514 if (i0)
4515 from_luid = DF_INSN_LUID (i0);
4516 else if (i1)
4517 from_luid = DF_INSN_LUID (i1);
4518 else
4519 from_luid = DF_INSN_LUID (i2);
4520 if (newi2pat)
4521 move_deaths (newi2pat, NULL_RTX, from_luid, i2, &midnotes);
4522 move_deaths (newpat, newi2pat, from_luid, i3, &midnotes);
4523
4524 /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3. */
4525 if (i3notes)
4526 distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL,
4527 elim_i2, elim_i1, elim_i0);
4528 if (i2notes)
4529 distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL,
4530 elim_i2, elim_i1, elim_i0);
4531 if (i1notes)
4532 distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL,
4533 elim_i2, local_elim_i1, local_elim_i0);
4534 if (i0notes)
4535 distribute_notes (i0notes, i0, i3, newi2pat ? i2 : NULL,
4536 elim_i2, elim_i1, local_elim_i0);
4537 if (midnotes)
4538 distribute_notes (midnotes, NULL, i3, newi2pat ? i2 : NULL,
4539 elim_i2, elim_i1, elim_i0);
4540
4541 /* Distribute any notes added to I2 or I3 by recog_for_combine. We
4542 know these are REG_UNUSED and want them to go to the desired insn,
4543 so we always pass it as i3. */
4544
4545 if (newi2pat && new_i2_notes)
4546 distribute_notes (new_i2_notes, i2, i2, NULL, NULL_RTX, NULL_RTX,
4547 NULL_RTX);
4548
4549 if (new_i3_notes)
4550 distribute_notes (new_i3_notes, i3, i3, NULL, NULL_RTX, NULL_RTX,
4551 NULL_RTX);
4552
4553 /* If I3DEST was used in I3SRC, it really died in I3. We may need to
4554 put a REG_DEAD note for it somewhere. If NEWI2PAT exists and sets
4555 I3DEST, the death must be somewhere before I2, not I3. If we passed I3
4556 in that case, it might delete I2. Similarly for I2 and I1.
4557 Show an additional death due to the REG_DEAD note we make here. If
4558 we discard it in distribute_notes, we will decrement it again. */
4559
4560 if (i3dest_killed)
4561 {
4562 rtx new_note = alloc_reg_note (REG_DEAD, i3dest_killed, NULL_RTX);
4563 if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
4564 distribute_notes (new_note, NULL, i2, NULL, elim_i2,
4565 elim_i1, elim_i0);
4566 else
4567 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4568 elim_i2, elim_i1, elim_i0);
4569 }
4570
4571 if (i2dest_in_i2src)
4572 {
4573 rtx new_note = alloc_reg_note (REG_DEAD, i2dest, NULL_RTX);
4574 if (newi2pat && reg_set_p (i2dest, newi2pat))
4575 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4576 NULL_RTX, NULL_RTX);
4577 else
4578 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4579 NULL_RTX, NULL_RTX, NULL_RTX);
4580 }
4581
4582 if (i1dest_in_i1src)
4583 {
4584 rtx new_note = alloc_reg_note (REG_DEAD, i1dest, NULL_RTX);
4585 if (newi2pat && reg_set_p (i1dest, newi2pat))
4586 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4587 NULL_RTX, NULL_RTX);
4588 else
4589 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4590 NULL_RTX, NULL_RTX, NULL_RTX);
4591 }
4592
4593 if (i0dest_in_i0src)
4594 {
4595 rtx new_note = alloc_reg_note (REG_DEAD, i0dest, NULL_RTX);
4596 if (newi2pat && reg_set_p (i0dest, newi2pat))
4597 distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4598 NULL_RTX, NULL_RTX);
4599 else
4600 distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4601 NULL_RTX, NULL_RTX, NULL_RTX);
4602 }
4603
4604 distribute_links (i3links);
4605 distribute_links (i2links);
4606 distribute_links (i1links);
4607 distribute_links (i0links);
4608
4609 if (REG_P (i2dest))
4610 {
4611 struct insn_link *link;
4612 rtx_insn *i2_insn = 0;
4613 rtx i2_val = 0, set;
4614
4615 /* The insn that used to set this register doesn't exist, and
4616 this life of the register may not exist either. See if one of
4617 I3's links points to an insn that sets I2DEST. If it does,
4618 that is now the last known value for I2DEST. If we don't update
4619 this and I2 set the register to a value that depended on its old
4620 contents, we will get confused. If this insn is used, things
4621 will be set correctly in combine_instructions. */
4622 FOR_EACH_LOG_LINK (link, i3)
4623 if ((set = single_set (link->insn)) != 0
4624 && rtx_equal_p (i2dest, SET_DEST (set)))
4625 i2_insn = link->insn, i2_val = SET_SRC (set);
4626
4627 record_value_for_reg (i2dest, i2_insn, i2_val);
4628
4629 /* If the reg formerly set in I2 died only once and that was in I3,
4630 zero its use count so it won't make `reload' do any work. */
4631 if (! added_sets_2
4632 && (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat))
4633 && ! i2dest_in_i2src
4634 && REGNO (i2dest) < reg_n_sets_max)
4635 INC_REG_N_SETS (REGNO (i2dest), -1);
4636 }
4637
4638 if (i1 && REG_P (i1dest))
4639 {
4640 struct insn_link *link;
4641 rtx_insn *i1_insn = 0;
4642 rtx i1_val = 0, set;
4643
4644 FOR_EACH_LOG_LINK (link, i3)
4645 if ((set = single_set (link->insn)) != 0
4646 && rtx_equal_p (i1dest, SET_DEST (set)))
4647 i1_insn = link->insn, i1_val = SET_SRC (set);
4648
4649 record_value_for_reg (i1dest, i1_insn, i1_val);
4650
4651 if (! added_sets_1
4652 && ! i1dest_in_i1src
4653 && REGNO (i1dest) < reg_n_sets_max)
4654 INC_REG_N_SETS (REGNO (i1dest), -1);
4655 }
4656
4657 if (i0 && REG_P (i0dest))
4658 {
4659 struct insn_link *link;
4660 rtx_insn *i0_insn = 0;
4661 rtx i0_val = 0, set;
4662
4663 FOR_EACH_LOG_LINK (link, i3)
4664 if ((set = single_set (link->insn)) != 0
4665 && rtx_equal_p (i0dest, SET_DEST (set)))
4666 i0_insn = link->insn, i0_val = SET_SRC (set);
4667
4668 record_value_for_reg (i0dest, i0_insn, i0_val);
4669
4670 if (! added_sets_0
4671 && ! i0dest_in_i0src
4672 && REGNO (i0dest) < reg_n_sets_max)
4673 INC_REG_N_SETS (REGNO (i0dest), -1);
4674 }
4675
4676 /* Update reg_stat[].nonzero_bits et al for any changes that may have
4677 been made to this insn. The order is important, because newi2pat
4678 can affect nonzero_bits of newpat. */
4679 if (newi2pat)
4680 note_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL);
4681 note_stores (newpat, set_nonzero_bits_and_sign_copies, NULL);
4682 }
4683
4684 if (undobuf.other_insn != NULL_RTX)
4685 {
4686 if (dump_file)
4687 {
4688 fprintf (dump_file, "modifying other_insn ");
4689 dump_insn_slim (dump_file, undobuf.other_insn);
4690 }
4691 df_insn_rescan (undobuf.other_insn);
4692 }
4693
4694 if (i0 && !(NOTE_P (i0) && (NOTE_KIND (i0) == NOTE_INSN_DELETED)))
4695 {
4696 if (dump_file)
4697 {
4698 fprintf (dump_file, "modifying insn i0 ");
4699 dump_insn_slim (dump_file, i0);
4700 }
4701 df_insn_rescan (i0);
4702 }
4703
4704 if (i1 && !(NOTE_P (i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED)))
4705 {
4706 if (dump_file)
4707 {
4708 fprintf (dump_file, "modifying insn i1 ");
4709 dump_insn_slim (dump_file, i1);
4710 }
4711 df_insn_rescan (i1);
4712 }
4713
4714 if (i2 && !(NOTE_P (i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED)))
4715 {
4716 if (dump_file)
4717 {
4718 fprintf (dump_file, "modifying insn i2 ");
4719 dump_insn_slim (dump_file, i2);
4720 }
4721 df_insn_rescan (i2);
4722 }
4723
4724 if (i3 && !(NOTE_P (i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED)))
4725 {
4726 if (dump_file)
4727 {
4728 fprintf (dump_file, "modifying insn i3 ");
4729 dump_insn_slim (dump_file, i3);
4730 }
4731 df_insn_rescan (i3);
4732 }
4733
4734 /* Set new_direct_jump_p if a new return or simple jump instruction
4735 has been created. Adjust the CFG accordingly. */
4736 if (returnjump_p (i3) || any_uncondjump_p (i3))
4737 {
4738 *new_direct_jump_p = 1;
4739 mark_jump_label (PATTERN (i3), i3, 0);
4740 update_cfg_for_uncondjump (i3);
4741 }
4742
4743 if (undobuf.other_insn != NULL_RTX
4744 && (returnjump_p (undobuf.other_insn)
4745 || any_uncondjump_p (undobuf.other_insn)))
4746 {
4747 *new_direct_jump_p = 1;
4748 update_cfg_for_uncondjump (undobuf.other_insn);
4749 }
4750
4751 if (GET_CODE (PATTERN (i3)) == TRAP_IF
4752 && XEXP (PATTERN (i3), 0) == const1_rtx)
4753 {
4754 basic_block bb = BLOCK_FOR_INSN (i3);
4755 gcc_assert (bb);
4756 remove_edge (split_block (bb, i3));
4757 emit_barrier_after_bb (bb);
4758 *new_direct_jump_p = 1;
4759 }
4760
4761 if (undobuf.other_insn
4762 && GET_CODE (PATTERN (undobuf.other_insn)) == TRAP_IF
4763 && XEXP (PATTERN (undobuf.other_insn), 0) == const1_rtx)
4764 {
4765 basic_block bb = BLOCK_FOR_INSN (undobuf.other_insn);
4766 gcc_assert (bb);
4767 remove_edge (split_block (bb, undobuf.other_insn));
4768 emit_barrier_after_bb (bb);
4769 *new_direct_jump_p = 1;
4770 }
4771
4772 /* A noop might also need cleaning up of the CFG, if it comes from the
4773 simplification of a jump. */
4774 if (JUMP_P (i3)
4775 && GET_CODE (newpat) == SET
4776 && SET_SRC (newpat) == pc_rtx
4777 && SET_DEST (newpat) == pc_rtx)
4778 {
4779 *new_direct_jump_p = 1;
4780 update_cfg_for_uncondjump (i3);
4781 }
4782
4783 if (undobuf.other_insn != NULL_RTX
4784 && JUMP_P (undobuf.other_insn)
4785 && GET_CODE (PATTERN (undobuf.other_insn)) == SET
4786 && SET_SRC (PATTERN (undobuf.other_insn)) == pc_rtx
4787 && SET_DEST (PATTERN (undobuf.other_insn)) == pc_rtx)
4788 {
4789 *new_direct_jump_p = 1;
4790 update_cfg_for_uncondjump (undobuf.other_insn);
4791 }
4792
4793 combine_successes++;
4794 undo_commit ();
4795
4796 rtx_insn *ret = newi2pat ? i2 : i3;
4797 if (added_links_insn && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (ret))
4798 ret = added_links_insn;
4799 if (added_notes_insn && DF_INSN_LUID (added_notes_insn) < DF_INSN_LUID (ret))
4800 ret = added_notes_insn;
4801
4802 return ret;
4803 }
4804
4805 /* Get a marker for undoing to the current state. */
4806
4807 static void *
4808 get_undo_marker (void)
4809 {
4810 return undobuf.undos;
4811 }
4812
4813 /* Undo the modifications up to the marker. */
4814
4815 static void
4816 undo_to_marker (void *marker)
4817 {
4818 struct undo *undo, *next;
4819
4820 for (undo = undobuf.undos; undo != marker; undo = next)
4821 {
4822 gcc_assert (undo);
4823
4824 next = undo->next;
4825 switch (undo->kind)
4826 {
4827 case UNDO_RTX:
4828 *undo->where.r = undo->old_contents.r;
4829 break;
4830 case UNDO_INT:
4831 *undo->where.i = undo->old_contents.i;
4832 break;
4833 case UNDO_MODE:
4834 adjust_reg_mode (*undo->where.r, undo->old_contents.m);
4835 break;
4836 case UNDO_LINKS:
4837 *undo->where.l = undo->old_contents.l;
4838 break;
4839 default:
4840 gcc_unreachable ();
4841 }
4842
4843 undo->next = undobuf.frees;
4844 undobuf.frees = undo;
4845 }
4846
4847 undobuf.undos = (struct undo *) marker;
4848 }
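
/* A sketch of the intended use of the marker interface (names and
   control flow invented for illustration):

     void *marker = get_undo_marker ();
     ... make tentative SUBSTs ...
     if (the attempt failed)
       undo_to_marker (marker);   /+ roll back only those SUBSTs +/  */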
4849
4850 /* Undo all the modifications recorded in undobuf. */
4851
4852 static void
4853 undo_all (void)
4854 {
4855 undo_to_marker (0);
4856 }
4857
4858 /* We've committed to accepting the changes we made. Move all
4859 of the undos to the free list. */
4860
4861 static void
4862 undo_commit (void)
4863 {
4864 struct undo *undo, *next;
4865
4866 for (undo = undobuf.undos; undo; undo = next)
4867 {
4868 next = undo->next;
4869 undo->next = undobuf.frees;
4870 undobuf.frees = undo;
4871 }
4872 undobuf.undos = 0;
4873 }
4874
4875 /* Find the innermost point within the rtx at LOC, possibly LOC itself,
4876 where we have an arithmetic expression and return that point. LOC will
4877 be inside INSN.
4878
4879 try_combine will call this function to see if an insn can be split into
4880 two insns. */
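/* For example, given
     (set (reg:SI 100) (plus:SI (mult:SI (reg:SI 101) (const_int 4))
                                (reg:SI 102)))
   a plausible split point is the inner MULT: computing it into a
   separate register first may let both resulting insns match.
   (An illustrative, made-up example.) */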
4881
4882 static rtx *
4883 find_split_point (rtx *loc, rtx_insn *insn, bool set_src)
4884 {
4885 rtx x = *loc;
4886 enum rtx_code code = GET_CODE (x);
4887 rtx *split;
4888 unsigned HOST_WIDE_INT len = 0;
4889 HOST_WIDE_INT pos = 0;
4890 int unsignedp = 0;
4891 rtx inner = NULL_RTX;
4892 scalar_int_mode mode, inner_mode;
4893
4894 /* First special-case some codes. */
4895 switch (code)
4896 {
4897 case SUBREG:
4898 #ifdef INSN_SCHEDULING
4899 /* If we are making a paradoxical SUBREG invalid, it becomes a split
4900 point. */
4901 if (MEM_P (SUBREG_REG (x)))
4902 return loc;
4903 #endif
4904 return find_split_point (&SUBREG_REG (x), insn, false);
4905
4906 case MEM:
4907 /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it
4908 using LO_SUM and HIGH. */
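/* E.g. (an illustrative sketch): (mem (symbol_ref "x")) is rewritten
   here as (mem (lo_sum (high (symbol_ref "x")) (symbol_ref "x"))) and
   the returned split point is the (high ...) term, so the high part
   can be computed by a separate insn.  */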
4909 if (HAVE_lo_sum && (GET_CODE (XEXP (x, 0)) == CONST
4910 || GET_CODE (XEXP (x, 0)) == SYMBOL_REF))
4911 {
4912 machine_mode address_mode = get_address_mode (x);
4913
4914 SUBST (XEXP (x, 0),
4915 gen_rtx_LO_SUM (address_mode,
4916 gen_rtx_HIGH (address_mode, XEXP (x, 0)),
4917 XEXP (x, 0)));
4918 return &XEXP (XEXP (x, 0), 0);
4919 }
4920
4921 /* If we have a PLUS whose second operand is a constant and the
4922 address is not valid, perhaps we can split it up using
4923 the machine-specific way to split large constants. We use
4924 the first pseudo-reg (one of the virtual regs) as a placeholder;
4925 it will not remain in the result. */
4926 if (GET_CODE (XEXP (x, 0)) == PLUS
4927 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
4928 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4929 MEM_ADDR_SPACE (x)))
4930 {
4931 rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
4932 rtx_insn *seq = combine_split_insns (gen_rtx_SET (reg, XEXP (x, 0)),
4933 subst_insn);
4934
4935 /* This should have produced two insns, each of which sets our
4936 placeholder. If the source of the second is a valid address,
4937 we can put both sources together and make a split point
4938 in the middle. */
4939
4940 if (seq
4941 && NEXT_INSN (seq) != NULL_RTX
4942 && NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
4943 && NONJUMP_INSN_P (seq)
4944 && GET_CODE (PATTERN (seq)) == SET
4945 && SET_DEST (PATTERN (seq)) == reg
4946 && ! reg_mentioned_p (reg,
4947 SET_SRC (PATTERN (seq)))
4948 && NONJUMP_INSN_P (NEXT_INSN (seq))
4949 && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET
4950 && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg
4951 && memory_address_addr_space_p
4952 (GET_MODE (x), SET_SRC (PATTERN (NEXT_INSN (seq))),
4953 MEM_ADDR_SPACE (x)))
4954 {
4955 rtx src1 = SET_SRC (PATTERN (seq));
4956 rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq)));
4957
4958 /* Replace the placeholder in SRC2 with SRC1. If we can
4959 find where in SRC2 it was placed, that can become our
4960 split point and we can replace this address with SRC2.
4961 Just try two obvious places. */
4962
4963 src2 = replace_rtx (src2, reg, src1);
4964 split = 0;
4965 if (XEXP (src2, 0) == src1)
4966 split = &XEXP (src2, 0);
4967 else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
4968 && XEXP (XEXP (src2, 0), 0) == src1)
4969 split = &XEXP (XEXP (src2, 0), 0);
4970
4971 if (split)
4972 {
4973 SUBST (XEXP (x, 0), src2);
4974 return split;
4975 }
4976 }
4977
4978 /* If that didn't work, perhaps the first operand is complex and
4979 needs to be computed separately, so make a split point there.
4980 This will occur on machines that just support REG + CONST
4981 and have a constant moved through some previous computation. */
4982
4983 else if (!OBJECT_P (XEXP (XEXP (x, 0), 0))
4984 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4985 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4986 return &XEXP (XEXP (x, 0), 0);
4987 }
4988
4989 /* If we have a PLUS whose first operand is complex, try computing it
4990 separately by making a split there. */
4991 if (GET_CODE (XEXP (x, 0)) == PLUS
4992 && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4993 MEM_ADDR_SPACE (x))
4994 && ! OBJECT_P (XEXP (XEXP (x, 0), 0))
4995 && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
4996 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
4997 return &XEXP (XEXP (x, 0), 0);
4998 break;
4999
5000 case SET:
5001 /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a
5002 ZERO_EXTRACT, the most likely reason why this doesn't match is that
5003 we need to put the operand into a register. So split at that
5004 point. */
5005
5006 if (SET_DEST (x) == cc0_rtx
5007 && GET_CODE (SET_SRC (x)) != COMPARE
5008 && GET_CODE (SET_SRC (x)) != ZERO_EXTRACT
5009 && !OBJECT_P (SET_SRC (x))
5010 && ! (GET_CODE (SET_SRC (x)) == SUBREG
5011 && OBJECT_P (SUBREG_REG (SET_SRC (x)))))
5012 return &SET_SRC (x);
5013
5014 /* See if we can split SET_SRC as it stands. */
5015 split = find_split_point (&SET_SRC (x), insn, true);
5016 if (split && split != &SET_SRC (x))
5017 return split;
5018
5019 /* See if we can split SET_DEST as it stands. */
5020 split = find_split_point (&SET_DEST (x), insn, false);
5021 if (split && split != &SET_DEST (x))
5022 return split;
5023
5024 /* See if this is a bitfield assignment with everything constant. If
5025 so, this is an IOR of an AND, so split it into that. */
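/* Worked example (illustrative, assuming !BITS_BIG_ENDIAN, SImode):
     (set (zero_extract:SI (reg:SI d) (const_int 4) (const_int 2))
          (const_int 5))
   has mask == 0xf and or_mask == (const_int 20), so the store becomes
     (set (reg:SI d)
          (ior:SI (and:SI (reg:SI d) (const_int -61)) (const_int 20)))
   since ~(0xf << 2) == -61 and 5 << 2 == 20.  */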
5026 if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
5027 && is_a <scalar_int_mode> (GET_MODE (XEXP (SET_DEST (x), 0)),
5028 &inner_mode)
5029 && HWI_COMPUTABLE_MODE_P (inner_mode)
5030 && CONST_INT_P (XEXP (SET_DEST (x), 1))
5031 && CONST_INT_P (XEXP (SET_DEST (x), 2))
5032 && CONST_INT_P (SET_SRC (x))
5033 && ((INTVAL (XEXP (SET_DEST (x), 1))
5034 + INTVAL (XEXP (SET_DEST (x), 2)))
5035 <= GET_MODE_PRECISION (inner_mode))
5036 && ! side_effects_p (XEXP (SET_DEST (x), 0)))
5037 {
5038 HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
5039 unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1));
5040 rtx dest = XEXP (SET_DEST (x), 0);
5041 unsigned HOST_WIDE_INT mask = (HOST_WIDE_INT_1U << len) - 1;
5042 unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x)) & mask;
5043 rtx or_mask;
5044
5045 if (BITS_BIG_ENDIAN)
5046 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
5047
5048 or_mask = gen_int_mode (src << pos, inner_mode);
5049 if (src == mask)
5050 SUBST (SET_SRC (x),
5051 simplify_gen_binary (IOR, inner_mode, dest, or_mask));
5052 else
5053 {
5054 rtx negmask = gen_int_mode (~(mask << pos), inner_mode);
5055 SUBST (SET_SRC (x),
5056 simplify_gen_binary (IOR, inner_mode,
5057 simplify_gen_binary (AND, inner_mode,
5058 dest, negmask),
5059 or_mask));
5060 }
5061
5062 SUBST (SET_DEST (x), dest);
5063
5064 split = find_split_point (&SET_SRC (x), insn, true);
5065 if (split && split != &SET_SRC (x))
5066 return split;
5067 }
5068
5069 /* Otherwise, see if this is an operation that we can split into two.
5070 If so, try to split that. */
5071 code = GET_CODE (SET_SRC (x));
5072
5073 switch (code)
5074 {
5075 case AND:
5076 /* If we are AND'ing with a large constant that is only a single
5077 bit and the result is only being used in a context where we
5078 need to know if it is zero or nonzero, replace it with a bit
5079 extraction. This will avoid the large constant, which might
5080 have taken more than one insn to make. If the constant were
5081 not a valid argument to the AND but took only one insn to make,
5082 this is no worse, but if it took more than one insn, it will
5083 be better. */
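/* E.g. (illustrative): if this insn is
     (set (reg:SI d) (and:SI (reg:SI a) (const_int 4096)))
   and the only use of d is (ne (reg:SI d) (const_int 0)), the AND is
   replaced by a one-bit extraction of bit 12 of a, which needs no
   large constant at all.  */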
5084
5085 if (CONST_INT_P (XEXP (SET_SRC (x), 1))
5086 && REG_P (XEXP (SET_SRC (x), 0))
5087 && (pos = exact_log2 (UINTVAL (XEXP (SET_SRC (x), 1)))) >= 7
5088 && REG_P (SET_DEST (x))
5089 && (split = find_single_use (SET_DEST (x), insn, NULL)) != 0
5090 && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
5091 && XEXP (*split, 0) == SET_DEST (x)
5092 && XEXP (*split, 1) == const0_rtx)
5093 {
5094 rtx extraction = make_extraction (GET_MODE (SET_DEST (x)),
5095 XEXP (SET_SRC (x), 0),
5096 pos, NULL_RTX, 1, 1, 0, 0);
5097 if (extraction != 0)
5098 {
5099 SUBST (SET_SRC (x), extraction);
5100 return find_split_point (loc, insn, false);
5101 }
5102 }
5103 break;
5104
5105 case NE:
5106 /* If STORE_FLAG_VALUE is -1 and this is (NE X 0) where only one bit
5107 of X is known to be on, it can be converted into a NEG of a shift. */
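/* E.g. (illustrative): if only bit 3 of X can be nonzero,
   (ne X (const_int 0)) becomes (neg (lshiftrt X (const_int 3))),
   which is 0 or -1, matching STORE_FLAG_VALUE == -1.  */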
5108 if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
5109 && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
5110 && ((pos = exact_log2 (nonzero_bits (XEXP (SET_SRC (x), 0),
5111 GET_MODE (XEXP (SET_SRC (x),
5112 0))))) >= 1))
5113 {
5114 machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));
5115 rtx pos_rtx = gen_int_shift_amount (mode, pos);
5116 SUBST (SET_SRC (x),
5117 gen_rtx_NEG (mode,
5118 gen_rtx_LSHIFTRT (mode,
5119 XEXP (SET_SRC (x), 0),
5120 pos_rtx)));
5121
5122 split = find_split_point (&SET_SRC (x), insn, true);
5123 if (split && split != &SET_SRC (x))
5124 return split;
5125 }
5126 break;
5127
5128 case SIGN_EXTEND:
5129 inner = XEXP (SET_SRC (x), 0);
5130
5131 /* We can't optimize if either mode is a partial integer
5132 mode as we don't know how many bits are significant
5133 in those modes. */
5134 if (!is_int_mode (GET_MODE (inner), &inner_mode)
5135 || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT)
5136 break;
5137
5138 pos = 0;
5139 len = GET_MODE_PRECISION (inner_mode);
5140 unsignedp = 0;
5141 break;
5142
5143 case SIGN_EXTRACT:
5144 case ZERO_EXTRACT:
5145 if (is_a <scalar_int_mode> (GET_MODE (XEXP (SET_SRC (x), 0)),
5146 &inner_mode)
5147 && CONST_INT_P (XEXP (SET_SRC (x), 1))
5148 && CONST_INT_P (XEXP (SET_SRC (x), 2)))
5149 {
5150 inner = XEXP (SET_SRC (x), 0);
5151 len = INTVAL (XEXP (SET_SRC (x), 1));
5152 pos = INTVAL (XEXP (SET_SRC (x), 2));
5153
5154 if (BITS_BIG_ENDIAN)
5155 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
5156 unsignedp = (code == ZERO_EXTRACT);
5157 }
5158 break;
5159
5160 default:
5161 break;
5162 }
5163
5164 if (len
5165 && known_subrange_p (pos, len,
5166 0, GET_MODE_PRECISION (GET_MODE (inner)))
5167 && is_a <scalar_int_mode> (GET_MODE (SET_SRC (x)), &mode))
5168 {
5169 /* For unsigned, we have a choice of a shift followed by an
5170 AND or two shifts. Use two shifts for field sizes where the
5171 constant might be too large. We assume here that we can
5172 always at least get 8-bit constants in an AND insn, which is
5173 true for every current RISC. */
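/* E.g. (illustrative, SImode, !BITS_BIG_ENDIAN): an unsigned 3-bit
   field at bit 2 becomes
     (and:SI (lshiftrt:SI inner (const_int 2)) (const_int 7))
   while the signed form uses the two-shift variant below:
     (ashiftrt:SI (ashift:SI inner (const_int 27)) (const_int 29)).  */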
5174
5175 if (unsignedp && len <= 8)
5176 {
5177 unsigned HOST_WIDE_INT mask
5178 = (HOST_WIDE_INT_1U << len) - 1;
5179 rtx pos_rtx = gen_int_shift_amount (mode, pos);
5180 SUBST (SET_SRC (x),
5181 gen_rtx_AND (mode,
5182 gen_rtx_LSHIFTRT
5183 (mode, gen_lowpart (mode, inner), pos_rtx),
5184 gen_int_mode (mask, mode)));
5185
5186 split = find_split_point (&SET_SRC (x), insn, true);
5187 if (split && split != &SET_SRC (x))
5188 return split;
5189 }
5190 else
5191 {
5192 int left_bits = GET_MODE_PRECISION (mode) - len - pos;
5193 int right_bits = GET_MODE_PRECISION (mode) - len;
5194 SUBST (SET_SRC (x),
5195 gen_rtx_fmt_ee
5196 (unsignedp ? LSHIFTRT : ASHIFTRT, mode,
5197 gen_rtx_ASHIFT (mode,
5198 gen_lowpart (mode, inner),
5199 gen_int_shift_amount (mode, left_bits)),
5200 gen_int_shift_amount (mode, right_bits)));
5201
5202 split = find_split_point (&SET_SRC (x), insn, true);
5203 if (split && split != &SET_SRC (x))
5204 return split;
5205 }
5206 }
5207
5208 /* See if this is a simple operation with a constant as the second
5209 operand. It might be that this constant is out of range and hence
5210 could be used as a split point. */
5211 if (BINARY_P (SET_SRC (x))
5212 && CONSTANT_P (XEXP (SET_SRC (x), 1))
5213 && (OBJECT_P (XEXP (SET_SRC (x), 0))
5214 || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG
5215 && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0))))))
5216 return &XEXP (SET_SRC (x), 1);
5217
5218 /* Finally, see if this is a simple operation with its first operand
5219 not in a register. The operation might require this operand in a
5220 register, so return it as a split point. We can always do this
5221 because if the first operand were another operation, we would have
5222 already found it as a split point. */
5223 if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x)))
5224 && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode))
5225 return &XEXP (SET_SRC (x), 0);
5226
5227 return 0;
5228
5229 case AND:
5230 case IOR:
5231 /* We write NOR as (and (not A) (not B)), but if we don't have a NOR,
5232 it is better to write this as (not (ior A B)) so we can split it.
5233 Similarly for IOR. */
5234 if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
5235 {
5236 SUBST (*loc,
5237 gen_rtx_NOT (GET_MODE (x),
5238 gen_rtx_fmt_ee (code == IOR ? AND : IOR,
5239 GET_MODE (x),
5240 XEXP (XEXP (x, 0), 0),
5241 XEXP (XEXP (x, 1), 0))));
5242 return find_split_point (loc, insn, set_src);
5243 }
5244
5245 /* Many RISC machines have a large set of logical insns. If the
5246 second operand is a NOT, put it first so we will try to split the
5247 other operand first. */
5248 if (GET_CODE (XEXP (x, 1)) == NOT)
5249 {
5250 rtx tem = XEXP (x, 0);
5251 SUBST (XEXP (x, 0), XEXP (x, 1));
5252 SUBST (XEXP (x, 1), tem);
5253 }
5254 break;
5255
5256 case PLUS:
5257 case MINUS:
5258 /* Canonicalization can produce (minus A (mult B C)), where C is a
5259 constant. It may be better to try splitting (plus (mult B -C) A)
5260 instead if this isn't a multiply by a power of two. */
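/* E.g. (illustrative): (minus A (mult B (const_int 3))) is rewritten
   as (plus (mult B (const_int -3)) A), which may match a
   multiply-accumulate pattern even though the MINUS form does not.  */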
5261 if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT
5262 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
5263 && !pow2p_hwi (INTVAL (XEXP (XEXP (x, 1), 1))))
5264 {
5265 machine_mode mode = GET_MODE (x);
5266 unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1));
5267 HOST_WIDE_INT other_int = trunc_int_for_mode (-this_int, mode);
5268 SUBST (*loc, gen_rtx_PLUS (mode,
5269 gen_rtx_MULT (mode,
5270 XEXP (XEXP (x, 1), 0),
5271 gen_int_mode (other_int,
5272 mode)),
5273 XEXP (x, 0)));
5274 return find_split_point (loc, insn, set_src);
5275 }
5276
5277 /* Split at a multiply-accumulate instruction. However if this is
5278 the SET_SRC, we likely do not have such an instruction and it's
5279 worthless to try this split. */
5280 if (!set_src
5281 && (GET_CODE (XEXP (x, 0)) == MULT
5282 || (GET_CODE (XEXP (x, 0)) == ASHIFT
5283 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
5284 return loc;
5285
5286 default:
5287 break;
5288 }
5289
5290 /* Otherwise, select our actions depending on our rtx class. */
5291 switch (GET_RTX_CLASS (code))
5292 {
5293 case RTX_BITFIELD_OPS: /* This is ZERO_EXTRACT and SIGN_EXTRACT. */
5294 case RTX_TERNARY:
5295 split = find_split_point (&XEXP (x, 2), insn, false);
5296 if (split)
5297 return split;
5298 /* fall through */
5299 case RTX_BIN_ARITH:
5300 case RTX_COMM_ARITH:
5301 case RTX_COMPARE:
5302 case RTX_COMM_COMPARE:
5303 split = find_split_point (&XEXP (x, 1), insn, false);
5304 if (split)
5305 return split;
5306 /* fall through */
5307 case RTX_UNARY:
5308 /* Some machines have (and (shift ...) ...) insns. If X is not
5309 an AND, but XEXP (X, 0) is, use it as our split point. */
5310 if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND)
5311 return &XEXP (x, 0);
5312
5313 split = find_split_point (&XEXP (x, 0), insn, false);
5314 if (split)
5315 return split;
5316 return loc;
5317
5318 default:
5319 /* Otherwise, we don't have a split point. */
5320 return 0;
5321 }
5322 }
5323
5324 /* Throughout X, replace FROM with TO, and return the result.
5325 The result is TO if X is FROM;
5326 otherwise the result is X, but its contents may have been modified.
5327 If they were modified, a record was made in undobuf so that
5328 undo_all will (among other things) return X to its original state.
5329
5330 If the number of changes necessary is too great to record to undo,
5331 the excess changes are not made, so the result is invalid.
5332 The changes already made can still be undone.
5333 undobuf.num_undo is incremented for such changes, so by testing that
5334 the caller can tell whether the result is valid.
5335
5336 `n_occurrences' is incremented each time FROM is replaced.
5337
5338 IN_DEST is nonzero if we are processing the SET_DEST of a SET.
5339
5340 IN_COND is nonzero if we are at the top level of a condition.
5341
5342 UNIQUE_COPY is nonzero if each substitution must be unique. We do this
5343 by copying if `n_occurrences' is nonzero. */
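/* A sketch of a typical call (hypothetical names): to replace every use
   of the earlier insn's destination in I3's pattern with its source,

     newpat = subst (PATTERN (i3), i2dest, i2src, 0, 0, 0);

   after which newpat is checked against the machine description.  */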
5344
5345 static rtx
5346 subst (rtx x, rtx from, rtx to, int in_dest, int in_cond, int unique_copy)
5347 {
5348 enum rtx_code code = GET_CODE (x);
5349 machine_mode op0_mode = VOIDmode;
5350 const char *fmt;
5351 int len, i;
5352 rtx new_rtx;
5353
5354 /* Two expressions are equal if they are identical copies of a shared
5355 RTX or if they are both registers with the same register number
5356 and mode. */
5357
5358 #define COMBINE_RTX_EQUAL_P(X,Y) \
5359 ((X) == (Y) \
5360 || (REG_P (X) && REG_P (Y) \
5361 && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))
5362
5363 /* Do not substitute into clobbers of regs -- this will never result in
5364 valid RTL. */
5365 if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
5366 return x;
5367
5368 if (! in_dest && COMBINE_RTX_EQUAL_P (x, from))
5369 {
5370 n_occurrences++;
5371 return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to);
5372 }
5373
5374 /* If X and FROM are the same register but different modes, they
5375 will not have been seen as equal above. However, the log links code
5376 will make a LOG_LINKS entry for that case. If we do nothing, we
5377 will try to rerecognize our original insn and, when it succeeds,
5378 we will delete the feeding insn, which is incorrect.
5379
5380 So force this insn not to match in this (rare) case. */
5381 if (! in_dest && code == REG && REG_P (from)
5382 && reg_overlap_mentioned_p (x, from))
5383 return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
5384
5385 /* If this is an object, we are done unless it is a MEM or LO_SUM, both
5386 of which may contain things that can be combined. */
5387 if (code != MEM && code != LO_SUM && OBJECT_P (x))
5388 return x;
5389
5390 /* It is possible to have a subexpression appear twice in the insn.
5391 Suppose that FROM is a register that appears within TO.
5392 Then, after that subexpression has been scanned once by `subst',
5393 the second time it is scanned, TO may be found. If we were
5394 to scan TO here, we would find FROM within it and create a
5395 self-referent rtl structure which is completely wrong. */
5396 if (COMBINE_RTX_EQUAL_P (x, to))
5397 return to;
5398
5399 /* Parallel asm_operands need special attention because all of the
5400 inputs are shared across the arms. Furthermore, unsharing the
5401 rtl results in recognition failures. Failure to handle this case
5402 specially can result in circular rtl.
5403
5404 Solve this by doing a normal pass across the first entry of the
5405 parallel, and only processing the SET_DESTs of the subsequent
5406 entries. Ug. */
5407
5408 if (code == PARALLEL
5409 && GET_CODE (XVECEXP (x, 0, 0)) == SET
5410 && GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS)
5411 {
5412 new_rtx = subst (XVECEXP (x, 0, 0), from, to, 0, 0, unique_copy);
5413
5414 /* If this substitution failed, this whole thing fails. */
5415 if (GET_CODE (new_rtx) == CLOBBER
5416 && XEXP (new_rtx, 0) == const0_rtx)
5417 return new_rtx;
5418
5419 SUBST (XVECEXP (x, 0, 0), new_rtx);
5420
5421 for (i = XVECLEN (x, 0) - 1; i >= 1; i--)
5422 {
5423 rtx dest = SET_DEST (XVECEXP (x, 0, i));
5424
5425 if (!REG_P (dest)
5426 && GET_CODE (dest) != CC0
5427 && GET_CODE (dest) != PC)
5428 {
5429 new_rtx = subst (dest, from, to, 0, 0, unique_copy);
5430
5431 /* If this substitution failed, this whole thing fails. */
5432 if (GET_CODE (new_rtx) == CLOBBER
5433 && XEXP (new_rtx, 0) == const0_rtx)
5434 return new_rtx;
5435
5436 SUBST (SET_DEST (XVECEXP (x, 0, i)), new_rtx);
5437 }
5438 }
5439 }
5440 else
5441 {
5442 len = GET_RTX_LENGTH (code);
5443 fmt = GET_RTX_FORMAT (code);
5444
5445 /* We don't need to process a SET_DEST that is a register, CC0,
5446 or PC, so set up to skip this common case. All other cases
5447 where we want to suppress replacing something inside a
5448 SET_SRC are handled via the IN_DEST operand. */
5449 if (code == SET
5450 && (REG_P (SET_DEST (x))
5451 || GET_CODE (SET_DEST (x)) == CC0
5452 || GET_CODE (SET_DEST (x)) == PC))
5453 fmt = "ie";
5454
5455 /* Trying to simplify the operands of a widening MULT is not likely
5456 to create RTL matching a machine insn. */
5457 if (code == MULT
5458 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
5459 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
5460 && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
5461 || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
5462 && REG_P (XEXP (XEXP (x, 0), 0))
5463 && REG_P (XEXP (XEXP (x, 1), 0))
5464 && from == to)
5465 return x;
5466
5467
5468 /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a
5469 constant. */
5470 if (fmt[0] == 'e')
5471 op0_mode = GET_MODE (XEXP (x, 0));
5472
5473 for (i = 0; i < len; i++)
5474 {
5475 if (fmt[i] == 'E')
5476 {
5477 int j;
5478 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5479 {
5480 if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from))
5481 {
5482 new_rtx = (unique_copy && n_occurrences
5483 ? copy_rtx (to) : to);
5484 n_occurrences++;
5485 }
5486 else
5487 {
5488 new_rtx = subst (XVECEXP (x, i, j), from, to, 0, 0,
5489 unique_copy);
5490
5491 /* If this substitution failed, this whole thing
5492 fails. */
5493 if (GET_CODE (new_rtx) == CLOBBER
5494 && XEXP (new_rtx, 0) == const0_rtx)
5495 return new_rtx;
5496 }
5497
5498 SUBST (XVECEXP (x, i, j), new_rtx);
5499 }
5500 }
5501 else if (fmt[i] == 'e')
5502 {
5503 /* If this is a register being set, ignore it. */
5504 new_rtx = XEXP (x, i);
5505 if (in_dest
5506 && i == 0
5507 && (((code == SUBREG || code == ZERO_EXTRACT)
5508 && REG_P (new_rtx))
5509 || code == STRICT_LOW_PART))
5510 ;
5511
5512 else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
5513 {
5514 /* In general, don't install a subreg involving two
5515 modes not tieable. It can worsen register
5516 allocation, and can even make invalid reload
5517 insns, since the reg inside may need to be copied
5518 from in the outside mode, and that may be invalid
5519 if it is an fp reg copied in integer mode.
5520
5521 We allow two exceptions to this: It is valid if
5522 it is inside another SUBREG and the mode of that
5523 SUBREG and the mode of the inside of TO is
5524 tieable and it is valid if X is a SET that copies
5525 FROM to CC0. */
5526
5527 if (GET_CODE (to) == SUBREG
5528 && !targetm.modes_tieable_p (GET_MODE (to),
5529 GET_MODE (SUBREG_REG (to)))
5530 && ! (code == SUBREG
5531 && (targetm.modes_tieable_p
5532 (GET_MODE (x), GET_MODE (SUBREG_REG (to)))))
5533 && (!HAVE_cc0
5534 || (! (code == SET
5535 && i == 1
5536 && XEXP (x, 0) == cc0_rtx))))
5537 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5538
5539 if (code == SUBREG
5540 && REG_P (to)
5541 && REGNO (to) < FIRST_PSEUDO_REGISTER
5542 && simplify_subreg_regno (REGNO (to), GET_MODE (to),
5543 SUBREG_BYTE (x),
5544 GET_MODE (x)) < 0)
5545 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5546
5547 new_rtx = (unique_copy && n_occurrences ? copy_rtx (to) : to);
5548 n_occurrences++;
5549 }
5550 else
5551 /* If we are in a SET_DEST, suppress most cases unless we
5552 have gone inside a MEM, in which case we want to
5553 simplify the address. We assume here that things that
5554 are actually part of the destination have their inner
5555 parts in the first expression. This is true for SUBREG,
5556 STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
5557 things aside from REG and MEM that should appear in a
5558 SET_DEST. */
5559 new_rtx = subst (XEXP (x, i), from, to,
5560 (((in_dest
5561 && (code == SUBREG || code == STRICT_LOW_PART
5562 || code == ZERO_EXTRACT))
5563 || code == SET)
5564 && i == 0),
5565 code == IF_THEN_ELSE && i == 0,
5566 unique_copy);
5567
5568 /* If we found that we will have to reject this combination,
5569 indicate that by returning the CLOBBER ourselves, rather than
5570 an expression containing it. This will speed things up as
5571 well as prevent accidents where two CLOBBERs are considered
5572 to be equal, thus producing an incorrect simplification. */
5573
5574 if (GET_CODE (new_rtx) == CLOBBER && XEXP (new_rtx, 0) == const0_rtx)
5575 return new_rtx;
5576
5577 if (GET_CODE (x) == SUBREG && CONST_SCALAR_INT_P (new_rtx))
5578 {
5579 machine_mode mode = GET_MODE (x);
5580
5581 x = simplify_subreg (GET_MODE (x), new_rtx,
5582 GET_MODE (SUBREG_REG (x)),
5583 SUBREG_BYTE (x));
5584 if (! x)
5585 x = gen_rtx_CLOBBER (mode, const0_rtx);
5586 }
5587 else if (CONST_SCALAR_INT_P (new_rtx)
5588 && (GET_CODE (x) == ZERO_EXTEND
5589 || GET_CODE (x) == FLOAT
5590 || GET_CODE (x) == UNSIGNED_FLOAT))
5591 {
5592 x = simplify_unary_operation (GET_CODE (x), GET_MODE (x),
5593 new_rtx,
5594 GET_MODE (XEXP (x, 0)));
5595 if (!x)
5596 return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5597 }
5598 else
5599 SUBST (XEXP (x, i), new_rtx);
5600 }
5601 }
5602 }
5603
5604 /* Check if we are loading something from the constant pool via float
5605 extension; in this case we would undo compress_float_constant
5606 optimization and degenerate constant load to an immediate value. */
5607 if (GET_CODE (x) == FLOAT_EXTEND
5608 && MEM_P (XEXP (x, 0))
5609 && MEM_READONLY_P (XEXP (x, 0)))
5610 {
5611 rtx tmp = avoid_constant_pool_reference (x);
5612 if (x != tmp)
5613 return x;
5614 }
5615
5616 /* Try to simplify X. If the simplification changed the code, it is likely
5617 that further simplification will help, so loop, but limit the number
5618 of repetitions that will be performed. */
5619
5620 for (i = 0; i < 4; i++)
5621 {
5622 /* If X is sufficiently simple, don't bother trying to do anything
5623 with it. */
5624 if (code != CONST_INT && code != REG && code != CLOBBER)
5625 x = combine_simplify_rtx (x, op0_mode, in_dest, in_cond);
5626
5627 if (GET_CODE (x) == code)
5628 break;
5629
5630 code = GET_CODE (x);
5631
5632 /* We no longer know the original mode of operand 0 since we
5633 have changed the form of X.  */
5634 op0_mode = VOIDmode;
5635 }
5636
5637 return x;
5638 }
5639
5640 /* If X is a commutative operation whose operands are not in the canonical
5641 order, use substitutions to swap them. */
5642
5643 static void
5644 maybe_swap_commutative_operands (rtx x)
5645 {
5646 if (COMMUTATIVE_ARITH_P (x)
5647 && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5648 {
5649 rtx temp = XEXP (x, 0);
5650 SUBST (XEXP (x, 0), XEXP (x, 1));
5651 SUBST (XEXP (x, 1), temp);
5652 }
5653 }
5654
5655 /* Simplify X, a piece of RTL. We just operate on the expression at the
5656 outer level; call `subst' to simplify recursively. Return the new
5657 expression.
5658
5659 OP0_MODE is the original mode of XEXP (x, 0). IN_DEST is nonzero
5660 if we are inside a SET_DEST. IN_COND is nonzero if we are at the top level
5661 of a condition. */
5662
5663 static rtx
5664 combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest,
5665 int in_cond)
5666 {
5667 enum rtx_code code = GET_CODE (x);
5668 machine_mode mode = GET_MODE (x);
5669 scalar_int_mode int_mode;
5670 rtx temp;
5671 int i;
5672
5673 /* If this is a commutative operation, put a constant last and a complex
5674 expression first. We don't need to do this for comparisons here. */
5675 maybe_swap_commutative_operands (x);
5676
5677 /* Try to fold this expression in case we have constants that weren't
5678 present before. */
5679 temp = 0;
5680 switch (GET_RTX_CLASS (code))
5681 {
5682 case RTX_UNARY:
5683 if (op0_mode == VOIDmode)
5684 op0_mode = GET_MODE (XEXP (x, 0));
5685 temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
5686 break;
5687 case RTX_COMPARE:
5688 case RTX_COMM_COMPARE:
5689 {
5690 machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
5691 if (cmp_mode == VOIDmode)
5692 {
5693 cmp_mode = GET_MODE (XEXP (x, 1));
5694 if (cmp_mode == VOIDmode)
5695 cmp_mode = op0_mode;
5696 }
5697 temp = simplify_relational_operation (code, mode, cmp_mode,
5698 XEXP (x, 0), XEXP (x, 1));
5699 }
5700 break;
5701 case RTX_COMM_ARITH:
5702 case RTX_BIN_ARITH:
5703 temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5704 break;
5705 case RTX_BITFIELD_OPS:
5706 case RTX_TERNARY:
5707 temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0),
5708 XEXP (x, 1), XEXP (x, 2));
5709 break;
5710 default:
5711 break;
5712 }
5713
5714 if (temp)
5715 {
5716 x = temp;
5717 code = GET_CODE (temp);
5718 op0_mode = VOIDmode;
5719 mode = GET_MODE (temp);
5720 }
5721
5722 /* If this is a simple operation applied to an IF_THEN_ELSE, try
5723 applying it to the arms of the IF_THEN_ELSE. This often simplifies
5724 things. Check for cases where both arms are testing the same
5725 condition.
5726
5727 Don't do anything if all operands are very simple. */
5728
5729 if ((BINARY_P (x)
5730 && ((!OBJECT_P (XEXP (x, 0))
5731 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5732 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))
5733 || (!OBJECT_P (XEXP (x, 1))
5734 && ! (GET_CODE (XEXP (x, 1)) == SUBREG
5735 && OBJECT_P (SUBREG_REG (XEXP (x, 1)))))))
5736 || (UNARY_P (x)
5737 && (!OBJECT_P (XEXP (x, 0))
5738 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5739 && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))))
5740 {
5741 rtx cond, true_rtx, false_rtx;
5742
5743 cond = if_then_else_cond (x, &true_rtx, &false_rtx);
5744 if (cond != 0
5745 /* If everything is a comparison, what we have is highly unlikely
5746 to be simpler, so don't use it. */
5747 && ! (COMPARISON_P (x)
5748 && (COMPARISON_P (true_rtx) || COMPARISON_P (false_rtx)))
5749 /* Similarly, if we end up with one of the expressions the same
5750 as the original, it is certainly not simpler. */
5751 && ! rtx_equal_p (x, true_rtx)
5752 && ! rtx_equal_p (x, false_rtx))
5753 {
5754 rtx cop1 = const0_rtx;
5755 enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1);
5756
5757 if (cond_code == NE && COMPARISON_P (cond))
5758 return x;
5759
5760 /* Simplify the alternative arms; this may collapse the true and
5761 false arms to store-flag values. Be careful to use copy_rtx
5762 here since true_rtx or false_rtx might share RTL with x as a
5763 result of the if_then_else_cond call above. */
5764 true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5765 false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5766
5767 /* If true_rtx and false_rtx are not general_operands, an if_then_else
5768 is unlikely to be simpler. */
5769 if (general_operand (true_rtx, VOIDmode)
5770 && general_operand (false_rtx, VOIDmode))
5771 {
5772 enum rtx_code reversed;
5773
5774 /* Restarting if we generate a store-flag expression will cause
5775 us to loop. Just drop through in this case. */
5776
5777 /* If the result values are STORE_FLAG_VALUE and zero, we can
5778 just make the comparison operation. */
5779 if (true_rtx == const_true_rtx && false_rtx == const0_rtx)
5780 x = simplify_gen_relational (cond_code, mode, VOIDmode,
5781 cond, cop1);
5782 else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
5783 && ((reversed = reversed_comparison_code_parts
5784 (cond_code, cond, cop1, NULL))
5785 != UNKNOWN))
5786 x = simplify_gen_relational (reversed, mode, VOIDmode,
5787 cond, cop1);
5788
5789 /* Likewise, we can make the negate of a comparison operation
5790 if the result values are - STORE_FLAG_VALUE and zero. */
5791 else if (CONST_INT_P (true_rtx)
5792 && INTVAL (true_rtx) == - STORE_FLAG_VALUE
5793 && false_rtx == const0_rtx)
5794 x = simplify_gen_unary (NEG, mode,
5795 simplify_gen_relational (cond_code,
5796 mode, VOIDmode,
5797 cond, cop1),
5798 mode);
5799 else if (CONST_INT_P (false_rtx)
5800 && INTVAL (false_rtx) == - STORE_FLAG_VALUE
5801 && true_rtx == const0_rtx
5802 && ((reversed = reversed_comparison_code_parts
5803 (cond_code, cond, cop1, NULL))
5804 != UNKNOWN))
5805 x = simplify_gen_unary (NEG, mode,
5806 simplify_gen_relational (reversed,
5807 mode, VOIDmode,
5808 cond, cop1),
5809 mode);
5810 else
5811 return gen_rtx_IF_THEN_ELSE (mode,
5812 simplify_gen_relational (cond_code,
5813 mode,
5814 VOIDmode,
5815 cond,
5816 cop1),
5817 true_rtx, false_rtx);
5818
5819 code = GET_CODE (x);
5820 op0_mode = VOIDmode;
5821 }
5822 }
5823 }
5824
5825 /* First see if we can apply the inverse distributive law. */
5826 if (code == PLUS || code == MINUS
5827 || code == AND || code == IOR || code == XOR)
5828 {
5829 x = apply_distributive_law (x);
5830 code = GET_CODE (x);
5831 op0_mode = VOIDmode;
5832 }
5833
5834 /* If CODE is an associative operation not otherwise handled, see if we
5835 can associate some operands. This can win if they are constants or
5836 if they are logically related (i.e. (a & b) & a). */
5837 if ((code == PLUS || code == MINUS || code == MULT || code == DIV
5838 || code == AND || code == IOR || code == XOR
5839 || code == SMAX || code == SMIN || code == UMAX || code == UMIN)
5840 && ((INTEGRAL_MODE_P (mode) && code != DIV)
5841 || (flag_associative_math && FLOAT_MODE_P (mode))))
5842 {
5843 if (GET_CODE (XEXP (x, 0)) == code)
5844 {
5845 rtx other = XEXP (XEXP (x, 0), 0);
5846 rtx inner_op0 = XEXP (XEXP (x, 0), 1);
5847 rtx inner_op1 = XEXP (x, 1);
5848 rtx inner;
5849
5850 /* Make sure we pass the constant operand if any as the second
5851 one if this is a commutative operation. */
5852 if (CONSTANT_P (inner_op0) && COMMUTATIVE_ARITH_P (x))
5853 std::swap (inner_op0, inner_op1);
5854 inner = simplify_binary_operation (code == MINUS ? PLUS
5855 : code == DIV ? MULT
5856 : code,
5857 mode, inner_op0, inner_op1);
5858
5859 /* For commutative operations, try the other pair if that one
5860 didn't simplify. */
5861 if (inner == 0 && COMMUTATIVE_ARITH_P (x))
5862 {
5863 other = XEXP (XEXP (x, 0), 1);
5864 inner = simplify_binary_operation (code, mode,
5865 XEXP (XEXP (x, 0), 0),
5866 XEXP (x, 1));
5867 }
5868
5869 if (inner)
5870 return simplify_gen_binary (code, mode, other, inner);
5871 }
5872 }
5873
5874 /* A little bit of algebraic simplification here. */
5875 switch (code)
5876 {
5877 case MEM:
5878 /* Ensure that our address has any ASHIFTs converted to MULT in case
5879 address-recognizing predicates are called later. */
5880 temp = make_compound_operation (XEXP (x, 0), MEM);
5881 SUBST (XEXP (x, 0), temp);
5882 break;
5883
5884 case SUBREG:
5885 if (op0_mode == VOIDmode)
5886 op0_mode = GET_MODE (SUBREG_REG (x));
5887
5888 /* See if this can be moved to simplify_subreg. */
5889 if (CONSTANT_P (SUBREG_REG (x))
5890 && known_eq (subreg_lowpart_offset (mode, op0_mode), SUBREG_BYTE (x))
5891 /* Don't call gen_lowpart if the inner mode
5892 is VOIDmode and we cannot simplify it, as SUBREG without
5893 inner mode is invalid. */
5894 && (GET_MODE (SUBREG_REG (x)) != VOIDmode
5895 || gen_lowpart_common (mode, SUBREG_REG (x))))
5896 return gen_lowpart (mode, SUBREG_REG (x));
5897
5898 if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC)
5899 break;
5900 {
5901 rtx temp;
5902 temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,
5903 SUBREG_BYTE (x));
5904 if (temp)
5905 return temp;
5906
5907 /* If op is known to have all lower bits zero, the result is zero. */
5908 scalar_int_mode int_mode, int_op0_mode;
5909 if (!in_dest
5910 && is_a <scalar_int_mode> (mode, &int_mode)
5911 && is_a <scalar_int_mode> (op0_mode, &int_op0_mode)
5912 && (GET_MODE_PRECISION (int_mode)
5913 < GET_MODE_PRECISION (int_op0_mode))
5914 && known_eq (subreg_lowpart_offset (int_mode, int_op0_mode),
5915 SUBREG_BYTE (x))
5916 && HWI_COMPUTABLE_MODE_P (int_op0_mode)
5917 && ((nonzero_bits (SUBREG_REG (x), int_op0_mode)
5918 & GET_MODE_MASK (int_mode)) == 0)
5919 && !side_effects_p (SUBREG_REG (x)))
5920 return CONST0_RTX (int_mode);
5921 }
5922
5923 /* Don't change the mode of the MEM if that would change the meaning
5924 of the address. */
5925 if (MEM_P (SUBREG_REG (x))
5926 && (MEM_VOLATILE_P (SUBREG_REG (x))
5927 || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0),
5928 MEM_ADDR_SPACE (SUBREG_REG (x)))))
5929 return gen_rtx_CLOBBER (mode, const0_rtx);
5930
5931 /* Note that we cannot do any narrowing for non-constants since
5932 we might have been counting on using the fact that some bits were
5933 zero. We now do this in the SET. */
5934
5935 break;
5936
5937 case NEG:
5938 temp = expand_compound_operation (XEXP (x, 0));
5939
5940 /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
5941 replaced by (lshiftrt X C). This will convert
5942 (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y). */
5943
5944 if (GET_CODE (temp) == ASHIFTRT
5945 && CONST_INT_P (XEXP (temp, 1))
5946 && INTVAL (XEXP (temp, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
5947 return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
5948 INTVAL (XEXP (temp, 1)));
5949
5950 /* If X has only a single bit that might be nonzero, say, bit I, convert
5951 (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of
5952 MODE minus 1. This will convert (neg (zero_extract X 1 Y)) to
5953 (sign_extract X 1 Y). But only do this if TEMP isn't a register
5954 or a SUBREG of one since we'd be making the expression more
5955 complex if it was just a register. */
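/* E.g. (illustrative, SImode): if TEMP can only be 0 or 4 (bit 2),
   (neg TEMP) becomes
     (ashiftrt (ashift TEMP (const_int 29)) (const_int 29)),
   i.e. 0 or -4.  */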
5956
5957 if (!REG_P (temp)
5958 && ! (GET_CODE (temp) == SUBREG
5959 && REG_P (SUBREG_REG (temp)))
5960 && is_a <scalar_int_mode> (mode, &int_mode)
5961 && (i = exact_log2 (nonzero_bits (temp, int_mode))) >= 0)
5962 {
5963 rtx temp1 = simplify_shift_const
5964 (NULL_RTX, ASHIFTRT, int_mode,
5965 simplify_shift_const (NULL_RTX, ASHIFT, int_mode, temp,
5966 GET_MODE_PRECISION (int_mode) - 1 - i),
5967 GET_MODE_PRECISION (int_mode) - 1 - i);
5968
5969 /* If all we did was surround TEMP with the two shifts, we
5970 haven't improved anything, so don't use it. Otherwise,
5971 we are better off with TEMP1. */
5972 if (GET_CODE (temp1) != ASHIFTRT
5973 || GET_CODE (XEXP (temp1, 0)) != ASHIFT
5974 || XEXP (XEXP (temp1, 0), 0) != temp)
5975 return temp1;
5976 }
5977 break;
5978
5979 case TRUNCATE:
5980 /* We can't handle truncation to a partial integer mode here
5981 because we don't know the real bitsize of the partial
5982 integer mode. */
5983 if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
5984 break;
5985
5986 if (HWI_COMPUTABLE_MODE_P (mode))
5987 SUBST (XEXP (x, 0),
5988 force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
5989 GET_MODE_MASK (mode), 0));
5990
5991 /* We can truncate a constant value and return it. */
5992 if (CONST_INT_P (XEXP (x, 0)))
5993 return gen_int_mode (INTVAL (XEXP (x, 0)), mode);
5994
5995 /* Similarly to what we do in simplify-rtx.c, a truncate of a register
5996 whose value is a comparison can be replaced with a subreg if
5997 STORE_FLAG_VALUE permits. */
5998 if (HWI_COMPUTABLE_MODE_P (mode)
5999 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
6000 && (temp = get_last_value (XEXP (x, 0)))
6001 && COMPARISON_P (temp))
6002 return gen_lowpart (mode, XEXP (x, 0));
6003 break;
6004
6005 case CONST:
6006 /* (const (const X)) can become (const X). Do it this way rather than
6007 returning the inner CONST since CONST can be shared with a
6008 REG_EQUAL note. */
6009 if (GET_CODE (XEXP (x, 0)) == CONST)
6010 SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
6011 break;
6012
6013 case LO_SUM:
6014 /* Convert (lo_sum (high FOO) FOO) to FOO. This is necessary so we
6015 can add in an offset. find_split_point will split this address up
6016 again if it doesn't match. */
6017 if (HAVE_lo_sum && GET_CODE (XEXP (x, 0)) == HIGH
6018 && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6019 return XEXP (x, 1);
6020 break;
6021
6022 case PLUS:
6023 /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
6024 when c is (const_int (pow2 + 1) / 2) is a sign extension of a
6025 bit-field and can be replaced by either a sign_extend or a
6026 sign_extract. The `and' may be a zero_extend and the two
6027 <c>, -<c> constants may be reversed. */
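/* Worked example (illustrative, SImode):
     (plus (xor (and X (const_int 7)) (const_int 4)) (const_int -4))
   sign-extends a 3-bit field: i == 2, the AND mask is
   (1 << (i + 1)) - 1 == 7, and the result below is
     (ashiftrt (ashift X (const_int 29)) (const_int 29)).  */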
6028 if (GET_CODE (XEXP (x, 0)) == XOR
6029 && is_a <scalar_int_mode> (mode, &int_mode)
6030 && CONST_INT_P (XEXP (x, 1))
6031 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
6032 && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1))
6033 && ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
6034 || (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
6035 && HWI_COMPUTABLE_MODE_P (int_mode)
6036 && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
6037 && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
6038 && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
6039 == (HOST_WIDE_INT_1U << (i + 1)) - 1))
6040 || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
6041 && known_eq ((GET_MODE_PRECISION
6042 (GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))),
6043 (unsigned int) i + 1))))
6044 return simplify_shift_const
6045 (NULL_RTX, ASHIFTRT, int_mode,
6046 simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6047 XEXP (XEXP (XEXP (x, 0), 0), 0),
6048 GET_MODE_PRECISION (int_mode) - (i + 1)),
6049 GET_MODE_PRECISION (int_mode) - (i + 1));
6050
6051 /* If only the low-order bit of X is possibly nonzero, (plus x -1)
6052 can become (ashiftrt (ashift (xor x 1) C) C) where C is
6053 the bitsize of the mode - 1. This allows simplification of
6054 "a = (b & 8) == 0;" */
6055 if (XEXP (x, 1) == constm1_rtx
6056 && !REG_P (XEXP (x, 0))
6057 && ! (GET_CODE (XEXP (x, 0)) == SUBREG
6058 && REG_P (SUBREG_REG (XEXP (x, 0))))
6059 && is_a <scalar_int_mode> (mode, &int_mode)
6060 && nonzero_bits (XEXP (x, 0), int_mode) == 1)
6061 return simplify_shift_const
6062 (NULL_RTX, ASHIFTRT, int_mode,
6063 simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6064 gen_rtx_XOR (int_mode, XEXP (x, 0),
6065 const1_rtx),
6066 GET_MODE_PRECISION (int_mode) - 1),
6067 GET_MODE_PRECISION (int_mode) - 1);
6068
6069 /* If we are adding two things that have no bits in common, convert
6070 the addition into an IOR. This will often be further simplified,
6071 for example in cases like ((a & 1) + (a & 2)), which can
6072 become a & 3. */
6073
6074 if (HWI_COMPUTABLE_MODE_P (mode)
6075 && (nonzero_bits (XEXP (x, 0), mode)
6076 & nonzero_bits (XEXP (x, 1), mode)) == 0)
6077 {
6078 /* Try to simplify the expression further. */
6079 rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
6080 temp = combine_simplify_rtx (tor, VOIDmode, in_dest, 0);
6081
6082 /* If we could, great. If not, do not go ahead with the IOR
6083 replacement, since PLUS appears in many special purpose
6084 address arithmetic instructions. */
6085 if (GET_CODE (temp) != CLOBBER
6086 && (GET_CODE (temp) != IOR
6087 || ((XEXP (temp, 0) != XEXP (x, 0)
6088 || XEXP (temp, 1) != XEXP (x, 1))
6089 && (XEXP (temp, 0) != XEXP (x, 1)
6090 || XEXP (temp, 1) != XEXP (x, 0)))))
6091 return temp;
6092 }
6093
6094 /* Canonicalize x + x into x << 1. */
6095 if (GET_MODE_CLASS (mode) == MODE_INT
6096 && rtx_equal_p (XEXP (x, 0), XEXP (x, 1))
6097 && !side_effects_p (XEXP (x, 0)))
6098 return simplify_gen_binary (ASHIFT, mode, XEXP (x, 0), const1_rtx);
6099
6100 break;
6101
6102 case MINUS:
6103 /* (minus <foo> (and <foo> (const_int -pow2))) becomes
6104 (and <foo> (const_int pow2-1)) */
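/* E.g. (illustrative): (minus X (and X (const_int -8))) subtracts X
   rounded down to a multiple of 8, so it becomes
   (and X (const_int 7)).  */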
6105 if (is_a <scalar_int_mode> (mode, &int_mode)
6106 && GET_CODE (XEXP (x, 1)) == AND
6107 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
6108 && pow2p_hwi (-UINTVAL (XEXP (XEXP (x, 1), 1)))
6109 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
6110 return simplify_and_const_int (NULL_RTX, int_mode, XEXP (x, 0),
6111 -INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
6112 break;
6113
6114 case MULT:
6115 /* If we have (mult (plus A B) C), apply the distributive law and then
6116 the inverse distributive law to see if things simplify. This
6117 occurs mostly in addresses, often when unrolling loops. */
6118
6119 if (GET_CODE (XEXP (x, 0)) == PLUS)
6120 {
6121 rtx result = distribute_and_simplify_rtx (x, 0);
6122 if (result)
6123 return result;
6124 }
6125
6126 /* Try to simplify a*(b/c) as (a*b)/c.  */
6127 if (FLOAT_MODE_P (mode) && flag_associative_math
6128 && GET_CODE (XEXP (x, 0)) == DIV)
6129 {
6130 rtx tem = simplify_binary_operation (MULT, mode,
6131 XEXP (XEXP (x, 0), 0),
6132 XEXP (x, 1));
6133 if (tem)
6134 return simplify_gen_binary (DIV, mode, tem, XEXP (XEXP (x, 0), 1));
6135 }
6136 break;
6137
6138 case UDIV:
6139 /* If this is a divide by a power of two, treat it as a shift if
6140 its first operand is a shift. */
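/* E.g. (illustrative): (udiv (lshiftrt X (const_int 2)) (const_int 8))
   becomes (lshiftrt (lshiftrt X (const_int 2)) (const_int 3)), which
   simplify_shift_const can then merge into a single shift.  */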
6141 if (is_a <scalar_int_mode> (mode, &int_mode)
6142 && CONST_INT_P (XEXP (x, 1))
6143 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
6144 && (GET_CODE (XEXP (x, 0)) == ASHIFT
6145 || GET_CODE (XEXP (x, 0)) == LSHIFTRT
6146 || GET_CODE (XEXP (x, 0)) == ASHIFTRT
6147 || GET_CODE (XEXP (x, 0)) == ROTATE
6148 || GET_CODE (XEXP (x, 0)) == ROTATERT))
6149 return simplify_shift_const (NULL_RTX, LSHIFTRT, int_mode,
6150 XEXP (x, 0), i);
6151 break;
6152
6153 case EQ: case NE:
6154 case GT: case GTU: case GE: case GEU:
6155 case LT: case LTU: case LE: case LEU:
6156 case UNEQ: case LTGT:
6157 case UNGT: case UNGE:
6158 case UNLT: case UNLE:
6159 case UNORDERED: case ORDERED:
6160 /* If the first operand is a condition code, we can't do anything
6161 with it. */
6162 if (GET_CODE (XEXP (x, 0)) == COMPARE
6163 || (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC
6164 && ! CC0_P (XEXP (x, 0))))
6165 {
6166 rtx op0 = XEXP (x, 0);
6167 rtx op1 = XEXP (x, 1);
6168 enum rtx_code new_code;
6169
6170 if (GET_CODE (op0) == COMPARE)
6171 op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
6172
6173 /* Simplify our comparison, if possible. */
6174 new_code = simplify_comparison (code, &op0, &op1);
6175
6176 /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X
6177 if only the low-order bit is possibly nonzero in X (such as when
6178 X is a ZERO_EXTRACT of one bit). Similarly, we can convert EQ to
6179 (xor X 1) or (minus 1 X); we use the former. Finally, if X is
6180 known to be either 0 or -1, NE becomes a NEG and EQ becomes
6181 (plus X 1).
6182
6183 Remove any ZERO_EXTRACT we made when thinking this was a
6184 comparison. It may now be simpler to use, e.g., an AND. If a
6185 ZERO_EXTRACT is indeed appropriate, it will be placed back by
6186 the call to make_compound_operation in the SET case.
6187
6188 Don't apply these optimizations if the caller would
6189 prefer a comparison rather than a value.
6190 E.g., for the condition in an IF_THEN_ELSE most targets need
6191 an explicit comparison. */
6192
6193 if (in_cond)
6194 ;
6195
6196 else if (STORE_FLAG_VALUE == 1
6197 && new_code == NE
6198 && is_int_mode (mode, &int_mode)
6199 && op1 == const0_rtx
6200 && int_mode == GET_MODE (op0)
6201 && nonzero_bits (op0, int_mode) == 1)
6202 return gen_lowpart (int_mode,
6203 expand_compound_operation (op0));
6204
6205 else if (STORE_FLAG_VALUE == 1
6206 && new_code == NE
6207 && is_int_mode (mode, &int_mode)
6208 && op1 == const0_rtx
6209 && int_mode == GET_MODE (op0)
6210 && (num_sign_bit_copies (op0, int_mode)
6211 == GET_MODE_PRECISION (int_mode)))
6212 {
6213 op0 = expand_compound_operation (op0);
6214 return simplify_gen_unary (NEG, int_mode,
6215 gen_lowpart (int_mode, op0),
6216 int_mode);
6217 }
6218
6219 else if (STORE_FLAG_VALUE == 1
6220 && new_code == EQ
6221 && is_int_mode (mode, &int_mode)
6222 && op1 == const0_rtx
6223 && int_mode == GET_MODE (op0)
6224 && nonzero_bits (op0, int_mode) == 1)
6225 {
6226 op0 = expand_compound_operation (op0);
6227 return simplify_gen_binary (XOR, int_mode,
6228 gen_lowpart (int_mode, op0),
6229 const1_rtx);
6230 }
6231
6232 else if (STORE_FLAG_VALUE == 1
6233 && new_code == EQ
6234 && is_int_mode (mode, &int_mode)
6235 && op1 == const0_rtx
6236 && int_mode == GET_MODE (op0)
6237 && (num_sign_bit_copies (op0, int_mode)
6238 == GET_MODE_PRECISION (int_mode)))
6239 {
6240 op0 = expand_compound_operation (op0);
6241 return plus_constant (int_mode, gen_lowpart (int_mode, op0), 1);
6242 }
6243
6244 /* If STORE_FLAG_VALUE is -1, we have cases similar to
6245 those above. */
6246 if (in_cond)
6247 ;
6248
6249 else if (STORE_FLAG_VALUE == -1
6250 && new_code == NE
6251 && is_int_mode (mode, &int_mode)
6252 && op1 == const0_rtx
6253 && int_mode == GET_MODE (op0)
6254 && (num_sign_bit_copies (op0, int_mode)
6255 == GET_MODE_PRECISION (int_mode)))
6256 return gen_lowpart (int_mode, expand_compound_operation (op0));
6257
6258 else if (STORE_FLAG_VALUE == -1
6259 && new_code == NE
6260 && is_int_mode (mode, &int_mode)
6261 && op1 == const0_rtx
6262 && int_mode == GET_MODE (op0)
6263 && nonzero_bits (op0, int_mode) == 1)
6264 {
6265 op0 = expand_compound_operation (op0);
6266 return simplify_gen_unary (NEG, int_mode,
6267 gen_lowpart (int_mode, op0),
6268 int_mode);
6269 }
6270
6271 else if (STORE_FLAG_VALUE == -1
6272 && new_code == EQ
6273 && is_int_mode (mode, &int_mode)
6274 && op1 == const0_rtx
6275 && int_mode == GET_MODE (op0)
6276 && (num_sign_bit_copies (op0, int_mode)
6277 == GET_MODE_PRECISION (int_mode)))
6278 {
6279 op0 = expand_compound_operation (op0);
6280 return simplify_gen_unary (NOT, int_mode,
6281 gen_lowpart (int_mode, op0),
6282 int_mode);
6283 }
6284
6285 /* If X is 0/1, (eq X 0) is X-1. */
6286 else if (STORE_FLAG_VALUE == -1
6287 && new_code == EQ
6288 && is_int_mode (mode, &int_mode)
6289 && op1 == const0_rtx
6290 && int_mode == GET_MODE (op0)
6291 && nonzero_bits (op0, int_mode) == 1)
6292 {
6293 op0 = expand_compound_operation (op0);
6294 return plus_constant (int_mode, gen_lowpart (int_mode, op0), -1);
6295 }
6296
6297 /* If STORE_FLAG_VALUE says to just test the sign bit and X has just
6298 one bit that might be nonzero, we can convert (ne x 0) to
6299 (ashift x c) where C puts the bit in the sign bit. Remove any
6300 AND with STORE_FLAG_VALUE when we are done, since we are only
6301 going to test the sign bit. */
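/* E.g. (illustrative, SImode, STORE_FLAG_VALUE == the sign bit): if
   only bit 3 of X can be nonzero, (ne X (const_int 0)) becomes
   (ashift X (const_int 28)), moving that bit into the sign bit.  */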
6302 if (new_code == NE
6303 && is_int_mode (mode, &int_mode)
6304 && HWI_COMPUTABLE_MODE_P (int_mode)
6305 && val_signbit_p (int_mode, STORE_FLAG_VALUE)
6306 && op1 == const0_rtx
6307 && int_mode == GET_MODE (op0)
6308 && (i = exact_log2 (nonzero_bits (op0, int_mode))) >= 0)
6309 {
6310 x = simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6311 expand_compound_operation (op0),
6312 GET_MODE_PRECISION (int_mode) - 1 - i);
6313 if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
6314 return XEXP (x, 0);
6315 else
6316 return x;
6317 }
6318
6319 /* If the code changed, return a whole new comparison.
6320 We also need to avoid using SUBST in cases where
6321 simplify_comparison has widened a comparison with a CONST_INT,
6322 since in that case the wider CONST_INT may fail the sanity
6323 checks in do_SUBST. */
6324 if (new_code != code
6325 || (CONST_INT_P (op1)
6326 && GET_MODE (op0) != GET_MODE (XEXP (x, 0))
6327 && GET_MODE (op0) != GET_MODE (XEXP (x, 1))))
6328 return gen_rtx_fmt_ee (new_code, mode, op0, op1);
6329
6330 /* Otherwise, keep this operation, but maybe change its operands.
6331 This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR). */
6332 SUBST (XEXP (x, 0), op0);
6333 SUBST (XEXP (x, 1), op1);
6334 }
6335 break;
6336
6337 case IF_THEN_ELSE:
6338 return simplify_if_then_else (x);
6339
6340 case ZERO_EXTRACT:
6341 case SIGN_EXTRACT:
6342 case ZERO_EXTEND:
6343 case SIGN_EXTEND:
6344 /* If we are processing SET_DEST, we are done. */
6345 if (in_dest)
6346 return x;
6347
6348 return expand_compound_operation (x);
6349
6350 case SET:
6351 return simplify_set (x);
6352
6353 case AND:
6354 case IOR:
6355 return simplify_logical (x);
6356
6357 case ASHIFT:
6358 case LSHIFTRT:
6359 case ASHIFTRT:
6360 case ROTATE:
6361 case ROTATERT:
6362 /* If this is a shift by a constant amount, simplify it. */
6363 if (CONST_INT_P (XEXP (x, 1)))
6364 return simplify_shift_const (x, code, mode, XEXP (x, 0),
6365 INTVAL (XEXP (x, 1)));
6366
6367 else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
6368 SUBST (XEXP (x, 1),
6369 force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
6370 (HOST_WIDE_INT_1U
6371 << exact_log2 (GET_MODE_UNIT_BITSIZE
6372 (GET_MODE (x))))
6373 - 1,
6374 0));
6375 break;
6376
6377 default:
6378 break;
6379 }
6380
6381 return x;
6382 }
6383
6384 /* Simplify X, an IF_THEN_ELSE expression. Return the new expression. */
6385
6386 static rtx
6387 simplify_if_then_else (rtx x)
6388 {
6389 machine_mode mode = GET_MODE (x);
6390 rtx cond = XEXP (x, 0);
6391 rtx true_rtx = XEXP (x, 1);
6392 rtx false_rtx = XEXP (x, 2);
6393 enum rtx_code true_code = GET_CODE (cond);
6394 int comparison_p = COMPARISON_P (cond);
6395 rtx temp;
6396 int i;
6397 enum rtx_code false_code;
6398 rtx reversed;
6399 scalar_int_mode int_mode, inner_mode;
6400
6401 /* Simplify storing of the truth value. */
6402 if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
6403 return simplify_gen_relational (true_code, mode, VOIDmode,
6404 XEXP (cond, 0), XEXP (cond, 1));
6405
6406 /* Also when the truth value has to be reversed. */
6407 if (comparison_p
6408 && true_rtx == const0_rtx && false_rtx == const_true_rtx
6409 && (reversed = reversed_comparison (cond, mode)))
6410 return reversed;
6411
6412 /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
6413 in it is being compared against certain values. Get the true and false
6414 comparisons and see if that says anything about the value of each arm. */
6415
6416 if (comparison_p
6417 && ((false_code = reversed_comparison_code (cond, NULL))
6418 != UNKNOWN)
6419 && REG_P (XEXP (cond, 0)))
6420 {
6421 HOST_WIDE_INT nzb;
6422 rtx from = XEXP (cond, 0);
6423 rtx true_val = XEXP (cond, 1);
6424 rtx false_val = true_val;
6425 int swapped = 0;
6426
6427 /* If FALSE_CODE is EQ, swap the codes and arms. */
6428
6429 if (false_code == EQ)
6430 {
6431 swapped = 1, true_code = EQ, false_code = NE;
6432 std::swap (true_rtx, false_rtx);
6433 }
6434
6435 scalar_int_mode from_mode;
6436 if (is_a <scalar_int_mode> (GET_MODE (from), &from_mode))
6437 {
6438 /* If we are comparing against zero and the expression being
6439 tested has only a single bit that might be nonzero, that is
6440 its value when it is not equal to zero. Similarly if it is
6441 known to be -1 or 0. */
6442 if (true_code == EQ
6443 && true_val == const0_rtx
6444 && pow2p_hwi (nzb = nonzero_bits (from, from_mode)))
6445 {
6446 false_code = EQ;
6447 false_val = gen_int_mode (nzb, from_mode);
6448 }
6449 else if (true_code == EQ
6450 && true_val == const0_rtx
6451 && (num_sign_bit_copies (from, from_mode)
6452 == GET_MODE_PRECISION (from_mode)))
6453 {
6454 false_code = EQ;
6455 false_val = constm1_rtx;
6456 }
6457 }
6458
6459 /* Now simplify an arm if we know the value of the register in the
6460 branch and it is used in the arm. Be careful due to the potential
6461 of locally-shared RTL. */
6462
6463 if (reg_mentioned_p (from, true_rtx))
6464 true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code,
6465 from, true_val),
6466 pc_rtx, pc_rtx, 0, 0, 0);
6467 if (reg_mentioned_p (from, false_rtx))
6468 false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
6469 from, false_val),
6470 pc_rtx, pc_rtx, 0, 0, 0);
6471
6472 SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx);
6473 SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx);
6474
6475 true_rtx = XEXP (x, 1);
6476 false_rtx = XEXP (x, 2);
6477 true_code = GET_CODE (cond);
6478 }
6479
6480 /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
6481 reversed, do so to avoid needing two sets of patterns for
6482 subtract-and-branch insns. Similarly if we have a constant in the true
6483 arm, if the false arm is the same as the first operand of the comparison,
6484 or if the false arm is more complicated than the true arm. */
6485
6486 if (comparison_p
6487 && reversed_comparison_code (cond, NULL) != UNKNOWN
6488 && (true_rtx == pc_rtx
6489 || (CONSTANT_P (true_rtx)
6490 && !CONST_INT_P (false_rtx) && false_rtx != pc_rtx)
6491 || true_rtx == const0_rtx
6492 || (OBJECT_P (true_rtx) && !OBJECT_P (false_rtx))
6493 || (GET_CODE (true_rtx) == SUBREG && OBJECT_P (SUBREG_REG (true_rtx))
6494 && !OBJECT_P (false_rtx))
6495 || reg_mentioned_p (true_rtx, false_rtx)
6496 || rtx_equal_p (false_rtx, XEXP (cond, 0))))
6497 {
6498 true_code = reversed_comparison_code (cond, NULL);
6499 SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond)));
6500 SUBST (XEXP (x, 1), false_rtx);
6501 SUBST (XEXP (x, 2), true_rtx);
6502
6503 std::swap (true_rtx, false_rtx);
6504 cond = XEXP (x, 0);
6505
6506 /* It is possible that the conditional has been simplified out. */
6507 true_code = GET_CODE (cond);
6508 comparison_p = COMPARISON_P (cond);
6509 }
6510
6511 /* If the two arms are identical, we don't need the comparison. */
6512
6513 if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond))
6514 return true_rtx;
6515
6516 /* Convert a == b ? b : a to "a". */
6517 if (true_code == EQ && ! side_effects_p (cond)
6518 && !HONOR_NANS (mode)
6519 && rtx_equal_p (XEXP (cond, 0), false_rtx)
6520 && rtx_equal_p (XEXP (cond, 1), true_rtx))
6521 return false_rtx;
6522 else if (true_code == NE && ! side_effects_p (cond)
6523 && !HONOR_NANS (mode)
6524 && rtx_equal_p (XEXP (cond, 0), true_rtx)
6525 && rtx_equal_p (XEXP (cond, 1), false_rtx))
6526 return true_rtx;
6527
6528 /* Look for cases where we have (abs x) or (neg (abs X)). */
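/* E.g. (if_then_else (ge X (const_int 0)) X (neg X)) becomes (abs X),
   while (if_then_else (lt X (const_int 0)) X (neg X)) becomes
   (neg (abs X)). */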
6529
6530 if (GET_MODE_CLASS (mode) == MODE_INT
6531 && comparison_p
6532 && XEXP (cond, 1) == const0_rtx
6533 && GET_CODE (false_rtx) == NEG
6534 && rtx_equal_p (true_rtx, XEXP (false_rtx, 0))
6535 && rtx_equal_p (true_rtx, XEXP (cond, 0))
6536 && ! side_effects_p (true_rtx))
6537 switch (true_code)
6538 {
6539 case GT:
6540 case GE:
6541 return simplify_gen_unary (ABS, mode, true_rtx, mode);
6542 case LT:
6543 case LE:
6544 return
6545 simplify_gen_unary (NEG, mode,
6546 simplify_gen_unary (ABS, mode, true_rtx, mode),
6547 mode);
6548 default:
6549 break;
6550 }
6551
6552 /* Look for MIN or MAX. */
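/* E.g. (if_then_else (gt A B) A B) becomes (smax A B) and
   (if_then_else (ltu A B) A B) becomes (umin A B). */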
6553
6554 if ((! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
6555 && comparison_p
6556 && rtx_equal_p (XEXP (cond, 0), true_rtx)
6557 && rtx_equal_p (XEXP (cond, 1), false_rtx)
6558 && ! side_effects_p (cond))
6559 switch (true_code)
6560 {
6561 case GE:
6562 case GT:
6563 return simplify_gen_binary (SMAX, mode, true_rtx, false_rtx);
6564 case LE:
6565 case LT:
6566 return simplify_gen_binary (SMIN, mode, true_rtx, false_rtx);
6567 case GEU:
6568 case GTU:
6569 return simplify_gen_binary (UMAX, mode, true_rtx, false_rtx);
6570 case LEU:
6571 case LTU:
6572 return simplify_gen_binary (UMIN, mode, true_rtx, false_rtx);
6573 default:
6574 break;
6575 }
6576
6577 /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
6578 second operand is zero, this can be done as (OP Z (mult COND C2)) where
6579 C2 = C1 * STORE_FLAG_VALUE. Similarly if OP has an outer ZERO_EXTEND or
6580 SIGN_EXTEND as long as Z is already extended (so we don't destroy it).
6581 We can do this kind of thing in some cases when STORE_FLAG_VALUE is
6582 neither 1 nor -1, but it isn't worth checking for. */
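/* For instance, with STORE_FLAG_VALUE == 1,
   (if_then_else COND (plus Z (const_int 4)) Z) becomes
   (plus Z (mult COND (const_int 4))): the product is 4 when COND holds
   and 0 when it does not, and adding 0 leaves Z unchanged. */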
6583
6584 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
6585 && comparison_p
6586 && is_int_mode (mode, &int_mode)
6587 && ! side_effects_p (x))
6588 {
6589 rtx t = make_compound_operation (true_rtx, SET);
6590 rtx f = make_compound_operation (false_rtx, SET);
6591 rtx cond_op0 = XEXP (cond, 0);
6592 rtx cond_op1 = XEXP (cond, 1);
6593 enum rtx_code op = UNKNOWN, extend_op = UNKNOWN;
6594 scalar_int_mode m = int_mode;
6595 rtx z = 0, c1 = NULL_RTX;
6596
6597 if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
6598 || GET_CODE (t) == IOR || GET_CODE (t) == XOR
6599 || GET_CODE (t) == ASHIFT
6600 || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT)
6601 && rtx_equal_p (XEXP (t, 0), f))
6602 c1 = XEXP (t, 1), op = GET_CODE (t), z = f;
6603
6604 /* If an identity-zero op is commutative, check whether there
6605 would be a match if we swapped the operands. */
6606 else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR
6607 || GET_CODE (t) == XOR)
6608 && rtx_equal_p (XEXP (t, 1), f))
6609 c1 = XEXP (t, 0), op = GET_CODE (t), z = f;
6610 else if (GET_CODE (t) == SIGN_EXTEND
6611 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6612 && (GET_CODE (XEXP (t, 0)) == PLUS
6613 || GET_CODE (XEXP (t, 0)) == MINUS
6614 || GET_CODE (XEXP (t, 0)) == IOR
6615 || GET_CODE (XEXP (t, 0)) == XOR
6616 || GET_CODE (XEXP (t, 0)) == ASHIFT
6617 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6618 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6619 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6620 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6621 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6622 && (num_sign_bit_copies (f, GET_MODE (f))
6623 > (unsigned int)
6624 (GET_MODE_PRECISION (int_mode)
6625 - GET_MODE_PRECISION (inner_mode))))
6626 {
6627 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6628 extend_op = SIGN_EXTEND;
6629 m = inner_mode;
6630 }
6631 else if (GET_CODE (t) == SIGN_EXTEND
6632 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6633 && (GET_CODE (XEXP (t, 0)) == PLUS
6634 || GET_CODE (XEXP (t, 0)) == IOR
6635 || GET_CODE (XEXP (t, 0)) == XOR)
6636 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6637 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6638 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6639 && (num_sign_bit_copies (f, GET_MODE (f))
6640 > (unsigned int)
6641 (GET_MODE_PRECISION (int_mode)
6642 - GET_MODE_PRECISION (inner_mode))))
6643 {
6644 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6645 extend_op = SIGN_EXTEND;
6646 m = inner_mode;
6647 }
6648 else if (GET_CODE (t) == ZERO_EXTEND
6649 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6650 && (GET_CODE (XEXP (t, 0)) == PLUS
6651 || GET_CODE (XEXP (t, 0)) == MINUS
6652 || GET_CODE (XEXP (t, 0)) == IOR
6653 || GET_CODE (XEXP (t, 0)) == XOR
6654 || GET_CODE (XEXP (t, 0)) == ASHIFT
6655 || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6656 || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6657 && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6658 && HWI_COMPUTABLE_MODE_P (int_mode)
6659 && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6660 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6661 && ((nonzero_bits (f, GET_MODE (f))
6662 & ~GET_MODE_MASK (inner_mode))
6663 == 0))
6664 {
6665 c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6666 extend_op = ZERO_EXTEND;
6667 m = inner_mode;
6668 }
6669 else if (GET_CODE (t) == ZERO_EXTEND
6670 && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6671 && (GET_CODE (XEXP (t, 0)) == PLUS
6672 || GET_CODE (XEXP (t, 0)) == IOR
6673 || GET_CODE (XEXP (t, 0)) == XOR)
6674 && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6675 && HWI_COMPUTABLE_MODE_P (int_mode)
6676 && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6677 && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6678 && ((nonzero_bits (f, GET_MODE (f))
6679 & ~GET_MODE_MASK (inner_mode))
6680 == 0))
6681 {
6682 c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6683 extend_op = ZERO_EXTEND;
6684 m = inner_mode;
6685 }
6686
6687 if (z)
6688 {
6689 machine_mode cm = m;
6690 if ((op == ASHIFT || op == LSHIFTRT || op == ASHIFTRT)
6691 && GET_MODE (c1) != VOIDmode)
6692 cm = GET_MODE (c1);
6693 temp = subst (simplify_gen_relational (true_code, cm, VOIDmode,
6694 cond_op0, cond_op1),
6695 pc_rtx, pc_rtx, 0, 0, 0);
6696 temp = simplify_gen_binary (MULT, cm, temp,
6697 simplify_gen_binary (MULT, cm, c1,
6698 const_true_rtx));
6699 temp = subst (temp, pc_rtx, pc_rtx, 0, 0, 0);
6700 temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);
6701
6702 if (extend_op != UNKNOWN)
6703 temp = simplify_gen_unary (extend_op, int_mode, temp, m);
6704
6705 return temp;
6706 }
6707 }
6708
6709 /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or
6710 1 and C1 is a single bit or A is known to be 0 or -1 and C1 is the
6711 negation of a single bit, we can convert this operation to a shift. We
6712 can actually do this more generally, but it doesn't seem worth it. */
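/* E.g. if A is known to be 0 or 1, (if_then_else (ne A (const_int 0))
   (const_int 8) (const_int 0)) becomes (ashift A (const_int 3)). */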
6713
6714 if (true_code == NE
6715 && is_a <scalar_int_mode> (mode, &int_mode)
6716 && XEXP (cond, 1) == const0_rtx
6717 && false_rtx == const0_rtx
6718 && CONST_INT_P (true_rtx)
6719 && ((nonzero_bits (XEXP (cond, 0), int_mode) == 1
6720 && (i = exact_log2 (UINTVAL (true_rtx))) >= 0)
6721 || ((num_sign_bit_copies (XEXP (cond, 0), int_mode)
6722 == GET_MODE_PRECISION (int_mode))
6723 && (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
6724 return
6725 simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6726 gen_lowpart (int_mode, XEXP (cond, 0)), i);
6727
6728 /* (IF_THEN_ELSE (NE A 0) C1 0) is A or a zero-extend of A if the only
6729 non-zero bit in A is C1. */
6730 if (true_code == NE && XEXP (cond, 1) == const0_rtx
6731 && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
6732 && is_a <scalar_int_mode> (mode, &int_mode)
6733 && is_a <scalar_int_mode> (GET_MODE (XEXP (cond, 0)), &inner_mode)
6734 && (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))
6735 == nonzero_bits (XEXP (cond, 0), inner_mode)
6736 && (i = exact_log2 (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))) >= 0)
6737 {
6738 rtx val = XEXP (cond, 0);
6739 if (inner_mode == int_mode)
6740 return val;
6741 else if (GET_MODE_PRECISION (inner_mode) < GET_MODE_PRECISION (int_mode))
6742 return simplify_gen_unary (ZERO_EXTEND, int_mode, val, inner_mode);
6743 }
6744
6745 return x;
6746 }
6747
6748 /* Simplify X, a SET expression. Return the new expression. */
6749
6750 static rtx
6751 simplify_set (rtx x)
6752 {
6753 rtx src = SET_SRC (x);
6754 rtx dest = SET_DEST (x);
6755 machine_mode mode
6756 = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest);
6757 rtx_insn *other_insn;
6758 rtx *cc_use;
6759 scalar_int_mode int_mode;
6760
6761 /* (set (pc) (return)) gets written as (return). */
6762 if (GET_CODE (dest) == PC && ANY_RETURN_P (src))
6763 return src;
6764
6765 /* Now that we know for sure which bits of SRC we are using, see if we can
6766 simplify the expression for the object knowing that we only need the
6767 low-order bits. */
6768
6769 if (GET_MODE_CLASS (mode) == MODE_INT && HWI_COMPUTABLE_MODE_P (mode))
6770 {
6771 src = force_to_mode (src, mode, HOST_WIDE_INT_M1U, 0);
6772 SUBST (SET_SRC (x), src);
6773 }
6774
6775 /* If we are setting CC0 or if the source is a COMPARE, look for the use of
6776 the comparison result and try to simplify it unless we already have used
6777 undobuf.other_insn. */
6778 if ((GET_MODE_CLASS (mode) == MODE_CC
6779 || GET_CODE (src) == COMPARE
6780 || CC0_P (dest))
6781 && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
6782 && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn)
6783 && COMPARISON_P (*cc_use)
6784 && rtx_equal_p (XEXP (*cc_use, 0), dest))
6785 {
6786 enum rtx_code old_code = GET_CODE (*cc_use);
6787 enum rtx_code new_code;
6788 rtx op0, op1, tmp;
6789 int other_changed = 0;
6790 rtx inner_compare = NULL_RTX;
6791 machine_mode compare_mode = GET_MODE (dest);
6792
6793 if (GET_CODE (src) == COMPARE)
6794 {
6795 op0 = XEXP (src, 0), op1 = XEXP (src, 1);
6796 if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
6797 {
6798 inner_compare = op0;
6799 op0 = XEXP (inner_compare, 0), op1 = XEXP (inner_compare, 1);
6800 }
6801 }
6802 else
6803 op0 = src, op1 = CONST0_RTX (GET_MODE (src));
6804
6805 tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
6806 op0, op1);
6807 if (!tmp)
6808 new_code = old_code;
6809 else if (!CONSTANT_P (tmp))
6810 {
6811 new_code = GET_CODE (tmp);
6812 op0 = XEXP (tmp, 0);
6813 op1 = XEXP (tmp, 1);
6814 }
6815 else
6816 {
6817 rtx pat = PATTERN (other_insn);
6818 undobuf.other_insn = other_insn;
6819 SUBST (*cc_use, tmp);
6820
6821 /* Attempt to simplify CC user. */
6822 if (GET_CODE (pat) == SET)
6823 {
6824 rtx new_rtx = simplify_rtx (SET_SRC (pat));
6825 if (new_rtx != NULL_RTX)
6826 SUBST (SET_SRC (pat), new_rtx);
6827 }
6828
6829 /* Convert X into a no-op move. */
6830 SUBST (SET_DEST (x), pc_rtx);
6831 SUBST (SET_SRC (x), pc_rtx);
6832 return x;
6833 }
6834
6835 /* Simplify our comparison, if possible. */
6836 new_code = simplify_comparison (new_code, &op0, &op1);
6837
6838 #ifdef SELECT_CC_MODE
6839 /* If this machine has CC modes other than CCmode, check to see if we
6840 need to use a different CC mode here. */
6841 if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
6842 compare_mode = GET_MODE (op0);
6843 else if (inner_compare
6844 && GET_MODE_CLASS (GET_MODE (inner_compare)) == MODE_CC
6845 && new_code == old_code
6846 && op0 == XEXP (inner_compare, 0)
6847 && op1 == XEXP (inner_compare, 1))
6848 compare_mode = GET_MODE (inner_compare);
6849 else
6850 compare_mode = SELECT_CC_MODE (new_code, op0, op1);
6851
6852 /* If the mode changed, we have to change SET_DEST, the mode in the
6853 compare, and the mode in the place SET_DEST is used. If SET_DEST is
6854 a hard register, just build new versions with the proper mode. If it
6855 is a pseudo, we lose unless it is the only time we set the pseudo, in
6856 which case we can safely change its mode. */
6857 if (!HAVE_cc0 && compare_mode != GET_MODE (dest))
6858 {
6859 if (can_change_dest_mode (dest, 0, compare_mode))
6860 {
6861 unsigned int regno = REGNO (dest);
6862 rtx new_dest;
6863
6864 if (regno < FIRST_PSEUDO_REGISTER)
6865 new_dest = gen_rtx_REG (compare_mode, regno);
6866 else
6867 {
6868 SUBST_MODE (regno_reg_rtx[regno], compare_mode);
6869 new_dest = regno_reg_rtx[regno];
6870 }
6871
6872 SUBST (SET_DEST (x), new_dest);
6873 SUBST (XEXP (*cc_use, 0), new_dest);
6874 other_changed = 1;
6875
6876 dest = new_dest;
6877 }
6878 }
6879 #endif /* SELECT_CC_MODE */
6880
6881 /* If the code changed, we have to build a new comparison in
6882 undobuf.other_insn. */
6883 if (new_code != old_code)
6884 {
6885 int other_changed_previously = other_changed;
6886 unsigned HOST_WIDE_INT mask;
6887 rtx old_cc_use = *cc_use;
6888
6889 SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use),
6890 dest, const0_rtx));
6891 other_changed = 1;
6892
6893 /* If the only change we made was to change an EQ into an NE or
6894 vice versa, OP0 has only one bit that might be nonzero, and OP1
6895 is zero, check if changing the user of the condition code will
6896 produce a valid insn. If it won't, we can keep the original code
6897 in that insn by surrounding our operation with an XOR. */
6898
6899 if (((old_code == NE && new_code == EQ)
6900 || (old_code == EQ && new_code == NE))
6901 && ! other_changed_previously && op1 == const0_rtx
6902 && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
6903 && pow2p_hwi (mask = nonzero_bits (op0, GET_MODE (op0))))
6904 {
6905 rtx pat = PATTERN (other_insn), note = 0;
6906
6907 if ((recog_for_combine (&pat, other_insn, &note) < 0
6908 && ! check_asm_operands (pat)))
6909 {
6910 *cc_use = old_cc_use;
6911 other_changed = 0;
6912
6913 op0 = simplify_gen_binary (XOR, GET_MODE (op0), op0,
6914 gen_int_mode (mask,
6915 GET_MODE (op0)));
6916 }
6917 }
6918 }
6919
6920 if (other_changed)
6921 undobuf.other_insn = other_insn;
6922
6923 /* Don't generate a compare of a CC with 0, just use that CC. */
6924 if (GET_MODE (op0) == compare_mode && op1 == const0_rtx)
6925 {
6926 SUBST (SET_SRC (x), op0);
6927 src = SET_SRC (x);
6928 }
6929 /* Otherwise, if we didn't previously have the same COMPARE we
6930 want, create it from scratch. */
6931 else if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode
6932 || XEXP (src, 0) != op0 || XEXP (src, 1) != op1)
6933 {
6934 SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
6935 src = SET_SRC (x);
6936 }
6937 }
6938 else
6939 {
6940 /* Get SET_SRC in a form where we have placed back any
6941 compound expressions. Then do the checks below. */
6942 src = make_compound_operation (src, SET);
6943 SUBST (SET_SRC (x), src);
6944 }
6945
6946 /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
6947 and X being a REG or (subreg (reg)), we may be able to convert this to
6948 (set (subreg:m2 x) (op)).
6949
6950 We can always do this if M1 is narrower than M2 because that means that
6951 we only care about the low bits of the result.
6952
6953 However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
6954 perform a narrower operation than requested since the high-order bits will
6955 be undefined. On machines where it is defined, this transformation is safe
6956 as long as M1 and M2 have the same number of words. */
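/* E.g. (set (reg:QI X) (subreg:QI (plus:SI A B) 0)) may become
   (set (subreg:SI (reg:QI X) 0) (plus:SI A B)), since only the low byte
   of the sum is needed. */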
6957
6958 if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
6959 && !OBJECT_P (SUBREG_REG (src))
6960 && (known_equal_after_align_up
6961 (GET_MODE_SIZE (GET_MODE (src)),
6962 GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))),
6963 UNITS_PER_WORD))
6964 && (WORD_REGISTER_OPERATIONS || !paradoxical_subreg_p (src))
6965 && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
6966 && !REG_CAN_CHANGE_MODE_P (REGNO (dest),
6967 GET_MODE (SUBREG_REG (src)),
6968 GET_MODE (src)))
6969 && (REG_P (dest)
6970 || (GET_CODE (dest) == SUBREG
6971 && REG_P (SUBREG_REG (dest)))))
6972 {
6973 SUBST (SET_DEST (x),
6974 gen_lowpart (GET_MODE (SUBREG_REG (src)),
6975 dest));
6976 SUBST (SET_SRC (x), SUBREG_REG (src));
6977
6978 src = SET_SRC (x), dest = SET_DEST (x);
6979 }
6980
6981 /* If we have (set (cc0) (subreg ...)), we try to remove the subreg
6982 in SRC. */
6983 if (dest == cc0_rtx
6984 && partial_subreg_p (src)
6985 && subreg_lowpart_p (src))
6986 {
6987 rtx inner = SUBREG_REG (src);
6988 machine_mode inner_mode = GET_MODE (inner);
6989
6990 /* Here we make sure that we don't have a sign bit on. */
6991 if (val_signbit_known_clear_p (GET_MODE (src),
6992 nonzero_bits (inner, inner_mode)))
6993 {
6994 SUBST (SET_SRC (x), inner);
6995 src = SET_SRC (x);
6996 }
6997 }
6998
6999 /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
7000 would require a paradoxical subreg. Replace the subreg with a
7001 zero_extend to avoid the reload that would otherwise be required.
7002 Don't do this unless we have a scalar integer mode, otherwise the
7003 transformation is incorrect. */
7004
7005 enum rtx_code extend_op;
7006 if (paradoxical_subreg_p (src)
7007 && MEM_P (SUBREG_REG (src))
7008 && SCALAR_INT_MODE_P (GET_MODE (src))
7009 && (extend_op = load_extend_op (GET_MODE (SUBREG_REG (src)))) != UNKNOWN)
7010 {
7011 SUBST (SET_SRC (x),
7012 gen_rtx_fmt_e (extend_op, GET_MODE (src), SUBREG_REG (src)));
7013
7014 src = SET_SRC (x);
7015 }
7016
7017 /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
7018 are comparing an item known to be 0 or -1 against 0, use a logical
7019 operation instead. Check for one of the arms being an IOR of the other
7020 arm with some value. We compute three terms to be IOR'ed together. In
7021 practice, at most two will be nonzero. Then we do the IOR's. */
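/* E.g. if A is known to be 0 or -1, (if_then_else (ne A (const_int 0))
   B (const_int 0)) reduces to (and A B), with A acting as a mask. */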
7022
7023 if (GET_CODE (dest) != PC
7024 && GET_CODE (src) == IF_THEN_ELSE
7025 && is_int_mode (GET_MODE (src), &int_mode)
7026 && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE)
7027 && XEXP (XEXP (src, 0), 1) == const0_rtx
7028 && int_mode == GET_MODE (XEXP (XEXP (src, 0), 0))
7029 && (!HAVE_conditional_move
7030 || ! can_conditionally_move_p (int_mode))
7031 && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0), int_mode)
7032 == GET_MODE_PRECISION (int_mode))
7033 && ! side_effects_p (src))
7034 {
7035 rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
7036 ? XEXP (src, 1) : XEXP (src, 2));
7037 rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE
7038 ? XEXP (src, 2) : XEXP (src, 1));
7039 rtx term1 = const0_rtx, term2, term3;
7040
7041 if (GET_CODE (true_rtx) == IOR
7042 && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
7043 term1 = false_rtx, true_rtx = XEXP (true_rtx, 1), false_rtx = const0_rtx;
7044 else if (GET_CODE (true_rtx) == IOR
7045 && rtx_equal_p (XEXP (true_rtx, 1), false_rtx))
7046 term1 = false_rtx, true_rtx = XEXP (true_rtx, 0), false_rtx = const0_rtx;
7047 else if (GET_CODE (false_rtx) == IOR
7048 && rtx_equal_p (XEXP (false_rtx, 0), true_rtx))
7049 term1 = true_rtx, false_rtx = XEXP (false_rtx, 1), true_rtx = const0_rtx;
7050 else if (GET_CODE (false_rtx) == IOR
7051 && rtx_equal_p (XEXP (false_rtx, 1), true_rtx))
7052 term1 = true_rtx, false_rtx = XEXP (false_rtx, 0), true_rtx = const0_rtx;
7053
7054 term2 = simplify_gen_binary (AND, int_mode,
7055 XEXP (XEXP (src, 0), 0), true_rtx);
7056 term3 = simplify_gen_binary (AND, int_mode,
7057 simplify_gen_unary (NOT, int_mode,
7058 XEXP (XEXP (src, 0), 0),
7059 int_mode),
7060 false_rtx);
7061
7062 SUBST (SET_SRC (x),
7063 simplify_gen_binary (IOR, int_mode,
7064 simplify_gen_binary (IOR, int_mode,
7065 term1, term2),
7066 term3));
7067
7068 src = SET_SRC (x);
7069 }
7070
7071 /* If either SRC or DEST is a CLOBBER of (const_int 0), make this
7072 whole thing fail. */
7073 if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx)
7074 return src;
7075 else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx)
7076 return dest;
7077 else
7078 /* Convert this into a field assignment operation, if possible. */
7079 return make_field_assignment (x);
7080 }
7081
7082 /* Simplify X, an AND, IOR, or XOR operation, and return the simplified
7083 result. */
7084
7085 static rtx
7086 simplify_logical (rtx x)
7087 {
7088 rtx op0 = XEXP (x, 0);
7089 rtx op1 = XEXP (x, 1);
7090 scalar_int_mode mode;
7091
7092 switch (GET_CODE (x))
7093 {
7094 case AND:
7095 /* We can call simplify_and_const_int only if we don't lose
7096 any (sign) bits when converting INTVAL (op1) to
7097 "unsigned HOST_WIDE_INT". */
7098 if (is_a <scalar_int_mode> (GET_MODE (x), &mode)
7099 && CONST_INT_P (op1)
7100 && (HWI_COMPUTABLE_MODE_P (mode)
7101 || INTVAL (op1) > 0))
7102 {
7103 x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
7104 if (GET_CODE (x) != AND)
7105 return x;
7106
7107 op0 = XEXP (x, 0);
7108 op1 = XEXP (x, 1);
7109 }
7110
7111 /* If we have any of (and (ior A B) C) or (and (xor A B) C),
7112 apply the distributive law and then the inverse distributive
7113 law to see if things simplify. */
7114 if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
7115 {
7116 rtx result = distribute_and_simplify_rtx (x, 0);
7117 if (result)
7118 return result;
7119 }
7120 if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
7121 {
7122 rtx result = distribute_and_simplify_rtx (x, 1);
7123 if (result)
7124 return result;
7125 }
7126 break;
7127
7128 case IOR:
7129 /* If we have (ior (and A B) C), apply the distributive law and then
7130 the inverse distributive law to see if things simplify. */
7131
7132 if (GET_CODE (op0) == AND)
7133 {
7134 rtx result = distribute_and_simplify_rtx (x, 0);
7135 if (result)
7136 return result;
7137 }
7138
7139 if (GET_CODE (op1) == AND)
7140 {
7141 rtx result = distribute_and_simplify_rtx (x, 1);
7142 if (result)
7143 return result;
7144 }
7145 break;
7146
7147 default:
7148 gcc_unreachable ();
7149 }
7150
7151 return x;
7152 }
7153
7154 /* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
7155 operations" because they can be replaced with two more basic operations.
7156 ZERO_EXTEND is also considered "compound" because it can be replaced with
7157 an AND operation, which is simpler, though only one operation.
7158
7159 The function expand_compound_operation is called with an rtx expression
7160 and will convert it to the appropriate shifts and AND operations,
7161 simplifying at each stage.
7162
7163 The function make_compound_operation is called to convert an expression
7164 consisting of shifts and ANDs into the equivalent compound expression.
7165 It is the inverse of this function, loosely speaking. */
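/* E.g. on a 32-bit target, (sign_extract:SI X (const_int 8) (const_int 0))
   expands to (ashiftrt:SI (ashift:SI X (const_int 24)) (const_int 24)),
   and a ZERO_EXTEND from QImode becomes an AND with (const_int 255). */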
7166
7167 static rtx
7168 expand_compound_operation (rtx x)
7169 {
7170 unsigned HOST_WIDE_INT pos = 0, len;
7171 int unsignedp = 0;
7172 unsigned int modewidth;
7173 rtx tem;
7174 scalar_int_mode inner_mode;
7175
7176 switch (GET_CODE (x))
7177 {
7178 case ZERO_EXTEND:
7179 unsignedp = 1;
7180 /* FALLTHRU */
7181 case SIGN_EXTEND:
7182 /* We can't necessarily use a const_int for a multiword mode;
7183 it depends on implicitly extending the value.
7184 Since we don't know the right way to extend it,
7185 we can't tell whether the implicit way is right.
7186
7187 Even for a mode that is no wider than a const_int,
7188 we can't win, because we need to sign extend one of its bits through
7189 the rest of it, and we don't know which bit. */
7190 if (CONST_INT_P (XEXP (x, 0)))
7191 return x;
7192
7193 /* Reject modes that aren't scalar integers because turning vector
7194 or complex modes into shifts causes problems. */
7195 if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
7196 return x;
7197
7198 /* Return if (subreg:MODE FROM 0) is not a safe replacement for
7199 (zero_extend:MODE FROM) or (sign_extend:MODE FROM). It is for any MEM
7200 because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
7201 reloaded. If not for that, MEM's would very rarely be safe.
7202
7203 Reject modes bigger than a word, because we might not be able
7204 to reference a two-register group starting with an arbitrary register
7205 (and currently gen_lowpart might crash for a SUBREG). */
7206
7207 if (GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD)
7208 return x;
7209
7210 len = GET_MODE_PRECISION (inner_mode);
7211 /* If the inner object has VOIDmode (the only way this can happen
7212 is if it is an ASM_OPERANDS), we can't do anything since we don't
7213 know how much masking to do. */
7214 if (len == 0)
7215 return x;
7216
7217 break;
7218
7219 case ZERO_EXTRACT:
7220 unsignedp = 1;
7221
7222 /* fall through */
7223
7224 case SIGN_EXTRACT:
7225 /* If the operand is a CLOBBER, just return it. */
7226 if (GET_CODE (XEXP (x, 0)) == CLOBBER)
7227 return XEXP (x, 0);
7228
7229 if (!CONST_INT_P (XEXP (x, 1))
7230 || !CONST_INT_P (XEXP (x, 2)))
7231 return x;
7232
7233 /* Reject modes that aren't scalar integers because turning vector
7234 or complex modes into shifts causes problems. */
7235 if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
7236 return x;
7237
7238 len = INTVAL (XEXP (x, 1));
7239 pos = INTVAL (XEXP (x, 2));
7240
7241 /* This should stay within the object being extracted, fail otherwise. */
7242 if (len + pos > GET_MODE_PRECISION (inner_mode))
7243 return x;
7244
7245 if (BITS_BIG_ENDIAN)
7246 pos = GET_MODE_PRECISION (inner_mode) - len - pos;
7247
7248 break;
7249
7250 default:
7251 return x;
7252 }
7253
7254 /* We've rejected non-scalar operations by now. */
7255 scalar_int_mode mode = as_a <scalar_int_mode> (GET_MODE (x));
7256
7257 /* Convert sign extension to zero extension, if we know that the high
7258 bit is not set, as this is easier to optimize. It will be converted
7259 back to the cheaper alternative in make_extraction. */
7260 if (GET_CODE (x) == SIGN_EXTEND
7261 && HWI_COMPUTABLE_MODE_P (mode)
7262 && ((nonzero_bits (XEXP (x, 0), inner_mode)
7263 & ~(((unsigned HOST_WIDE_INT) GET_MODE_MASK (inner_mode)) >> 1))
7264 == 0))
7265 {
7266 rtx temp = gen_rtx_ZERO_EXTEND (mode, XEXP (x, 0));
7267 rtx temp2 = expand_compound_operation (temp);
7268
7269 /* Make sure this is a profitable operation. */
7270 if (set_src_cost (x, mode, optimize_this_for_speed_p)
7271 > set_src_cost (temp2, mode, optimize_this_for_speed_p))
7272 return temp2;
7273 else if (set_src_cost (x, mode, optimize_this_for_speed_p)
7274 > set_src_cost (temp, mode, optimize_this_for_speed_p))
7275 return temp;
7276 else
7277 return x;
7278 }
7279
7280 /* We can optimize some special cases of ZERO_EXTEND. */
7281 if (GET_CODE (x) == ZERO_EXTEND)
7282 {
7283 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
7284 know that the last value didn't have any inappropriate bits
7285 set. */
7286 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
7287 && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
7288 && HWI_COMPUTABLE_MODE_P (mode)
7289 && (nonzero_bits (XEXP (XEXP (x, 0), 0), mode)
7290 & ~GET_MODE_MASK (inner_mode)) == 0)
7291 return XEXP (XEXP (x, 0), 0);
7292
7293 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
7294 if (GET_CODE (XEXP (x, 0)) == SUBREG
7295 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
7296 && subreg_lowpart_p (XEXP (x, 0))
7297 && HWI_COMPUTABLE_MODE_P (mode)
7298 && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), mode)
7299 & ~GET_MODE_MASK (inner_mode)) == 0)
7300 return SUBREG_REG (XEXP (x, 0));
7301
7302 /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
7303 is a comparison and STORE_FLAG_VALUE permits. This is like
7304 the first case, but it works even when MODE is larger
7305 than HOST_WIDE_INT. */
7306 if (GET_CODE (XEXP (x, 0)) == TRUNCATE
7307 && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
7308 && COMPARISON_P (XEXP (XEXP (x, 0), 0))
7309 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
7310 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
7311 return XEXP (XEXP (x, 0), 0);
7312
7313 /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)). */
7314 if (GET_CODE (XEXP (x, 0)) == SUBREG
7315 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
7316 && subreg_lowpart_p (XEXP (x, 0))
7317 && COMPARISON_P (SUBREG_REG (XEXP (x, 0)))
7318 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
7319 && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
7320 return SUBREG_REG (XEXP (x, 0));
7321
7322 }
7323
7324 /* If we reach here, we want to return a pair of shifts. The inner
7325 shift is a left shift of BITSIZE - POS - LEN bits. The outer
7326 shift is a right shift of BITSIZE - LEN bits. It is arithmetic or
7327 logical depending on the value of UNSIGNEDP.
7328
7329 If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be
7330 converted into an AND of a shift.
7331
7332 We must check for the case where the left shift would have a negative
7333 count. This can happen in a case like (x >> 31) & 255 on machines
7334 that can't shift by a constant. On those machines, we would first
7335 combine the shift with the AND to produce a variable-position
7336 extraction. Then the constant of 31 would be substituted in
7337 to produce such a position. */
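/* For instance, extracting LEN = 4 bits starting at POS = 8 from a
   32-bit value uses an inner left shift of 20 bits and an outer right
   shift of 28 bits. */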
7338
7339 modewidth = GET_MODE_PRECISION (mode);
7340 if (modewidth >= pos + len)
7341 {
7342 tem = gen_lowpart (mode, XEXP (x, 0));
7343 if (!tem || GET_CODE (tem) == CLOBBER)
7344 return x;
7345 tem = simplify_shift_const (NULL_RTX, ASHIFT, mode,
7346 tem, modewidth - pos - len);
7347 tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT,
7348 mode, tem, modewidth - len);
7349 }
7350 else if (unsignedp && len < HOST_BITS_PER_WIDE_INT)
7351 tem = simplify_and_const_int (NULL_RTX, mode,
7352 simplify_shift_const (NULL_RTX, LSHIFTRT,
7353 mode, XEXP (x, 0),
7354 pos),
7355 (HOST_WIDE_INT_1U << len) - 1);
7356 else
7357 /* Any other cases we can't handle. */
7358 return x;
7359
7360 /* If we couldn't do this for some reason, return the original
7361 expression. */
7362 if (GET_CODE (tem) == CLOBBER)
7363 return x;
7364
7365 return tem;
7366 }
7367
7368 /* X is a SET which contains an assignment of one object into
7369 a part of another (such as a bit-field assignment, STRICT_LOW_PART,
7370 or certain SUBREGS). If possible, convert it into a series of
7371 logical operations.
7372
7373 We half-heartedly support variable positions, but do not at all
7374 support variable lengths. */
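/* E.g. (set (zero_extract X (const_int 8) (const_int 4)) Y) becomes,
   roughly, X = (X & ~(255 << 4)) | ((Y & 255) << 4), built from the
   MASK, CLEARED and MASKED values computed below. */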
7375
7376 static const_rtx
7377 expand_field_assignment (const_rtx x)
7378 {
7379 rtx inner;
7380 rtx pos; /* Always counts from low bit. */
7381 int len, inner_len;
7382 rtx mask, cleared, masked;
7383 scalar_int_mode compute_mode;
7384
7385 /* Loop until we find something we can't simplify. */
7386 while (1)
7387 {
7388 if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
7389 && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
7390 {
7391 rtx x0 = XEXP (SET_DEST (x), 0);
7392 if (!GET_MODE_PRECISION (GET_MODE (x0)).is_constant (&len))
7393 break;
7394 inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
7395 pos = gen_int_mode (subreg_lsb (XEXP (SET_DEST (x), 0)),
7396 MAX_MODE_INT);
7397 }
7398 else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
7399 && CONST_INT_P (XEXP (SET_DEST (x), 1)))
7400 {
7401 inner = XEXP (SET_DEST (x), 0);
7402 if (!GET_MODE_PRECISION (GET_MODE (inner)).is_constant (&inner_len))
7403 break;
7404
7405 len = INTVAL (XEXP (SET_DEST (x), 1));
7406 pos = XEXP (SET_DEST (x), 2);
7407
7408 /* A constant position should stay within the width of INNER. */
7409 if (CONST_INT_P (pos) && INTVAL (pos) + len > inner_len)
7410 break;
7411
7412 if (BITS_BIG_ENDIAN)
7413 {
7414 if (CONST_INT_P (pos))
7415 pos = GEN_INT (inner_len - len - INTVAL (pos));
7416 else if (GET_CODE (pos) == MINUS
7417 && CONST_INT_P (XEXP (pos, 1))
7418 && INTVAL (XEXP (pos, 1)) == inner_len - len)
7419 /* If position is ADJUST - X, new position is X. */
7420 pos = XEXP (pos, 0);
7421 else
7422 pos = simplify_gen_binary (MINUS, GET_MODE (pos),
7423 gen_int_mode (inner_len - len,
7424 GET_MODE (pos)),
7425 pos);
7426 }
7427 }
7428
7429 /* If the destination is a subreg that overwrites the whole of the inner
7430 register, we can move the subreg to the source. */
7431 else if (GET_CODE (SET_DEST (x)) == SUBREG
7432 /* We need SUBREGs to compute nonzero_bits properly. */
7433 && nonzero_sign_valid
7434 && !read_modify_subreg_p (SET_DEST (x)))
7435 {
7436 x = gen_rtx_SET (SUBREG_REG (SET_DEST (x)),
7437 gen_lowpart
7438 (GET_MODE (SUBREG_REG (SET_DEST (x))),
7439 SET_SRC (x)));
7440 continue;
7441 }
7442 else
7443 break;
7444
7445 while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
7446 inner = SUBREG_REG (inner);
7447
7448 /* Don't attempt bitwise arithmetic on non-scalar integer modes. */
7449 if (!is_a <scalar_int_mode> (GET_MODE (inner), &compute_mode))
7450 {
7451 /* Don't do anything for vector or complex integral types. */
7452 if (! FLOAT_MODE_P (GET_MODE (inner)))
7453 break;
7454
7455 /* Try to find an integral mode to pun with. */
7456 if (!int_mode_for_size (GET_MODE_BITSIZE (GET_MODE (inner)), 0)
7457 .exists (&compute_mode))
7458 break;
7459
7460 inner = gen_lowpart (compute_mode, inner);
7461 }
7462
7463 /* Compute a mask of LEN bits, if we can do this on the host machine. */
7464 if (len >= HOST_BITS_PER_WIDE_INT)
7465 break;
7466
7467 /* Don't try to compute in too wide unsupported modes. */
7468 if (!targetm.scalar_mode_supported_p (compute_mode))
7469 break;
7470
7471 /* Now compute the equivalent expression. Make a copy of INNER
7472 for the SET_DEST in case it is a MEM into which we will substitute;
7473 we don't want shared RTL in that case. */
7474 mask = gen_int_mode ((HOST_WIDE_INT_1U << len) - 1,
7475 compute_mode);
7476 cleared = simplify_gen_binary (AND, compute_mode,
7477 simplify_gen_unary (NOT, compute_mode,
7478 simplify_gen_binary (ASHIFT,
7479 compute_mode,
7480 mask, pos),
7481 compute_mode),
7482 inner);
7483 masked = simplify_gen_binary (ASHIFT, compute_mode,
7484 simplify_gen_binary (
7485 AND, compute_mode,
7486 gen_lowpart (compute_mode, SET_SRC (x)),
7487 mask),
7488 pos);
7489
7490 x = gen_rtx_SET (copy_rtx (inner),
7491 simplify_gen_binary (IOR, compute_mode,
7492 cleared, masked));
7493 }
7494
7495 return x;
7496 }
7497
7498 /* Return an RTX for a reference to LEN bits of INNER. If POS_RTX is nonzero,
7499 it is an RTX that represents the (variable) starting position; otherwise,
7500 POS is the (constant) starting bit position. Both are counted from the LSB.
7501
7502 UNSIGNEDP is nonzero for an unsigned reference and zero for a signed one.
7503
7504 IN_DEST is nonzero if this is a reference in the destination of a SET.
7505 This is used when a ZERO_ or SIGN_EXTRACT isn't needed. If nonzero,
7506 a STRICT_LOW_PART will be used, if zero, ZERO_EXTEND or SIGN_EXTEND will
7507 be used.
7508
7509 IN_COMPARE is nonzero if we are in a COMPARE. This means that a
7510 ZERO_EXTRACT should be built even for bits starting at bit 0.
7511
7512 MODE is the desired mode of the result (if IN_DEST == 0).
7513
7514 The result is an RTX for the extraction or NULL_RTX if the target
7515 can't handle it. */
7516
7517 static rtx
7518 make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
7519 rtx pos_rtx, unsigned HOST_WIDE_INT len, int unsignedp,
7520 int in_dest, int in_compare)
7521 {
7522 /* This mode describes the size of the storage area
7523 to fetch the overall value from. Within that, we
7524 ignore the POS lowest bits, etc. */
7525 machine_mode is_mode = GET_MODE (inner);
7526 machine_mode inner_mode;
7527 scalar_int_mode wanted_inner_mode;
7528 scalar_int_mode wanted_inner_reg_mode = word_mode;
7529 scalar_int_mode pos_mode = word_mode;
7530 machine_mode extraction_mode = word_mode;
7531 rtx new_rtx = 0;
7532 rtx orig_pos_rtx = pos_rtx;
7533 HOST_WIDE_INT orig_pos;
7534
7535 if (pos_rtx && CONST_INT_P (pos_rtx))
7536 pos = INTVAL (pos_rtx), pos_rtx = 0;
7537
7538 if (GET_CODE (inner) == SUBREG
7539 && subreg_lowpart_p (inner)
7540 && (paradoxical_subreg_p (inner)
7541 /* If trying or potentially trying to extract
7542 bits outside of is_mode, don't look through
7543 non-paradoxical SUBREGs. See PR82192. */
7544 || (pos_rtx == NULL_RTX
7545 && known_le (pos + len, GET_MODE_PRECISION (is_mode)))))
7546 {
7547 /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
7548 consider just the QI as the memory to extract from.
7549 The subreg adds or removes high bits; its mode is
7550 irrelevant to the meaning of this extraction,
7551 since POS and LEN count from the lsb. */
7552 if (MEM_P (SUBREG_REG (inner)))
7553 is_mode = GET_MODE (SUBREG_REG (inner));
7554 inner = SUBREG_REG (inner);
7555 }
7556 else if (GET_CODE (inner) == ASHIFT
7557 && CONST_INT_P (XEXP (inner, 1))
7558 && pos_rtx == 0 && pos == 0
7559 && len > UINTVAL (XEXP (inner, 1)))
7560 {
7561 /* We're extracting the least significant bits of an rtx
7562 (ashift X (const_int C)), where LEN > C. Extract the
7563 least significant (LEN - C) bits of X, giving an rtx
7564 whose mode is MODE, then shift it left C times. */
7565 new_rtx = make_extraction (mode, XEXP (inner, 0),
7566 0, 0, len - INTVAL (XEXP (inner, 1)),
7567 unsignedp, in_dest, in_compare);
7568 if (new_rtx != 0)
7569 return gen_rtx_ASHIFT (mode, new_rtx, XEXP (inner, 1));
7570 }
7571 else if (GET_CODE (inner) == TRUNCATE
7572 /* If trying or potentially trying to extract
7573 bits outside of is_mode, don't look through
7574 TRUNCATE. See PR82192. */
7575 && pos_rtx == NULL_RTX
7576 && known_le (pos + len, GET_MODE_PRECISION (is_mode)))
7577 inner = XEXP (inner, 0);
7578
7579 inner_mode = GET_MODE (inner);
7580
7581 /* See if this can be done without an extraction. We never can if the
7582 width of the field is not the same as that of some integer mode. For
7583 registers, we can only avoid the extraction if the position is at the
7584 low-order bit and this is either not in the destination or we have the
7585 appropriate STRICT_LOW_PART operation available.
7586
7587 For MEM, we can avoid an extract if the field starts on an appropriate
7588 boundary and we can change the mode of the memory reference. */
7589
7590 scalar_int_mode tmode;
7591 if (int_mode_for_size (len, 1).exists (&tmode)
7592 && ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
7593 && !MEM_P (inner)
7594 && (pos == 0 || REG_P (inner))
7595 && (inner_mode == tmode
7596 || !REG_P (inner)
7597 || TRULY_NOOP_TRUNCATION_MODES_P (tmode, inner_mode)
7598 || reg_truncated_to_mode (tmode, inner))
7599 && (! in_dest
7600 || (REG_P (inner)
7601 && have_insn_for (STRICT_LOW_PART, tmode))))
7602 || (MEM_P (inner) && pos_rtx == 0
7603 && (pos
7604 % (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
7605 : BITS_PER_UNIT)) == 0
7606 /* We can't do this if we are widening INNER_MODE (it
7607 may not be aligned, for one thing). */
7608 && !paradoxical_subreg_p (tmode, inner_mode)
7609 && known_le (pos + len, GET_MODE_PRECISION (is_mode))
7610 && (inner_mode == tmode
7611 || (! mode_dependent_address_p (XEXP (inner, 0),
7612 MEM_ADDR_SPACE (inner))
7613 && ! MEM_VOLATILE_P (inner))))))
7614 {
7615 /* If INNER is a MEM, make a new MEM that encompasses just the desired
7616 field. If the original and current mode are the same, we need not
7617 adjust the offset. Otherwise, we do if bytes big endian.
7618
7619 If INNER is not a MEM, get a piece consisting of just the field
7620 of interest (in this case POS % BITS_PER_WORD must be 0). */
7621
7622 if (MEM_P (inner))
7623 {
7624 poly_int64 offset;
7625
7626 /* POS counts from lsb, but make OFFSET count in memory order. */
7627 if (BYTES_BIG_ENDIAN)
7628 offset = bits_to_bytes_round_down (GET_MODE_PRECISION (is_mode)
7629 - len - pos);
7630 else
7631 offset = pos / BITS_PER_UNIT;
7632
7633 new_rtx = adjust_address_nv (inner, tmode, offset);
7634 }
7635 else if (REG_P (inner))
7636 {
7637 if (tmode != inner_mode)
7638 {
7639 /* We can't call gen_lowpart in a DEST since we
7640 always want a SUBREG (see below) and it would sometimes
7641 return a new hard register. */
7642 if (pos || in_dest)
7643 {
7644 poly_uint64 offset
7645 = subreg_offset_from_lsb (tmode, inner_mode, pos);
7646
7647 /* Avoid creating invalid subregs, for example when
7648 simplifying (x>>32)&255. */
7649 if (!validate_subreg (tmode, inner_mode, inner, offset))
7650 return NULL_RTX;
7651
7652 new_rtx = gen_rtx_SUBREG (tmode, inner, offset);
7653 }
7654 else
7655 new_rtx = gen_lowpart (tmode, inner);
7656 }
7657 else
7658 new_rtx = inner;
7659 }
7660 else
7661 new_rtx = force_to_mode (inner, tmode,
7662 len >= HOST_BITS_PER_WIDE_INT
7663 ? HOST_WIDE_INT_M1U
7664 : (HOST_WIDE_INT_1U << len) - 1, 0);
7665
7666 /* If this extraction is going into the destination of a SET,
7667 make a STRICT_LOW_PART unless we made a MEM. */
7668
7669 if (in_dest)
7670 return (MEM_P (new_rtx) ? new_rtx
7671 : (GET_CODE (new_rtx) != SUBREG
7672 ? gen_rtx_CLOBBER (tmode, const0_rtx)
7673 : gen_rtx_STRICT_LOW_PART (VOIDmode, new_rtx)));
7674
7675 if (mode == tmode)
7676 return new_rtx;
7677
7678 if (CONST_SCALAR_INT_P (new_rtx))
7679 return simplify_unary_operation (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7680 mode, new_rtx, tmode);
7681
7682 /* If we know that no extraneous bits are set, and that the high
7683 bit is not set, convert the extraction to the cheaper of
7684 sign and zero extension, which are equivalent in these cases. */
7685 if (flag_expensive_optimizations
7686 && (HWI_COMPUTABLE_MODE_P (tmode)
7687 && ((nonzero_bits (new_rtx, tmode)
7688 & ~(((unsigned HOST_WIDE_INT)GET_MODE_MASK (tmode)) >> 1))
7689 == 0)))
7690 {
7691 rtx temp = gen_rtx_ZERO_EXTEND (mode, new_rtx);
7692 rtx temp1 = gen_rtx_SIGN_EXTEND (mode, new_rtx);
7693
7694 /* Prefer ZERO_EXTENSION, since it gives more information to
7695 backends. */
7696 if (set_src_cost (temp, mode, optimize_this_for_speed_p)
7697 <= set_src_cost (temp1, mode, optimize_this_for_speed_p))
7698 return temp;
7699 return temp1;
7700 }
7701
7702 /* Otherwise, sign- or zero-extend unless we already are in the
7703 proper mode. */
7704
7705 return (gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7706 mode, new_rtx));
7707 }
7708
7709 /* Unless this is a COMPARE or we have a funny memory reference,
7710 don't do anything with zero-extending field extracts starting at
7711 the low-order bit since they are simple AND operations. */
7712 if (pos_rtx == 0 && pos == 0 && ! in_dest
7713 && ! in_compare && unsignedp)
7714 return 0;
7715
7716 /* If INNER is a MEM, reject this if we would be spanning bytes or
7717 if the position is not a constant and the length is not 1. In all
7718 other cases, we would only be going outside our object in cases when
7719 an original shift would have been undefined. */
7720 if (MEM_P (inner)
7721 && ((pos_rtx == 0 && maybe_gt (pos + len, GET_MODE_PRECISION (is_mode)))
7722 || (pos_rtx != 0 && len != 1)))
7723 return 0;
7724
7725 enum extraction_pattern pattern = (in_dest ? EP_insv
7726 : unsignedp ? EP_extzv : EP_extv);
7727
7728 /* If INNER is not from memory, we want it to have the mode of a register
7729 extraction pattern's structure operand, or word_mode if there is no
7730 such pattern. The same applies to extraction_mode and pos_mode
7731 and their respective operands.
7732
7733 For memory, assume that the desired extraction_mode and pos_mode
7734 are the same as for a register operation, since at present we don't
7735 have named patterns for aligned memory structures. */
7736 struct extraction_insn insn;
7737 unsigned int inner_size;
7738 if (GET_MODE_BITSIZE (inner_mode).is_constant (&inner_size)
7739 && get_best_reg_extraction_insn (&insn, pattern, inner_size, mode))
7740 {
7741 wanted_inner_reg_mode = insn.struct_mode.require ();
7742 pos_mode = insn.pos_mode;
7743 extraction_mode = insn.field_mode;
7744 }
7745
7746 /* Never narrow an object, since that might not be safe. */
7747
7748 if (mode != VOIDmode
7749 && partial_subreg_p (extraction_mode, mode))
7750 extraction_mode = mode;
7751
7752 /* Punt if len is too large for extraction_mode. */
7753 if (maybe_gt (len, GET_MODE_PRECISION (extraction_mode)))
7754 return NULL_RTX;
7755
7756 if (!MEM_P (inner))
7757 wanted_inner_mode = wanted_inner_reg_mode;
7758 else
7759 {
7760 /* Be careful not to go beyond the extracted object and maintain the
7761 natural alignment of the memory. */
7762 wanted_inner_mode = smallest_int_mode_for_size (len);
7763 while (pos % GET_MODE_BITSIZE (wanted_inner_mode) + len
7764 > GET_MODE_BITSIZE (wanted_inner_mode))
7765 wanted_inner_mode = GET_MODE_WIDER_MODE (wanted_inner_mode).require ();
7766 }
7767
7768 orig_pos = pos;
7769
7770 if (BITS_BIG_ENDIAN)
7771 {
7772 /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to
7773 BITS_BIG_ENDIAN style. If position is constant, compute new
7774 position. Otherwise, build subtraction.
7775 Note that POS is relative to the mode of the original argument.
7776 If it's a MEM we need to recompute POS relative to that.
7777 However, if we're extracting from (or inserting into) a register,
7778 we want to recompute POS relative to wanted_inner_mode. */
7779 int width;
7780 if (!MEM_P (inner))
7781 width = GET_MODE_BITSIZE (wanted_inner_mode);
7782 else if (!GET_MODE_BITSIZE (is_mode).is_constant (&width))
7783 return NULL_RTX;
7784
7785 if (pos_rtx == 0)
7786 pos = width - len - pos;
7787 else
7788 pos_rtx
7789 = gen_rtx_MINUS (GET_MODE (pos_rtx),
7790 gen_int_mode (width - len, GET_MODE (pos_rtx)),
7791 pos_rtx);
7792 /* POS may be less than 0 now, but we check for that below.
7793 Note that it can only be less than 0 if !MEM_P (inner). */
7794 }
7795
7796 /* If INNER has a wider mode, and this is a constant extraction, try to
7797 make it smaller and adjust the byte to point to the byte containing
7798 the value. */
7799 if (wanted_inner_mode != VOIDmode
7800 && inner_mode != wanted_inner_mode
7801 && ! pos_rtx
7802 && partial_subreg_p (wanted_inner_mode, is_mode)
7803 && MEM_P (inner)
7804 && ! mode_dependent_address_p (XEXP (inner, 0), MEM_ADDR_SPACE (inner))
7805 && ! MEM_VOLATILE_P (inner))
7806 {
7807 poly_int64 offset = 0;
7808
7809 /* The computations below will be correct if the machine is big
7810 endian in both bits and bytes or little endian in bits and bytes.
7811 If it is mixed, we must adjust. */
7812
7813 /* If bytes are big endian and we had a paradoxical SUBREG, we must
7814 adjust OFFSET to compensate. */
7815 if (BYTES_BIG_ENDIAN
7816 && paradoxical_subreg_p (is_mode, inner_mode))
7817 offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);
7818
7819 /* We can now move to the desired byte. */
7820 offset += (pos / GET_MODE_BITSIZE (wanted_inner_mode))
7821 * GET_MODE_SIZE (wanted_inner_mode);
7822 pos %= GET_MODE_BITSIZE (wanted_inner_mode);
7823
7824 if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
7825 && is_mode != wanted_inner_mode)
7826 offset = (GET_MODE_SIZE (is_mode)
7827 - GET_MODE_SIZE (wanted_inner_mode) - offset);
7828
7829 inner = adjust_address_nv (inner, wanted_inner_mode, offset);
7830 }
7831
7832 /* If INNER is not memory, get it into the proper mode. If we are changing
7833 its mode, POS must be a constant and smaller than the size of the new
7834 mode. */
7835 else if (!MEM_P (inner))
7836 {
7837 /* On the LHS, don't create paradoxical subregs implicitly truncating
7838 the register unless TARGET_TRULY_NOOP_TRUNCATION. */
7839 if (in_dest
7840 && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (inner),
7841 wanted_inner_mode))
7842 return NULL_RTX;
7843
7844 if (GET_MODE (inner) != wanted_inner_mode
7845 && (pos_rtx != 0
7846 || orig_pos + len > GET_MODE_BITSIZE (wanted_inner_mode)))
7847 return NULL_RTX;
7848
7849 if (orig_pos < 0)
7850 return NULL_RTX;
7851
7852 inner = force_to_mode (inner, wanted_inner_mode,
7853 pos_rtx
7854 || len + orig_pos >= HOST_BITS_PER_WIDE_INT
7855 ? HOST_WIDE_INT_M1U
7856 : (((HOST_WIDE_INT_1U << len) - 1)
7857 << orig_pos),
7858 0);
7859 }
7860
7861 /* Adjust mode of POS_RTX, if needed. If we want a wider mode, we
7862 have to zero extend. Otherwise, we can just use a SUBREG.
7863
7864 We dealt with constant rtxes earlier, so pos_rtx cannot
7865 have VOIDmode at this point. */
7866 if (pos_rtx != 0
7867 && (GET_MODE_SIZE (pos_mode)
7868 > GET_MODE_SIZE (as_a <scalar_int_mode> (GET_MODE (pos_rtx)))))
7869 {
7870 rtx temp = simplify_gen_unary (ZERO_EXTEND, pos_mode, pos_rtx,
7871 GET_MODE (pos_rtx));
7872
7873 /* If we know that no extraneous bits are set, and that the high
7874 bit is not set, convert the extraction to the cheaper of
7875 SIGN_EXTENSION and ZERO_EXTENSION, which are equivalent in these
7876 cases. */
7877 if (flag_expensive_optimizations
7878 && (HWI_COMPUTABLE_MODE_P (GET_MODE (pos_rtx))
7879 && ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx))
7880 & ~(((unsigned HOST_WIDE_INT)
7881 GET_MODE_MASK (GET_MODE (pos_rtx)))
7882 >> 1))
7883 == 0)))
7884 {
7885 rtx temp1 = simplify_gen_unary (SIGN_EXTEND, pos_mode, pos_rtx,
7886 GET_MODE (pos_rtx));
7887
7888 /* Prefer ZERO_EXTENSION, since it gives more information to
7889 backends. */
7890 if (set_src_cost (temp1, pos_mode, optimize_this_for_speed_p)
7891 < set_src_cost (temp, pos_mode, optimize_this_for_speed_p))
7892 temp = temp1;
7893 }
7894 pos_rtx = temp;
7895 }
7896
7897 /* Make POS_RTX unless we already have it and it is correct. If we don't
7898 have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
7899 be a CONST_INT. */
7900 if (pos_rtx == 0 && orig_pos_rtx != 0 && INTVAL (orig_pos_rtx) == pos)
7901 pos_rtx = orig_pos_rtx;
7902
7903 else if (pos_rtx == 0)
7904 pos_rtx = GEN_INT (pos);
7905
7906 /* Make the required operation. See if we can use existing rtx. */
7907 new_rtx = gen_rtx_fmt_eee (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT,
7908 extraction_mode, inner, GEN_INT (len), pos_rtx);
7909 if (! in_dest)
7910 new_rtx = gen_lowpart (mode, new_rtx);
7911
7912 return new_rtx;
7913 }
7914
7915 /* See if X (of mode MODE) contains an ASHIFT of COUNT or more bits that
7916 can be commuted with any other operations in X. Return X without
7917 that shift if so. */
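/* E.g. with COUNT == 2, (plus (ashift X (const_int 2)) (const_int 12))
   yields (plus X (const_int 3)): shifting that result back left by two
   recovers the original expression. */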
7918
7919 static rtx
7920 extract_left_shift (scalar_int_mode mode, rtx x, int count)
7921 {
7922 enum rtx_code code = GET_CODE (x);
7923 rtx tem;
7924
7925 switch (code)
7926 {
7927 case ASHIFT:
7928 /* This is the shift itself. If it is wide enough, we will return
7929 either the value being shifted if the shift count is equal to
7930 COUNT or a shift for the difference. */
7931 if (CONST_INT_P (XEXP (x, 1))
7932 && INTVAL (XEXP (x, 1)) >= count)
7933 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
7934 INTVAL (XEXP (x, 1)) - count);
7935 break;
7936
7937 case NEG: case NOT:
7938 if ((tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
7939 return simplify_gen_unary (code, mode, tem, mode);
7940
7941 break;
7942
7943 case PLUS: case IOR: case XOR: case AND:
7944 /* If we can safely shift this constant and we find the inner shift,
7945 make a new operation. */
7946 if (CONST_INT_P (XEXP (x, 1))
7947 && (UINTVAL (XEXP (x, 1))
7948 & (((HOST_WIDE_INT_1U << count)) - 1)) == 0
7949 && (tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
7950 {
7951 HOST_WIDE_INT val = INTVAL (XEXP (x, 1)) >> count;
7952 return simplify_gen_binary (code, mode, tem,
7953 gen_int_mode (val, mode));
7954 }
7955 break;
7956
7957 default:
7958 break;
7959 }
7960
7961 return 0;
7962 }
7963
7964 /* Subroutine of make_compound_operation. *X_PTR is the rtx at the current
7965 level of the expression and MODE is its mode. IN_CODE is as for
7966 make_compound_operation. *NEXT_CODE_PTR is the value of IN_CODE
7967 that should be used when recursing on operands of *X_PTR.
7968
7969 There are two possible actions:
7970
7971 - Return null. This tells the caller to recurse on *X_PTR with IN_CODE
7972 equal to *NEXT_CODE_PTR, after which *X_PTR holds the final value.
7973
7974 - Return a new rtx, which the caller returns directly. */
7975
7976 static rtx
7977 make_compound_operation_int (scalar_int_mode mode, rtx *x_ptr,
7978 enum rtx_code in_code,
7979 enum rtx_code *next_code_ptr)
7980 {
7981 rtx x = *x_ptr;
7982 enum rtx_code next_code = *next_code_ptr;
7983 enum rtx_code code = GET_CODE (x);
7984 int mode_width = GET_MODE_PRECISION (mode);
7985 rtx rhs, lhs;
7986 rtx new_rtx = 0;
7987 int i;
7988 rtx tem;
7989 scalar_int_mode inner_mode;
7990 bool equality_comparison = false;
7991
7992 if (in_code == EQ)
7993 {
7994 equality_comparison = true;
7995 in_code = COMPARE;
7996 }
7997
7998 /* Process depending on the code of this operation. If NEW is set
7999 nonzero, it will be returned. */
8000
8001 switch (code)
8002 {
8003 case ASHIFT:
8004 /* Convert shifts by constants into multiplications if inside
8005 an address. */
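/* E.g. (ashift X (const_int 2)) appearing inside a MEM address becomes
   (mult X (const_int 4)), the canonical form within addresses. */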
8006 if (in_code == MEM && CONST_INT_P (XEXP (x, 1))
8007 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
8008 && INTVAL (XEXP (x, 1)) >= 0)
8009 {
8010 HOST_WIDE_INT count = INTVAL (XEXP (x, 1));
8011 HOST_WIDE_INT multval = HOST_WIDE_INT_1 << count;
8012
8013 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8014 if (GET_CODE (new_rtx) == NEG)
8015 {
8016 new_rtx = XEXP (new_rtx, 0);
8017 multval = -multval;
8018 }
8019 multval = trunc_int_for_mode (multval, mode);
8020 new_rtx = gen_rtx_MULT (mode, new_rtx, gen_int_mode (multval, mode));
8021 }
8022 break;
8023
8024 case PLUS:
8025 lhs = XEXP (x, 0);
8026 rhs = XEXP (x, 1);
8027 lhs = make_compound_operation (lhs, next_code);
8028 rhs = make_compound_operation (rhs, next_code);
8029 if (GET_CODE (lhs) == MULT && GET_CODE (XEXP (lhs, 0)) == NEG)
8030 {
8031 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (lhs, 0), 0),
8032 XEXP (lhs, 1));
8033 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
8034 }
8035 else if (GET_CODE (lhs) == MULT
8036 && (CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) < 0))
8037 {
8038 tem = simplify_gen_binary (MULT, mode, XEXP (lhs, 0),
8039 simplify_gen_unary (NEG, mode,
8040 XEXP (lhs, 1),
8041 mode));
8042 new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
8043 }
8044 else
8045 {
8046 SUBST (XEXP (x, 0), lhs);
8047 SUBST (XEXP (x, 1), rhs);
8048 }
8049 maybe_swap_commutative_operands (x);
8050 return x;
8051
8052 case MINUS:
8053 lhs = XEXP (x, 0);
8054 rhs = XEXP (x, 1);
8055 lhs = make_compound_operation (lhs, next_code);
8056 rhs = make_compound_operation (rhs, next_code);
8057 if (GET_CODE (rhs) == MULT && GET_CODE (XEXP (rhs, 0)) == NEG)
8058 {
8059 tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (rhs, 0), 0),
8060 XEXP (rhs, 1));
8061 return simplify_gen_binary (PLUS, mode, tem, lhs);
8062 }
8063 else if (GET_CODE (rhs) == MULT
8064 && (CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) < 0))
8065 {
8066 tem = simplify_gen_binary (MULT, mode, XEXP (rhs, 0),
8067 simplify_gen_unary (NEG, mode,
8068 XEXP (rhs, 1),
8069 mode));
8070 return simplify_gen_binary (PLUS, mode, tem, lhs);
8071 }
8072 else
8073 {
8074 SUBST (XEXP (x, 0), lhs);
8075 SUBST (XEXP (x, 1), rhs);
8076 return x;
8077 }
8078
8079 case AND:
8080 /* If the second operand is not a constant, we can't do anything
8081 with it. */
8082 if (!CONST_INT_P (XEXP (x, 1)))
8083 break;
8084
8085 /* If the constant is a power of two minus one and the first operand
8086 is a logical right shift, make an extraction. */
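/* E.g. (and (lshiftrt X (const_int 3)) (const_int 255)) becomes
   (zero_extract X (const_int 8) (const_int 3)). */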
8087 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8088 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8089 {
8090 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
8091 new_rtx = make_extraction (mode, new_rtx, 0, XEXP (XEXP (x, 0), 1),
8092 i, 1, 0, in_code == COMPARE);
8093 }
8094
8095 /* Same as previous, but for (subreg (lshiftrt ...)) in first op. */
8096 else if (GET_CODE (XEXP (x, 0)) == SUBREG
8097 && subreg_lowpart_p (XEXP (x, 0))
8098 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (XEXP (x, 0))),
8099 &inner_mode)
8100 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
8101 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8102 {
8103 rtx inner_x0 = SUBREG_REG (XEXP (x, 0));
8104 new_rtx = make_compound_operation (XEXP (inner_x0, 0), next_code);
8105 new_rtx = make_extraction (inner_mode, new_rtx, 0,
8106 XEXP (inner_x0, 1),
8107 i, 1, 0, in_code == COMPARE);
8108
8109 /* If we narrowed the mode when dropping the subreg, then we lose. */
8110 if (GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (mode))
8111 new_rtx = NULL;
8112
8113 /* If that didn't give anything, see if the AND simplifies on
8114 its own. */
8115 if (!new_rtx && i >= 0)
8116 {
8117 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8118 new_rtx = make_extraction (mode, new_rtx, 0, NULL_RTX, i, 1,
8119 0, in_code == COMPARE);
8120 }
8121 }
8122 /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)). */
8123 else if ((GET_CODE (XEXP (x, 0)) == XOR
8124 || GET_CODE (XEXP (x, 0)) == IOR)
8125 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT
8126 && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT
8127 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8128 {
8129 /* Apply the distributive law, and then try to make extractions. */
8130 new_rtx = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode,
8131 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
8132 XEXP (x, 1)),
8133 gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
8134 XEXP (x, 1)));
8135 new_rtx = make_compound_operation (new_rtx, in_code);
8136 }
8137
8138 /* If we have (and (rotate X C) M) and C is larger than the number
8139 of bits in M, this is an extraction. */
8140
8141 else if (GET_CODE (XEXP (x, 0)) == ROTATE
8142 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8143 && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0
8144 && i <= INTVAL (XEXP (XEXP (x, 0), 1)))
8145 {
8146 new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
8147 new_rtx = make_extraction (mode, new_rtx,
8148 (GET_MODE_PRECISION (mode)
8149 - INTVAL (XEXP (XEXP (x, 0), 1))),
8150 NULL_RTX, i, 1, 0, in_code == COMPARE);
8151 }
8152
8153 /* On machines without logical shifts, if the operand of the AND is
8154 a logical shift and our mask turns off all the propagated sign
8155 bits, we can replace the logical shift with an arithmetic shift. */
8156 else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8157 && !have_insn_for (LSHIFTRT, mode)
8158 && have_insn_for (ASHIFTRT, mode)
8159 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8160 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8161 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8162 && mode_width <= HOST_BITS_PER_WIDE_INT)
8163 {
8164 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
8165
8166 mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
8167 if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
8168 SUBST (XEXP (x, 0),
8169 gen_rtx_ASHIFTRT (mode,
8170 make_compound_operation (XEXP (XEXP (x,
8171 0),
8172 0),
8173 next_code),
8174 XEXP (XEXP (x, 0), 1)));
8175 }
8176
8177 /* If the constant is one less than a power of two, this might be
8178 representable by an extraction even if no shift is present.
8179 If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
8180 we are in a COMPARE. */
8181 else if ((i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8182 new_rtx = make_extraction (mode,
8183 make_compound_operation (XEXP (x, 0),
8184 next_code),
8185 0, NULL_RTX, i, 1, 0, in_code == COMPARE);
8186
8187 /* If we are in a comparison and this is an AND with a power of two,
8188 convert this into the appropriate bit extract. */
8189 else if (in_code == COMPARE
8190 && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
8191 && (equality_comparison || i < GET_MODE_PRECISION (mode) - 1))
8192 new_rtx = make_extraction (mode,
8193 make_compound_operation (XEXP (x, 0),
8194 next_code),
8195 i, NULL_RTX, 1, 1, 0, 1);
8196
8197 /* If one operand is a paradoxical subreg of a register or memory and
8198 the constant (limited to the smaller mode) has only zero bits where
8199 the sub expression has known zero bits, this can be expressed as
8200 a zero_extend. */
8201 else if (GET_CODE (XEXP (x, 0)) == SUBREG)
8202 {
8203 rtx sub;
8204
8205 sub = XEXP (XEXP (x, 0), 0);
8206 machine_mode sub_mode = GET_MODE (sub);
8207 int sub_width;
8208 if ((REG_P (sub) || MEM_P (sub))
8209 && GET_MODE_PRECISION (sub_mode).is_constant (&sub_width)
8210 && sub_width < mode_width)
8211 {
8212 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (sub_mode);
8213 unsigned HOST_WIDE_INT mask;
8214
8215 /* The original AND constant with all the known zero bits set.  */
8216 mask = UINTVAL (XEXP (x, 1)) | (~nonzero_bits (sub, sub_mode));
8217 if ((mask & mode_mask) == mode_mask)
8218 {
8219 new_rtx = make_compound_operation (sub, next_code);
8220 new_rtx = make_extraction (mode, new_rtx, 0, 0, sub_width,
8221 1, 0, in_code == COMPARE);
8222 }
8223 }
8224 }
8225
8226 break;
8227
8228 case LSHIFTRT:
8229 /* If the sign bit is known to be zero, replace this with an
8230 arithmetic shift. */
8231 if (have_insn_for (ASHIFTRT, mode)
8232 && ! have_insn_for (LSHIFTRT, mode)
8233 && mode_width <= HOST_BITS_PER_WIDE_INT
8234 && (nonzero_bits (XEXP (x, 0), mode) & (1 << (mode_width - 1))) == 0)
8235 {
8236 new_rtx = gen_rtx_ASHIFTRT (mode,
8237 make_compound_operation (XEXP (x, 0),
8238 next_code),
8239 XEXP (x, 1));
8240 break;
8241 }
8242
8243 /* fall through */
8244
8245 case ASHIFTRT:
8246 lhs = XEXP (x, 0);
8247 rhs = XEXP (x, 1);
8248
8249 /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
8250 this is a SIGN_EXTRACT. */
8251 if (CONST_INT_P (rhs)
8252 && GET_CODE (lhs) == ASHIFT
8253 && CONST_INT_P (XEXP (lhs, 1))
8254 && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))
8255 && INTVAL (XEXP (lhs, 1)) >= 0
8256 && INTVAL (rhs) < mode_width)
8257 {
8258 new_rtx = make_compound_operation (XEXP (lhs, 0), next_code);
8259 new_rtx = make_extraction (mode, new_rtx,
8260 INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
8261 NULL_RTX, mode_width - INTVAL (rhs),
8262 code == LSHIFTRT, 0, in_code == COMPARE);
8263 break;
8264 }
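/* Worked instance (illustrative): in SImode,
     (ashiftrt:SI (ashift:SI X (const_int 24)) (const_int 24))
   gives pos == 24 - 24 == 0 and len == 32 - 24 == 8, so it is usually
   re-formed as a sign extension of the low byte, e.g.
     (sign_extend:SI (subreg:QI X 0))
   on a little-endian target; the precise form is make_extraction's
   choice.  */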
8265
8266 /* See if we have operations between an ASHIFTRT and an ASHIFT.
8267 If so, try to merge the shifts into a SIGN_EXTEND. We could
8268 also do this for some cases of SIGN_EXTRACT, but it doesn't
8269 seem worth the effort; the case checked for occurs on Alpha. */
8270
8271 if (!OBJECT_P (lhs)
8272 && ! (GET_CODE (lhs) == SUBREG
8273 && (OBJECT_P (SUBREG_REG (lhs))))
8274 && CONST_INT_P (rhs)
8275 && INTVAL (rhs) >= 0
8276 && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT
8277 && INTVAL (rhs) < mode_width
8278 && (new_rtx = extract_left_shift (mode, lhs, INTVAL (rhs))) != 0)
8279 new_rtx = make_extraction (mode, make_compound_operation (new_rtx,
8280 next_code),
8281 0, NULL_RTX, mode_width - INTVAL (rhs),
8282 code == LSHIFTRT, 0, in_code == COMPARE);
8283
8284 break;
8285
8286 case SUBREG:
8287 /* Call ourselves recursively on the inner expression. If we are
8288 narrowing the object and it has a different RTL code from
8289 what it originally had, do this SUBREG as a force_to_mode. */
8290 {
8291 rtx inner = SUBREG_REG (x), simplified;
8292 enum rtx_code subreg_code = in_code;
8293
8294 /* If the SUBREG masks a logical right shift,
8295 make an extraction. */
8296 if (GET_CODE (inner) == LSHIFTRT
8297 && is_a <scalar_int_mode> (GET_MODE (inner), &inner_mode)
8298 && GET_MODE_SIZE (mode) < GET_MODE_SIZE (inner_mode)
8299 && CONST_INT_P (XEXP (inner, 1))
8300 && UINTVAL (XEXP (inner, 1)) < GET_MODE_PRECISION (inner_mode)
8301 && subreg_lowpart_p (x))
8302 {
8303 new_rtx = make_compound_operation (XEXP (inner, 0), next_code);
8304 int width = GET_MODE_PRECISION (inner_mode)
8305 - INTVAL (XEXP (inner, 1));
8306 if (width > mode_width)
8307 width = mode_width;
8308 new_rtx = make_extraction (mode, new_rtx, 0, XEXP (inner, 1),
8309 width, 1, 0, in_code == COMPARE);
8310 break;
8311 }
8312
8313 /* If in_code is COMPARE, it isn't always safe to pass it through
8314 to the recursive make_compound_operation call. */
8315 if (subreg_code == COMPARE
8316 && (!subreg_lowpart_p (x)
8317 || GET_CODE (inner) == SUBREG
8318 /* (subreg:SI (and:DI (reg:DI) (const_int 0x800000000)) 0)
8319 is (const_int 0), rather than
8320 (subreg:SI (lshiftrt:DI (reg:DI) (const_int 35)) 0).
8321 Similarly (subreg:QI (and:SI (reg:SI) (const_int 0x80)) 0)
8322 for non-equality comparisons against 0 is not equivalent
8323 to (subreg:QI (lshiftrt:SI (reg:SI) (const_int 7)) 0). */
8324 || (GET_CODE (inner) == AND
8325 && CONST_INT_P (XEXP (inner, 1))
8326 && partial_subreg_p (x)
8327 && exact_log2 (UINTVAL (XEXP (inner, 1)))
8328 >= GET_MODE_BITSIZE (mode) - 1)))
8329 subreg_code = SET;
8330
8331 tem = make_compound_operation (inner, subreg_code);
8332
8333 simplified
8334 = simplify_subreg (mode, tem, GET_MODE (inner), SUBREG_BYTE (x));
8335 if (simplified)
8336 tem = simplified;
8337
8338 if (GET_CODE (tem) != GET_CODE (inner)
8339 && partial_subreg_p (x)
8340 && subreg_lowpart_p (x))
8341 {
8342 rtx newer
8343 = force_to_mode (tem, mode, HOST_WIDE_INT_M1U, 0);
8344
8345 /* If we have something other than a SUBREG, we might have
8346 done an expansion, so rerun ourselves. */
8347 if (GET_CODE (newer) != SUBREG)
8348 newer = make_compound_operation (newer, in_code);
8349
8350 /* force_to_mode can expand compounds. If it just re-expanded
8351 the compound, use gen_lowpart to convert to the desired
8352 mode. */
8353 if (rtx_equal_p (newer, x)
8354 /* Likewise if it re-expanded the compound only partially.
8355 This happens for SUBREG of ZERO_EXTRACT if they extract
8356 the same number of bits. */
8357 || (GET_CODE (newer) == SUBREG
8358 && (GET_CODE (SUBREG_REG (newer)) == LSHIFTRT
8359 || GET_CODE (SUBREG_REG (newer)) == ASHIFTRT)
8360 && GET_CODE (inner) == AND
8361 && rtx_equal_p (SUBREG_REG (newer), XEXP (inner, 0))))
8362 return gen_lowpart (GET_MODE (x), tem);
8363
8364 return newer;
8365 }
8366
8367 if (simplified)
8368 return tem;
8369 }
8370 break;
8371
8372 default:
8373 break;
8374 }
8375
8376 if (new_rtx)
8377 *x_ptr = gen_lowpart (mode, new_rtx);
8378 *next_code_ptr = next_code;
8379 return NULL_RTX;
8380 }
8381
8382 /* Look at the expression rooted at X. Look for expressions
8383 equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND.
8384 Form these expressions.
8385
8386 Return the new rtx, usually just X.
8387
8388 Also, for machines like the VAX that don't have logical shift insns,
8389 try to convert logical to arithmetic shift operations in cases where
8390 they are equivalent. This undoes the canonicalizations to logical
8391 shifts done elsewhere.
8392
8393 We try, as much as possible, to re-use rtl expressions to save memory.
8394
8395 IN_CODE says what kind of expression we are processing. Normally, it is
8396 SET. In a memory address it is MEM. When processing the arguments of
8397 a comparison or a COMPARE against zero, it is COMPARE, or EQ if,
8398 more precisely, it is an equality comparison against zero. */
8399
8400 rtx
8401 make_compound_operation (rtx x, enum rtx_code in_code)
8402 {
8403 enum rtx_code code = GET_CODE (x);
8404 const char *fmt;
8405 int i, j;
8406 enum rtx_code next_code;
8407 rtx new_rtx, tem;
8408
8409 /* Select the code to be used in recursive calls. Once we are inside an
8410 address, we stay there. If we have a comparison, set to COMPARE,
8411 but once inside, go back to our default of SET. */
8412
8413 next_code = (code == MEM ? MEM
8414 : ((code == COMPARE || COMPARISON_P (x))
8415 && XEXP (x, 1) == const0_rtx) ? COMPARE
8416 : in_code == COMPARE || in_code == EQ ? SET : in_code);
8417
8418 scalar_int_mode mode;
8419 if (is_a <scalar_int_mode> (GET_MODE (x), &mode))
8420 {
8421 rtx new_rtx = make_compound_operation_int (mode, &x, in_code,
8422 &next_code);
8423 if (new_rtx)
8424 return new_rtx;
8425 code = GET_CODE (x);
8426 }
8427
8428 /* Now recursively process each operand of this operation. We need to
8429 handle ZERO_EXTEND specially so that we don't lose track of the
8430 inner mode. */
8431 if (code == ZERO_EXTEND)
8432 {
8433 new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8434 tem = simplify_const_unary_operation (ZERO_EXTEND, GET_MODE (x),
8435 new_rtx, GET_MODE (XEXP (x, 0)));
8436 if (tem)
8437 return tem;
8438 SUBST (XEXP (x, 0), new_rtx);
8439 return x;
8440 }
8441
8442 fmt = GET_RTX_FORMAT (code);
8443 for (i = 0; i < GET_RTX_LENGTH (code); i++)
8444 if (fmt[i] == 'e')
8445 {
8446 new_rtx = make_compound_operation (XEXP (x, i), next_code);
8447 SUBST (XEXP (x, i), new_rtx);
8448 }
8449 else if (fmt[i] == 'E')
8450 for (j = 0; j < XVECLEN (x, i); j++)
8451 {
8452 new_rtx = make_compound_operation (XVECEXP (x, i, j), next_code);
8453 SUBST (XVECEXP (x, i, j), new_rtx);
8454 }
8455
8456 maybe_swap_commutative_operands (x);
8457 return x;
8458 }
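/* Usage sketch (a hypothetical call, for illustration only -- not a
   quote of an actual call site): re-form compound operations in a SET
   source before trying to recognize the insn:

     rtx src = make_compound_operation (SET_SRC (pat), SET);

   e.g. (and:SI (lshiftrt:SI X (const_int 4)) (const_int 255)) can come
   back as an extraction that matches a target's extzv pattern.  */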
8459
8460 /* Given M, see if it is a value that would select a field of bits
8461 within an item, but not the entire word. Return -1 if not.
8462 Otherwise, return the starting position of the field, where 0 is the
8463 low-order bit.
8464
8465 *PLEN is set to the length of the field. */
8466
8467 static int
8468 get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen)
8469 {
8470 /* Get the bit number of the first 1 bit from the right, -1 if none. */
8471 int pos = m ? ctz_hwi (m) : -1;
8472 int len = 0;
8473
8474 if (pos >= 0)
8475 /* Now shift off the low-order zero bits and see if we have a
8476 power of two minus 1. */
8477 len = exact_log2 ((m >> pos) + 1);
8478
8479 if (len <= 0)
8480 pos = -1;
8481
8482 *plen = len;
8483 return pos;
8484 }
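/* Worked example (illustrative): for m == 0x78 (binary 0111 1000),
   ctz_hwi gives pos == 3 and (m >> 3) + 1 == 16 == 2^4, so *PLEN is
   set to 4 and 3 is returned: the mask selects a 4-bit field starting
   at bit 3.  A non-contiguous mask such as 0x50 fails the
   power-of-two-minus-1 test and yields -1.  */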
8485
8486 /* If X refers to a register that equals REG in value, replace these
8487 references with REG. */
8488 static rtx
8489 canon_reg_for_combine (rtx x, rtx reg)
8490 {
8491 rtx op0, op1, op2;
8492 const char *fmt;
8493 int i;
8494 bool copied;
8495
8496 enum rtx_code code = GET_CODE (x);
8497 switch (GET_RTX_CLASS (code))
8498 {
8499 case RTX_UNARY:
8500 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8501 if (op0 != XEXP (x, 0))
8502 return simplify_gen_unary (GET_CODE (x), GET_MODE (x), op0,
8503 GET_MODE (reg));
8504 break;
8505
8506 case RTX_BIN_ARITH:
8507 case RTX_COMM_ARITH:
8508 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8509 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8510 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8511 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
8512 break;
8513
8514 case RTX_COMPARE:
8515 case RTX_COMM_COMPARE:
8516 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8517 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8518 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8519 return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
8520 GET_MODE (op0), op0, op1);
8521 break;
8522
8523 case RTX_TERNARY:
8524 case RTX_BITFIELD_OPS:
8525 op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8526 op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8527 op2 = canon_reg_for_combine (XEXP (x, 2), reg);
8528 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1) || op2 != XEXP (x, 2))
8529 return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
8530 GET_MODE (op0), op0, op1, op2);
8531 /* FALLTHRU */
8532
8533 case RTX_OBJ:
8534 if (REG_P (x))
8535 {
8536 if (rtx_equal_p (get_last_value (reg), x)
8537 || rtx_equal_p (reg, get_last_value (x)))
8538 return reg;
8539 else
8540 break;
8541 }
8542
8543 /* fall through */
8544
8545 default:
8546 fmt = GET_RTX_FORMAT (code);
8547 copied = false;
8548 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8549 if (fmt[i] == 'e')
8550 {
8551 rtx op = canon_reg_for_combine (XEXP (x, i), reg);
8552 if (op != XEXP (x, i))
8553 {
8554 if (!copied)
8555 {
8556 copied = true;
8557 x = copy_rtx (x);
8558 }
8559 XEXP (x, i) = op;
8560 }
8561 }
8562 else if (fmt[i] == 'E')
8563 {
8564 int j;
8565 for (j = 0; j < XVECLEN (x, i); j++)
8566 {
8567 rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
8568 if (op != XVECEXP (x, i, j))
8569 {
8570 if (!copied)
8571 {
8572 copied = true;
8573 x = copy_rtx (x);
8574 }
8575 XVECEXP (x, i, j) = op;
8576 }
8577 }
8578 }
8579
8580 break;
8581 }
8582
8583 return x;
8584 }
8585
8586 /* Return X converted to MODE. If the value is already truncated to
8587 MODE we can just return a subreg even though in the general case we
8588 would need an explicit truncation. */
8589
8590 static rtx
8591 gen_lowpart_or_truncate (machine_mode mode, rtx x)
8592 {
8593 if (!CONST_INT_P (x)
8594 && partial_subreg_p (mode, GET_MODE (x))
8595 && !TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x))
8596 && !(REG_P (x) && reg_truncated_to_mode (mode, x)))
8597 {
8598 /* Bit-cast X into an integer mode. */
8599 if (!SCALAR_INT_MODE_P (GET_MODE (x)))
8600 x = gen_lowpart (int_mode_for_mode (GET_MODE (x)).require (), x);
8601 x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode).require (),
8602 x, GET_MODE (x));
8603 }
8604
8605 return gen_lowpart (mode, x);
8606 }
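/* Example (illustrative, target-dependent): requesting the QImode
   version of a SImode register already known to be truncated, or on a
   target where the truncation is a no-op, yields a plain
   (subreg:QI (reg:SI r) 0) on a little-endian target; otherwise an
   explicit (truncate:QI (reg:SI r)) is generated first.  */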
8607
8608 /* See if X can be simplified knowing that we will only refer to it in
8609 MODE and will only refer to those bits that are nonzero in MASK.
8610 If other bits are being computed or if masking operations are done
8611 that select a superset of the bits in MASK, they can sometimes be
8612 ignored.
8613
8614 Return a possibly simplified expression, but always convert X to
8615 MODE. If X is a CONST_INT, AND the CONST_INT with MASK.
8616
8617 If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
8618 are all off in X. This is used when X will be complemented, by either
8619 NOT, NEG, or XOR. */
8620
8621 static rtx
8622 force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask,
8623 int just_select)
8624 {
8625 enum rtx_code code = GET_CODE (x);
8626 int next_select = just_select || code == XOR || code == NOT || code == NEG;
8627 machine_mode op_mode;
8628 unsigned HOST_WIDE_INT nonzero;
8629
8630 /* If this is a CALL or ASM_OPERANDS, don't do anything. Some of the
8631 code below will do the wrong thing since the mode of such an
8632 expression is VOIDmode.
8633
8634 Also do nothing if X is a CLOBBER; this can happen if X was
8635 the return value from a call to gen_lowpart. */
8636 if (code == CALL || code == ASM_OPERANDS || code == CLOBBER)
8637 return x;
8638
8639 /* We want to perform the operation in its present mode unless we know
8640 that the operation is valid in MODE, in which case we do the operation
8641 in MODE. */
8642 op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
8643 && have_insn_for (code, mode))
8644 ? mode : GET_MODE (x));
8645
8646 /* It is not valid to do a right-shift in a narrower mode
8647 than the one it came in with. */
8648 if ((code == LSHIFTRT || code == ASHIFTRT)
8649 && partial_subreg_p (mode, GET_MODE (x)))
8650 op_mode = GET_MODE (x);
8651
8652 /* Truncate MASK to fit OP_MODE. */
8653 if (op_mode)
8654 mask &= GET_MODE_MASK (op_mode);
8655
8656 /* Determine what bits of X are guaranteed to be (non)zero. */
8657 nonzero = nonzero_bits (x, mode);
8658
8659 /* If none of the bits in X are needed, return a zero. */
8660 if (!just_select && (nonzero & mask) == 0 && !side_effects_p (x))
8661 x = const0_rtx;
8662
8663 /* If X is a CONST_INT, return a new one. Do this here since the
8664 test below will fail. */
8665 if (CONST_INT_P (x))
8666 {
8667 if (SCALAR_INT_MODE_P (mode))
8668 return gen_int_mode (INTVAL (x) & mask, mode);
8669 else
8670 {
8671 x = GEN_INT (INTVAL (x) & mask);
8672 return gen_lowpart_common (mode, x);
8673 }
8674 }
8675
8676 /* If X is narrower than MODE and we want all the bits in X's mode, just
8677 get X in the proper mode. */
8678 if (paradoxical_subreg_p (mode, GET_MODE (x))
8679 && (GET_MODE_MASK (GET_MODE (x)) & ~mask) == 0)
8680 return gen_lowpart (mode, x);
8681
8682 /* We can ignore the effect of a SUBREG if it narrows the mode or
8683 if the constant masks to zero all the bits the mode doesn't have. */
8684 if (GET_CODE (x) == SUBREG
8685 && subreg_lowpart_p (x)
8686 && (partial_subreg_p (x)
8687 || (mask
8688 & GET_MODE_MASK (GET_MODE (x))
8689 & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))) == 0))
8690 return force_to_mode (SUBREG_REG (x), mode, mask, next_select);
8691
8692 scalar_int_mode int_mode, xmode;
8693 if (is_a <scalar_int_mode> (mode, &int_mode)
8694 && is_a <scalar_int_mode> (GET_MODE (x), &xmode))
8695 /* OP_MODE is either MODE or XMODE, so it must be a scalar
8696 integer too. */
8697 return force_int_to_mode (x, int_mode, xmode,
8698 as_a <scalar_int_mode> (op_mode),
8699 mask, just_select);
8700
8701 return gen_lowpart_or_truncate (mode, x);
8702 }
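/* Example (illustrative, assuming JUST_SELECT is zero): forcing
   (and:SI (reg:SI r) (const_int 0xff00)) to SImode with MASK == 0xff
   returns const0_rtx, because no bit selected by MASK can be nonzero;
   forcing (const_int 0x1234) with MASK == 0xff returns
   (const_int 0x34).  */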
8703
8704 /* Subroutine of force_to_mode that handles cases in which both X and
8705 the result are scalar integers. MODE is the mode of the result,
8706 XMODE is the mode of X, and OP_MODE says which of MODE or XMODE
8707 is preferred for simplified versions of X. The other arguments
8708 are as for force_to_mode. */
8709
8710 static rtx
8711 force_int_to_mode (rtx x, scalar_int_mode mode, scalar_int_mode xmode,
8712 scalar_int_mode op_mode, unsigned HOST_WIDE_INT mask,
8713 int just_select)
8714 {
8715 enum rtx_code code = GET_CODE (x);
8716 int next_select = just_select || code == XOR || code == NOT || code == NEG;
8717 unsigned HOST_WIDE_INT fuller_mask;
8718 rtx op0, op1, temp;
8719
8720 /* When we have an arithmetic operation, or a shift whose count we
8721 do not know, we need to assume that all bits up to the highest-order
8722 bit in MASK will be needed. This is how we form such a mask. */
8723 if (mask & (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1)))
8724 fuller_mask = HOST_WIDE_INT_M1U;
8725 else
8726 fuller_mask = ((HOST_WIDE_INT_1U << (floor_log2 (mask) + 1))
8727 - 1);
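/* For instance (illustrative): mask == 0x30 has floor_log2 == 5, so
   fuller_mask == (HOST_WIDE_INT_1U << 6) - 1 == 0x3f; carries out of
   bits 0..3 can propagate into bits 4 and 5, so those lower bits must
   be kept as well.  */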
8728
8729 switch (code)
8730 {
8731 case CLOBBER:
8732 /* If X is a (clobber (const_int)), return it since we know we are
8733 generating something that won't match. */
8734 return x;
8735
8736 case SIGN_EXTEND:
8737 case ZERO_EXTEND:
8738 case ZERO_EXTRACT:
8739 case SIGN_EXTRACT:
8740 x = expand_compound_operation (x);
8741 if (GET_CODE (x) != code)
8742 return force_to_mode (x, mode, mask, next_select);
8743 break;
8744
8745 case TRUNCATE:
8746 /* Similarly for a truncate. */
8747 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8748
8749 case AND:
8750 /* If this is an AND with a constant, convert it into an AND
8751 whose constant is the AND of that constant with MASK. If it
8752 remains an AND of MASK, delete it since it is redundant. */
8753
8754 if (CONST_INT_P (XEXP (x, 1)))
8755 {
8756 x = simplify_and_const_int (x, op_mode, XEXP (x, 0),
8757 mask & INTVAL (XEXP (x, 1)));
8758 xmode = op_mode;
8759
8760 /* If X is still an AND, see if it is an AND with a mask that
8761 is just some low-order bits. If so, and it is MASK, we don't
8762 need it. */
8763
8764 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8765 && (INTVAL (XEXP (x, 1)) & GET_MODE_MASK (xmode)) == mask)
8766 x = XEXP (x, 0);
8767
8768 /* If it remains an AND, try making another AND with the bits
8769 in the mode mask that aren't in MASK turned on. If the
8770 constant in the AND is wide enough, this might make a
8771 cheaper constant. */
8772
8773 if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8774 && GET_MODE_MASK (xmode) != mask
8775 && HWI_COMPUTABLE_MODE_P (xmode))
8776 {
8777 unsigned HOST_WIDE_INT cval
8778 = UINTVAL (XEXP (x, 1)) | (GET_MODE_MASK (xmode) & ~mask);
8779 rtx y;
8780
8781 y = simplify_gen_binary (AND, xmode, XEXP (x, 0),
8782 gen_int_mode (cval, xmode));
8783 if (set_src_cost (y, xmode, optimize_this_for_speed_p)
8784 < set_src_cost (x, xmode, optimize_this_for_speed_p))
8785 x = y;
8786 }
8787
8788 break;
8789 }
8790
8791 goto binop;
8792
8793 case PLUS:
8794 /* In (and (plus FOO C1) M), if M is a mask that just turns off
8795 low-order bits (as in an alignment operation) and FOO is already
8796 aligned to that boundary, mask C1 to that boundary as well.
8797 This may eliminate that PLUS and, later, the AND. */
8798
8799 {
8800 unsigned int width = GET_MODE_PRECISION (mode);
8801 unsigned HOST_WIDE_INT smask = mask;
8802
8803 /* If MODE is narrower than HOST_WIDE_INT and mask is a negative
8804 number, sign extend it. */
8805
8806 if (width < HOST_BITS_PER_WIDE_INT
8807 && (smask & (HOST_WIDE_INT_1U << (width - 1))) != 0)
8808 smask |= HOST_WIDE_INT_M1U << width;
8809
8810 if (CONST_INT_P (XEXP (x, 1))
8811 && pow2p_hwi (- smask)
8812 && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
8813 && (INTVAL (XEXP (x, 1)) & ~smask) != 0)
8814 return force_to_mode (plus_constant (xmode, XEXP (x, 0),
8815 (INTVAL (XEXP (x, 1)) & smask)),
8816 mode, smask, next_select);
8817 }
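/* Concrete instance (illustrative): for (and (plus FOO 9) -4) with
   FOO known to be 4-byte aligned, smask == -4, -smask == 4 is a power
   of two, and 9 & ~smask == 1 != 0, so the constant is masked to
   9 & -4 == 8 and we recurse on (plus FOO 8), which may later let the
   AND be simplified away.  */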
8818
8819 /* fall through */
8820
8821 case MULT:
8822 /* Substituting into the operands of a widening MULT is not likely to
8823 create RTL matching a machine insn. */
8824 if (code == MULT
8825 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
8826 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
8827 && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
8828 || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
8829 && REG_P (XEXP (XEXP (x, 0), 0))
8830 && REG_P (XEXP (XEXP (x, 1), 0)))
8831 return gen_lowpart_or_truncate (mode, x);
8832
8833 /* For PLUS, MINUS and MULT, we need any bits less significant than the
8834 most significant bit in MASK since carries from those bits will
8835 affect the bits we are interested in. */
8836 mask = fuller_mask;
8837 goto binop;
8838
8839 case MINUS:
8840 /* If X is (minus C Y) where C's least set bit is larger than any bit
8841 in the mask, then we may replace with (neg Y). */
8842 if (CONST_INT_P (XEXP (x, 0))
8843 && least_bit_hwi (UINTVAL (XEXP (x, 0))) > mask)
8844 {
8845 x = simplify_gen_unary (NEG, xmode, XEXP (x, 1), xmode);
8846 return force_to_mode (x, mode, mask, next_select);
8847 }
8848
8849 /* Similarly, if C contains every bit in the fuller_mask, then we may
8850 replace with (not Y). */
8851 if (CONST_INT_P (XEXP (x, 0))
8852 && ((UINTVAL (XEXP (x, 0)) | fuller_mask) == UINTVAL (XEXP (x, 0))))
8853 {
8854 x = simplify_gen_unary (NOT, xmode, XEXP (x, 1), xmode);
8855 return force_to_mode (x, mode, mask, next_select);
8856 }
8857
8858 mask = fuller_mask;
8859 goto binop;
8860
8861 case IOR:
8862 case XOR:
8863 /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
8864 LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
8865 operation which may be a bitfield extraction. Ensure that the
8866 constant we form is not wider than the mode of X. */
8867
8868 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8869 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8870 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8871 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8872 && CONST_INT_P (XEXP (x, 1))
8873 && ((INTVAL (XEXP (XEXP (x, 0), 1))
8874 + floor_log2 (INTVAL (XEXP (x, 1))))
8875 < GET_MODE_PRECISION (xmode))
8876 && (UINTVAL (XEXP (x, 1))
8877 & ~nonzero_bits (XEXP (x, 0), xmode)) == 0)
8878 {
8879 temp = gen_int_mode ((INTVAL (XEXP (x, 1)) & mask)
8880 << INTVAL (XEXP (XEXP (x, 0), 1)),
8881 xmode);
8882 temp = simplify_gen_binary (GET_CODE (x), xmode,
8883 XEXP (XEXP (x, 0), 0), temp);
8884 x = simplify_gen_binary (LSHIFTRT, xmode, temp,
8885 XEXP (XEXP (x, 0), 1));
8886 return force_to_mode (x, mode, mask, next_select);
8887 }
8888
8889 binop:
8890 /* For most binary operations, just propagate into the operation and
8891 change the mode if we have an operation of that mode. */
8892
8893 op0 = force_to_mode (XEXP (x, 0), mode, mask, next_select);
8894 op1 = force_to_mode (XEXP (x, 1), mode, mask, next_select);
8895
8896 /* If we ended up truncating both operands, truncate the result of the
8897 operation instead. */
8898 if (GET_CODE (op0) == TRUNCATE
8899 && GET_CODE (op1) == TRUNCATE)
8900 {
8901 op0 = XEXP (op0, 0);
8902 op1 = XEXP (op1, 0);
8903 }
8904
8905 op0 = gen_lowpart_or_truncate (op_mode, op0);
8906 op1 = gen_lowpart_or_truncate (op_mode, op1);
8907
8908 if (op_mode != xmode || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8909 {
8910 x = simplify_gen_binary (code, op_mode, op0, op1);
8911 xmode = op_mode;
8912 }
8913 break;
8914
8915 case ASHIFT:
8916 /* For left shifts, do the same, but just for the first operand.
8917 However, we cannot do anything with shifts where we cannot
8918 guarantee that the counts are smaller than the size of the mode
8919 because such a count will have a different meaning in a
8920 wider mode. */
8921
8922 if (! (CONST_INT_P (XEXP (x, 1))
8923 && INTVAL (XEXP (x, 1)) >= 0
8924 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (mode))
8925 && ! (GET_MODE (XEXP (x, 1)) != VOIDmode
8926 && (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
8927 < (unsigned HOST_WIDE_INT) GET_MODE_PRECISION (mode))))
8928 break;
8929
8930 /* If the shift count is a constant and we can do arithmetic in
8931 the mode of the shift, refine which bits we need. Otherwise, use the
8932 conservative form of the mask. */
8933 if (CONST_INT_P (XEXP (x, 1))
8934 && INTVAL (XEXP (x, 1)) >= 0
8935 && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (op_mode)
8936 && HWI_COMPUTABLE_MODE_P (op_mode))
8937 mask >>= INTVAL (XEXP (x, 1));
8938 else
8939 mask = fuller_mask;
8940
8941 op0 = gen_lowpart_or_truncate (op_mode,
8942 force_to_mode (XEXP (x, 0), mode,
8943 mask, next_select));
8944
8945 if (op_mode != xmode || op0 != XEXP (x, 0))
8946 {
8947 x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1));
8948 xmode = op_mode;
8949 }
8950 break;
8951
8952 case LSHIFTRT:
8953 /* Here we can only do something if the shift count is a constant,
8954 this shift constant is valid for the host, and we can do arithmetic
8955 in OP_MODE. */
8956
8957 if (CONST_INT_P (XEXP (x, 1))
8958 && INTVAL (XEXP (x, 1)) >= 0
8959 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
8960 && HWI_COMPUTABLE_MODE_P (op_mode))
8961 {
8962 rtx inner = XEXP (x, 0);
8963 unsigned HOST_WIDE_INT inner_mask;
8964
8965 /* Select the mask of the bits we need for the shift operand. */
8966 inner_mask = mask << INTVAL (XEXP (x, 1));
8967
8968 /* We can only change the mode of the shift if we can do arithmetic
8969 in the mode of the shift and INNER_MASK is no wider than the
8970 width of X's mode. */
8971 if ((inner_mask & ~GET_MODE_MASK (xmode)) != 0)
8972 op_mode = xmode;
8973
8974 inner = force_to_mode (inner, op_mode, inner_mask, next_select);
8975
8976 if (xmode != op_mode || inner != XEXP (x, 0))
8977 {
8978 x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
8979 xmode = op_mode;
8980 }
8981 }
8982
8983 /* If we have (and (lshiftrt FOO C1) C2) where the combination of the
8984 shift and AND produces only copies of the sign bit (C2 is one less
8985 than a power of two), we can do this with just a shift. */
8986
8987 if (GET_CODE (x) == LSHIFTRT
8988 && CONST_INT_P (XEXP (x, 1))
8989 /* The shift puts one of the sign bit copies in the least significant
8990 bit. */
8991 && ((INTVAL (XEXP (x, 1))
8992 + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
8993 >= GET_MODE_PRECISION (xmode))
8994 && pow2p_hwi (mask + 1)
8995 /* Number of bits left after the shift must be more than the mask
8996 needs. */
8997 && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
8998 <= GET_MODE_PRECISION (xmode))
8999 /* Must be more sign bit copies than the mask needs. */
9000 && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
9001 >= exact_log2 (mask + 1)))
9002 {
9003 int nbits = GET_MODE_PRECISION (xmode) - exact_log2 (mask + 1);
9004 x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0),
9005 gen_int_shift_amount (xmode, nbits));
9006 }
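/* Worked instance (illustrative): in SImode, suppose FOO has 25
   sign-bit copies and we are masking (lshiftrt FOO 24) with
   mask == 7.  Then mask + 1 == 8 == 2^3, 24 + 25 >= 32, 24 + 3 <= 32
   and 25 >= 3, so nbits == 32 - 3 == 29 and the shift/mask pair
   becomes plain (lshiftrt FOO 29).  */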
9007 goto shiftrt;
9008
9009 case ASHIFTRT:
9010 /* If we are just looking for the sign bit, we don't need this shift at
9011 all, even if it has a variable count. */
9012 if (val_signbit_p (xmode, mask))
9013 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
9014
9015 /* If this is a shift by a constant, get a mask that contains those bits
9016 that are not copies of the sign bit. We then have two cases: If
9017 MASK only includes those bits, this can be a logical shift, which may
9018 allow simplifications. If MASK is a single-bit field not within
9019 those bits, we are requesting a copy of the sign bit and hence can
9020 shift the sign bit to the appropriate location. */
9021
9022 if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0
9023 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
9024 {
9025 unsigned HOST_WIDE_INT nonzero;
9026 int i;
9027
9028 /* If the considered data is wider than HOST_WIDE_INT, we can't
9029 represent a mask for all its bits in a single scalar.
9030 But we only care about the lower bits, so calculate these. */
9031
9032 if (GET_MODE_PRECISION (xmode) > HOST_BITS_PER_WIDE_INT)
9033 {
9034 nonzero = HOST_WIDE_INT_M1U;
9035
9036 /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
9037 is the number of bits a full-width mask would have set.
9038 We need only shift if these are fewer than nonzero can
9039 hold. If not, we must keep all bits set in nonzero. */
9040
9041 if (GET_MODE_PRECISION (xmode) - INTVAL (XEXP (x, 1))
9042 < HOST_BITS_PER_WIDE_INT)
9043 nonzero >>= INTVAL (XEXP (x, 1))
9044 + HOST_BITS_PER_WIDE_INT
9045 - GET_MODE_PRECISION (xmode);
9046 }
9047 else
9048 {
9049 nonzero = GET_MODE_MASK (xmode);
9050 nonzero >>= INTVAL (XEXP (x, 1));
9051 }
9052
9053 if ((mask & ~nonzero) == 0)
9054 {
9055 x = simplify_shift_const (NULL_RTX, LSHIFTRT, xmode,
9056 XEXP (x, 0), INTVAL (XEXP (x, 1)));
9057 if (GET_CODE (x) != ASHIFTRT)
9058 return force_to_mode (x, mode, mask, next_select);
9059 }
9060
9061 else if ((i = exact_log2 (mask)) >= 0)
9062 {
9063 x = simplify_shift_const
9064 (NULL_RTX, LSHIFTRT, xmode, XEXP (x, 0),
9065 GET_MODE_PRECISION (xmode) - 1 - i);
9066
9067 if (GET_CODE (x) != ASHIFTRT)
9068 return force_to_mode (x, mode, mask, next_select);
9069 }
9070 }
9071
9072 /* If MASK is 1, convert this to an LSHIFTRT. This can be done
9073 even if the shift count isn't a constant. */
9074 if (mask == 1)
9075 x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0), XEXP (x, 1));
9076
9077 shiftrt:
9078
9079 /* If this is a zero- or sign-extension operation that just affects bits
9080 we don't care about, remove it. Be sure the call above returned
9081 something that is still a shift. */
9082
9083 if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT)
9084 && CONST_INT_P (XEXP (x, 1))
9085 && INTVAL (XEXP (x, 1)) >= 0
9086 && (INTVAL (XEXP (x, 1))
9087 <= GET_MODE_PRECISION (xmode) - (floor_log2 (mask) + 1))
9088 && GET_CODE (XEXP (x, 0)) == ASHIFT
9089 && XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
9090 return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
9091 next_select);
9092
9093 break;
9094
9095 case ROTATE:
9096 case ROTATERT:
9097 /* If the shift count is constant and we can do computations
9098 in the mode of X, compute where the bits we care about are.
9099 Otherwise, we can't do anything. Don't change the mode of
9100 the shift or propagate MODE into the shift, though. */
9101 if (CONST_INT_P (XEXP (x, 1))
9102 && INTVAL (XEXP (x, 1)) >= 0)
9103 {
9104 temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE,
9105 xmode, gen_int_mode (mask, xmode),
9106 XEXP (x, 1));
9107 if (temp && CONST_INT_P (temp))
9108 x = simplify_gen_binary (code, xmode,
9109 force_to_mode (XEXP (x, 0), xmode,
9110 INTVAL (temp), next_select),
9111 XEXP (x, 1));
9112 }
9113 break;
9114
9115 case NEG:
9116 /* If we just want the low-order bit, the NEG isn't needed since it
9117 won't change the low-order bit. */
9118 if (mask == 1)
9119 return force_to_mode (XEXP (x, 0), mode, mask, just_select);
9120
9121 /* We need any bits less significant than the most significant bit in
9122 MASK since carries from those bits will affect the bits we are
9123 interested in. */
9124 mask = fuller_mask;
9125 goto unop;
9126
9127 case NOT:
9128 /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the
9129 same as the XOR case above. Ensure that the constant we form is not
9130 wider than the mode of X. */
9131
9132 if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
9133 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
9134 && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
9135 && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
9136 < GET_MODE_PRECISION (xmode))
9137 && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
9138 {
9139 temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)), xmode);
9140 temp = simplify_gen_binary (XOR, xmode, XEXP (XEXP (x, 0), 0), temp);
9141 x = simplify_gen_binary (LSHIFTRT, xmode,
9142 temp, XEXP (XEXP (x, 0), 1));
9143
9144 return force_to_mode (x, mode, mask, next_select);
9145 }
9146
9147 /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
9148 use the full mask inside the NOT. */
9149 mask = fuller_mask;
9150
9151 unop:
9152 op0 = gen_lowpart_or_truncate (op_mode,
9153 force_to_mode (XEXP (x, 0), mode, mask,
9154 next_select));
9155 if (op_mode != xmode || op0 != XEXP (x, 0))
9156 {
9157 x = simplify_gen_unary (code, op_mode, op0, op_mode);
9158 xmode = op_mode;
9159 }
9160 break;
9161
9162 case NE:
9163 /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
9164 in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero,
9165 and that bit is equal to STORE_FLAG_VALUE. */
9166 if ((mask & ~STORE_FLAG_VALUE) == 0
9167 && XEXP (x, 1) == const0_rtx
9168 && GET_MODE (XEXP (x, 0)) == mode
9169 && pow2p_hwi (nonzero_bits (XEXP (x, 0), mode))
9170 && (nonzero_bits (XEXP (x, 0), mode)
9171 == (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE))
9172 return force_to_mode (XEXP (x, 0), mode, mask, next_select);
9173
9174 break;
9175
9176 case IF_THEN_ELSE:
9177 /* We have no way of knowing if the IF_THEN_ELSE can itself be
9178 written in a narrower mode. We play it safe and do not do so. */
9179
9180 op0 = gen_lowpart_or_truncate (xmode,
9181 force_to_mode (XEXP (x, 1), mode,
9182 mask, next_select));
9183 op1 = gen_lowpart_or_truncate (xmode,
9184 force_to_mode (XEXP (x, 2), mode,
9185 mask, next_select));
9186 if (op0 != XEXP (x, 1) || op1 != XEXP (x, 2))
9187 x = simplify_gen_ternary (IF_THEN_ELSE, xmode,
9188 GET_MODE (XEXP (x, 0)), XEXP (x, 0),
9189 op0, op1);
9190 break;
9191
9192 default:
9193 break;
9194 }
9195
9196 /* Ensure we return a value of the proper mode. */
9197 return gen_lowpart_or_truncate (mode, x);
9198 }
9199
9200 /* Return nonzero if X is an expression that has one of two values depending on
9201 whether some other value is zero or nonzero. In that case, we return the
9202 value that is being tested, *PTRUE is set to the value if the rtx being
9203 returned has a nonzero value, and *PFALSE is set to the other alternative.
9204
9205 If we return zero, we set *PTRUE and *PFALSE to X. */
9206
9207 static rtx
9208 if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse)
9209 {
9210 machine_mode mode = GET_MODE (x);
9211 enum rtx_code code = GET_CODE (x);
9212 rtx cond0, cond1, true0, true1, false0, false1;
9213 unsigned HOST_WIDE_INT nz;
9214 scalar_int_mode int_mode;
9215
9216 /* If we are comparing a value against zero, we are done. */
9217 if ((code == NE || code == EQ)
9218 && XEXP (x, 1) == const0_rtx)
9219 {
9220 *ptrue = (code == NE) ? const_true_rtx : const0_rtx;
9221 *pfalse = (code == NE) ? const0_rtx : const_true_rtx;
9222 return XEXP (x, 0);
9223 }
9224
9225 /* If this is a unary operation whose operand has one of two values, apply
9226 our opcode to compute those values. */
9227 else if (UNARY_P (x)
9228 && (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0)
9229 {
9230 *ptrue = simplify_gen_unary (code, mode, true0, GET_MODE (XEXP (x, 0)));
9231 *pfalse = simplify_gen_unary (code, mode, false0,
9232 GET_MODE (XEXP (x, 0)));
9233 return cond0;
9234 }
9235
9236 /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
9237 make can't possibly match and would suppress other optimizations. */
9238 else if (code == COMPARE)
9239 ;
9240
9241 /* If this is a binary operation, see if either side has only one of two
9242 values. If either one does or if both do and they are conditional on
9243 the same value, compute the new true and false values. */
9244 else if (BINARY_P (x))
9245 {
9246 rtx op0 = XEXP (x, 0);
9247 rtx op1 = XEXP (x, 1);
9248 cond0 = if_then_else_cond (op0, &true0, &false0);
9249 cond1 = if_then_else_cond (op1, &true1, &false1);
9250
9251 if ((cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1))
9252 && (REG_P (op0) || REG_P (op1)))
9253 {
9254 /* Try to enable a simplification by undoing work done by
9255 if_then_else_cond if it converted a REG into something more
9256 complex. */
9257 if (REG_P (op0))
9258 {
9259 cond0 = 0;
9260 true0 = false0 = op0;
9261 }
9262 else
9263 {
9264 cond1 = 0;
9265 true1 = false1 = op1;
9266 }
9267 }
9268
9269 if ((cond0 != 0 || cond1 != 0)
9270 && ! (cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1)))
9271 {
9272 /* If if_then_else_cond returned zero, then true/false are the
9273 same rtl. We must copy one of them to prevent invalid rtl
9274 sharing. */
9275 if (cond0 == 0)
9276 true0 = copy_rtx (true0);
9277 else if (cond1 == 0)
9278 true1 = copy_rtx (true1);
9279
9280 if (COMPARISON_P (x))
9281 {
9282 *ptrue = simplify_gen_relational (code, mode, VOIDmode,
9283 true0, true1);
9284 *pfalse = simplify_gen_relational (code, mode, VOIDmode,
9285 false0, false1);
9286 }
9287 else
9288 {
9289 *ptrue = simplify_gen_binary (code, mode, true0, true1);
9290 *pfalse = simplify_gen_binary (code, mode, false0, false1);
9291 }
9292
9293 return cond0 ? cond0 : cond1;
9294 }
9295
9296 /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
9297 operands is zero when the other is nonzero, and vice-versa,
9298 and STORE_FLAG_VALUE is 1 or -1. */
9299
9300 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
9301 && (code == PLUS || code == IOR || code == XOR || code == MINUS
9302 || code == UMAX)
9303 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
9304 {
9305 rtx op0 = XEXP (XEXP (x, 0), 1);
9306 rtx op1 = XEXP (XEXP (x, 1), 1);
9307
9308 cond0 = XEXP (XEXP (x, 0), 0);
9309 cond1 = XEXP (XEXP (x, 1), 0);
9310
9311 if (COMPARISON_P (cond0)
9312 && COMPARISON_P (cond1)
9313 && SCALAR_INT_MODE_P (mode)
9314 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
9315 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
9316 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
9317 || ((swap_condition (GET_CODE (cond0))
9318 == reversed_comparison_code (cond1, NULL))
9319 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
9320 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
9321 && ! side_effects_p (x))
9322 {
9323 *ptrue = simplify_gen_binary (MULT, mode, op0, const_true_rtx);
9324 *pfalse = simplify_gen_binary (MULT, mode,
9325 (code == MINUS
9326 ? simplify_gen_unary (NEG, mode,
9327 op1, mode)
9328 : op1),
9329 const_true_rtx);
9330 return cond0;
9331 }
9332 }
9333
9334 /* Similarly for MULT, AND and UMIN, except that for these the result
9335 is always zero. */
9336 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
9337 && (code == MULT || code == AND || code == UMIN)
9338 && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
9339 {
9340 cond0 = XEXP (XEXP (x, 0), 0);
9341 cond1 = XEXP (XEXP (x, 1), 0);
9342
9343 if (COMPARISON_P (cond0)
9344 && COMPARISON_P (cond1)
9345 && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
9346 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
9347 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
9348 || ((swap_condition (GET_CODE (cond0))
9349 == reversed_comparison_code (cond1, NULL))
9350 && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
9351 && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
9352 && ! side_effects_p (x))
9353 {
9354 *ptrue = *pfalse = const0_rtx;
9355 return cond0;
9356 }
9357 }
9358 }
9359
9360 else if (code == IF_THEN_ELSE)
9361 {
9362 /* If we have IF_THEN_ELSE already, extract the condition and
9363 canonicalize it if it is NE or EQ. */
9364 cond0 = XEXP (x, 0);
9365 *ptrue = XEXP (x, 1), *pfalse = XEXP (x, 2);
9366 if (GET_CODE (cond0) == NE && XEXP (cond0, 1) == const0_rtx)
9367 return XEXP (cond0, 0);
9368 else if (GET_CODE (cond0) == EQ && XEXP (cond0, 1) == const0_rtx)
9369 {
9370 *ptrue = XEXP (x, 2), *pfalse = XEXP (x, 1);
9371 return XEXP (cond0, 0);
9372 }
9373 else
9374 return cond0;
9375 }
9376
9377 /* If X is a SUBREG, we can narrow both the true and false values
9378 of the inner expression, if there is a condition. */
9379 else if (code == SUBREG
9380 && (cond0 = if_then_else_cond (SUBREG_REG (x), &true0,
9381 &false0)) != 0)
9382 {
9383 true0 = simplify_gen_subreg (mode, true0,
9384 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
9385 false0 = simplify_gen_subreg (mode, false0,
9386 GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
9387 if (true0 && false0)
9388 {
9389 *ptrue = true0;
9390 *pfalse = false0;
9391 return cond0;
9392 }
9393 }
9394
9395 /* If X is a constant, this isn't special and will cause confusion
9396 if we treat it as such. Likewise if it is equivalent to a constant. */
9397 else if (CONSTANT_P (x)
9398 || ((cond0 = get_last_value (x)) != 0 && CONSTANT_P (cond0)))
9399 ;
9400
9401 /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that
9402 will be least confusing to the rest of the compiler. */
9403 else if (mode == BImode)
9404 {
9405 *ptrue = GEN_INT (STORE_FLAG_VALUE), *pfalse = const0_rtx;
9406 return x;
9407 }
9408
9409 /* If X is known to be either 0 or -1, those are the true and
9410 false values when testing X. */
9411 else if (x == constm1_rtx || x == const0_rtx
9412 || (is_a <scalar_int_mode> (mode, &int_mode)
9413 && (num_sign_bit_copies (x, int_mode)
9414 == GET_MODE_PRECISION (int_mode))))
9415 {
9416 *ptrue = constm1_rtx, *pfalse = const0_rtx;
9417 return x;
9418 }
9419
9420 /* Likewise for 0 or a single bit. */
9421 else if (HWI_COMPUTABLE_MODE_P (mode)
9422 && pow2p_hwi (nz = nonzero_bits (x, mode)))
9423 {
9424 *ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx;
9425 return x;
9426 }
9427
9428 /* Otherwise fail; show no condition with true and false values the same. */
9429 *ptrue = *pfalse = x;
9430 return 0;
9431 }
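/* Example (illustrative): for x == (ne:SI (reg:CC flags) (const_int 0))
   the first case applies: *PTRUE is set to const_true_rtx, *PFALSE to
   const0_rtx, and (reg:CC flags) is returned.  For an ordinary REG no
   case applies, so 0 is returned with *PTRUE == *PFALSE == x.  */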
9432
9433 /* Return the value of expression X given the fact that condition COND
9434 is known to be true when applied to REG as its first operand and VAL
9435 as its second. X is known to not be shared and so can be modified in
9436 place.
9437
9438 We only handle the simplest cases, and specifically those cases that
9439 arise with IF_THEN_ELSE expressions. */
9440
9441 static rtx
9442 known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val)
9443 {
9444 enum rtx_code code = GET_CODE (x);
9445 const char *fmt;
9446 int i, j;
9447
9448 if (side_effects_p (x))
9449 return x;
9450
9451 /* If either operand of the condition is a floating point value,
9452 then we have to avoid collapsing an EQ comparison. */
9453 if (cond == EQ
9454 && rtx_equal_p (x, reg)
9455 && ! FLOAT_MODE_P (GET_MODE (x))
9456 && ! FLOAT_MODE_P (GET_MODE (val)))
9457 return val;
9458
9459 if (cond == UNEQ && rtx_equal_p (x, reg))
9460 return val;
9461
9462 /* If X is (abs REG) and we know something about REG's relationship
9463 with zero, we may be able to simplify this. */
9464
9465 if (code == ABS && rtx_equal_p (XEXP (x, 0), reg) && val == const0_rtx)
9466 switch (cond)
9467 {
9468 case GE: case GT: case EQ:
9469 return XEXP (x, 0);
9470 case LT: case LE:
9471 return simplify_gen_unary (NEG, GET_MODE (XEXP (x, 0)),
9472 XEXP (x, 0),
9473 GET_MODE (XEXP (x, 0)));
9474 default:
9475 break;
9476 }
9477
9478 /* The only other cases we handle are MIN, MAX, and comparisons if the
9479 operands are the same as REG and VAL. */
9480
9481 else if (COMPARISON_P (x) || COMMUTATIVE_ARITH_P (x))
9482 {
9483 if (rtx_equal_p (XEXP (x, 0), val))
9484 {
9485 std::swap (val, reg);
9486 cond = swap_condition (cond);
9487 }
9488
9489 if (rtx_equal_p (XEXP (x, 0), reg) && rtx_equal_p (XEXP (x, 1), val))
9490 {
9491 if (COMPARISON_P (x))
9492 {
9493 if (comparison_dominates_p (cond, code))
9494 return VECTOR_MODE_P (GET_MODE (x)) ? x : const_true_rtx;
9495
9496 code = reversed_comparison_code (x, NULL);
9497 if (code != UNKNOWN
9498 && comparison_dominates_p (cond, code))
9499 return CONST0_RTX (GET_MODE (x));
9500 else
9501 return x;
9502 }
9503 else if (code == SMAX || code == SMIN
9504 || code == UMIN || code == UMAX)
9505 {
9506 int unsignedp = (code == UMIN || code == UMAX);
9507
9508 /* Do not reverse the condition when it is NE or EQ.
9509 This is because we cannot conclude anything about
9510 the value of 'SMAX (x, y)' when x is not equal to y,
9511 but we can when x equals y. */
9512 if ((code == SMAX || code == UMAX)
9513 && ! (cond == EQ || cond == NE))
9514 cond = reverse_condition (cond);
9515
9516 switch (cond)
9517 {
9518 case GE: case GT:
9519 return unsignedp ? x : XEXP (x, 1);
9520 case LE: case LT:
9521 return unsignedp ? x : XEXP (x, 0);
9522 case GEU: case GTU:
9523 return unsignedp ? XEXP (x, 1) : x;
9524 case LEU: case LTU:
9525 return unsignedp ? XEXP (x, 0) : x;
9526 default:
9527 break;
9528 }
9529 }
9530 }
9531 }
9532 else if (code == SUBREG)
9533 {
9534 machine_mode inner_mode = GET_MODE (SUBREG_REG (x));
9535 rtx new_rtx, r = known_cond (SUBREG_REG (x), cond, reg, val);
9536
9537 if (SUBREG_REG (x) != r)
9538 {
9539 /* We must simplify subreg here, before we lose track of the
9540 original inner_mode. */
9541 new_rtx = simplify_subreg (GET_MODE (x), r,
9542 inner_mode, SUBREG_BYTE (x));
9543 if (new_rtx)
9544 return new_rtx;
9545 else
9546 SUBST (SUBREG_REG (x), r);
9547 }
9548
9549 return x;
9550 }
9551 /* We don't have to handle SIGN_EXTEND here, because even in the
9552 case of replacing something with a modeless CONST_INT, a
9553 CONST_INT is already (supposed to be) a valid sign extension for
9554 its narrower mode, which implies it's already properly
9555 sign-extended for the wider mode. Now, for ZERO_EXTEND, the
9556 story is different. */
9557 else if (code == ZERO_EXTEND)
9558 {
9559 machine_mode inner_mode = GET_MODE (XEXP (x, 0));
9560 rtx new_rtx, r = known_cond (XEXP (x, 0), cond, reg, val);
9561
9562 if (XEXP (x, 0) != r)
9563 {
9564 /* We must simplify the zero_extend here, before we lose
9565 track of the original inner_mode. */
9566 new_rtx = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
9567 r, inner_mode);
9568 if (new_rtx)
9569 return new_rtx;
9570 else
9571 SUBST (XEXP (x, 0), r);
9572 }
9573
9574 return x;
9575 }
9576
9577 fmt = GET_RTX_FORMAT (code);
9578 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
9579 {
9580 if (fmt[i] == 'e')
9581 SUBST (XEXP (x, i), known_cond (XEXP (x, i), cond, reg, val));
9582 else if (fmt[i] == 'E')
9583 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9584 SUBST (XVECEXP (x, i, j), known_cond (XVECEXP (x, i, j),
9585 cond, reg, val));
9586 }
9587
9588 return x;
9589 }
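/* Example (illustrative): with COND == GE, REG == (reg:SI r) and
   VAL == const0_rtx, known_cond rewrites (abs:SI (reg:SI r)) into
   (reg:SI r), since r is known to be non-negative on this arm of the
   branch; under LT it would become (neg:SI (reg:SI r)).  */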
9590
9591 /* See if X and Y are equal for the purposes of seeing if we can rewrite an
9592 assignment as a field assignment. */
9593
9594 static int
9595 rtx_equal_for_field_assignment_p (rtx x, rtx y, bool widen_x)
9596 {
9597 if (widen_x && GET_MODE (x) != GET_MODE (y))
9598 {
9599 if (paradoxical_subreg_p (GET_MODE (x), GET_MODE (y)))
9600 return 0;
9601 if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
9602 return 0;
9603 x = adjust_address_nv (x, GET_MODE (y),
9604 byte_lowpart_offset (GET_MODE (y),
9605 GET_MODE (x)));
9606 }
9607
9608 if (x == y || rtx_equal_p (x, y))
9609 return 1;
9610
9611 if (x == 0 || y == 0 || GET_MODE (x) != GET_MODE (y))
9612 return 0;
9613
9614 /* Check for a paradoxical SUBREG of a MEM compared with the MEM.
9615 Note that all SUBREGs of MEM are paradoxical; otherwise they
9616 would have been rewritten. */
9617 if (MEM_P (x) && GET_CODE (y) == SUBREG
9618 && MEM_P (SUBREG_REG (y))
9619 && rtx_equal_p (SUBREG_REG (y),
9620 gen_lowpart (GET_MODE (SUBREG_REG (y)), x)))
9621 return 1;
9622
9623 if (MEM_P (y) && GET_CODE (x) == SUBREG
9624 && MEM_P (SUBREG_REG (x))
9625 && rtx_equal_p (SUBREG_REG (x),
9626 gen_lowpart (GET_MODE (SUBREG_REG (x)), y)))
9627 return 1;
9628
9629 /* We used to see if get_last_value of X and Y were the same but that's
9630 not correct. In one direction, we'll cause the assignment to have
9631 the wrong destination; in the other direction, we'll import a register
9632 into this insn that might already have been dead. So fail if none of the
9633 above cases are true. */
9634 return 0;
9635 }
9636
9637 /* See if X, a SET operation, can be rewritten as a bit-field assignment.
9638 Return that assignment if so.
9639
9640 We only handle the most common cases. */
9641
9642 static rtx
9643 make_field_assignment (rtx x)
9644 {
9645 rtx dest = SET_DEST (x);
9646 rtx src = SET_SRC (x);
9647 rtx assign;
9648 rtx rhs, lhs;
9649 HOST_WIDE_INT c1;
9650 HOST_WIDE_INT pos;
9651 unsigned HOST_WIDE_INT len;
9652 rtx other;
9653
9654 /* All the rules in this function are specific to scalar integers. */
9655 scalar_int_mode mode;
9656 if (!is_a <scalar_int_mode> (GET_MODE (dest), &mode))
9657 return x;
9658
9659 /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
9660 a clear of a one-bit field. We will have changed it to
9661 (and (rotate (const_int -2) POS) DEST), so check for that. Also check
9662 for a SUBREG. */
9663
9664 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE
9665 && CONST_INT_P (XEXP (XEXP (src, 0), 0))
9666 && INTVAL (XEXP (XEXP (src, 0), 0)) == -2
9667 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9668 {
9669 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9670 1, 1, 1, 0);
9671 if (assign != 0)
9672 return gen_rtx_SET (assign, const0_rtx);
9673 return x;
9674 }
9675
9676 if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
9677 && subreg_lowpart_p (XEXP (src, 0))
9678 && partial_subreg_p (XEXP (src, 0))
9679 && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
9680 && CONST_INT_P (XEXP (SUBREG_REG (XEXP (src, 0)), 0))
9681 && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
9682 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9683 {
9684 assign = make_extraction (VOIDmode, dest, 0,
9685 XEXP (SUBREG_REG (XEXP (src, 0)), 1),
9686 1, 1, 1, 0);
9687 if (assign != 0)
9688 return gen_rtx_SET (assign, const0_rtx);
9689 return x;
9690 }
9691
9692 /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
9693 one-bit field. */
9694 if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT
9695 && XEXP (XEXP (src, 0), 0) == const1_rtx
9696 && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9697 {
9698 assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9699 1, 1, 1, 0);
9700 if (assign != 0)
9701 return gen_rtx_SET (assign, const1_rtx);
9702 return x;
9703 }
9704
9705 /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the
9706 SRC is an AND with all bits of that field set, then we can discard
9707 the AND. */
9708 if (GET_CODE (dest) == ZERO_EXTRACT
9709 && CONST_INT_P (XEXP (dest, 1))
9710 && GET_CODE (src) == AND
9711 && CONST_INT_P (XEXP (src, 1)))
9712 {
9713 HOST_WIDE_INT width = INTVAL (XEXP (dest, 1));
9714 unsigned HOST_WIDE_INT and_mask = INTVAL (XEXP (src, 1));
9715 unsigned HOST_WIDE_INT ze_mask;
9716
9717 if (width >= HOST_BITS_PER_WIDE_INT)
9718 ze_mask = -1;
9719 else
9720 ze_mask = ((unsigned HOST_WIDE_INT)1 << width) - 1;
9721
9722 /* Complete overlap. We can remove the source AND. */
9723 if ((and_mask & ze_mask) == ze_mask)
9724 return gen_rtx_SET (dest, XEXP (src, 0));
9725
9726 /* Partial overlap. We can reduce the source AND. */
9727 if ((and_mask & ze_mask) != and_mask)
9728 {
9729 src = gen_rtx_AND (mode, XEXP (src, 0),
9730 gen_int_mode (and_mask & ze_mask, mode));
9731 return gen_rtx_SET (dest, src);
9732 }
9733 }
9734
9735 /* The other case we handle is assignments into a constant-position
9736 field. They look like (ior/xor (and DEST C1) OTHER). If C1 represents
9737 a mask that has all one bits except for a group of zero bits and
9738 OTHER is known to have zeros where C1 has ones, this is such an
9739 assignment. Compute the position and length from C1. Shift OTHER
9740 to the appropriate position, force it to the required mode, and
9741 make the extraction. Check for the AND in both operands. */
9742
9743 /* One or more SUBREGs might obscure the constant-position field
9744 assignment. The first one we are likely to encounter is an outer
9745 narrowing SUBREG, which we can just strip for the purposes of
9746 identifying the constant-field assignment. */
9747 scalar_int_mode src_mode = mode;
9748 if (GET_CODE (src) == SUBREG
9749 && subreg_lowpart_p (src)
9750 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (src)), &src_mode))
9751 src = SUBREG_REG (src);
9752
9753 if (GET_CODE (src) != IOR && GET_CODE (src) != XOR)
9754 return x;
9755
9756 rhs = expand_compound_operation (XEXP (src, 0));
9757 lhs = expand_compound_operation (XEXP (src, 1));
9758
9759 if (GET_CODE (rhs) == AND
9760 && CONST_INT_P (XEXP (rhs, 1))
9761 && rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest))
9762 c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9763 /* The second SUBREG that might get in the way is a paradoxical
9764 SUBREG around the first operand of the AND. We want to
9765 pretend the operand is as wide as the destination here. We
9766 do this by adjusting the MEM to wider mode for the sole
9767 purpose of the call to rtx_equal_for_field_assignment_p. Also
9768 note this trick only works for MEMs. */
9769 else if (GET_CODE (rhs) == AND
9770 && paradoxical_subreg_p (XEXP (rhs, 0))
9771 && MEM_P (SUBREG_REG (XEXP (rhs, 0)))
9772 && CONST_INT_P (XEXP (rhs, 1))
9773 && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (rhs, 0)),
9774 dest, true))
9775 c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9776 else if (GET_CODE (lhs) == AND
9777 && CONST_INT_P (XEXP (lhs, 1))
9778 && rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest))
9779 c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9780 /* The second SUBREG that might get in the way is a paradoxical
9781 SUBREG around the first operand of the AND. We want to
9782 pretend the operand is as wide as the destination here. We
9783 do this by adjusting the MEM to wider mode for the sole
9784 purpose of the call to rtx_equal_for_field_assignment_p. Also
9785 note this trick only works for MEMs. */
9786 else if (GET_CODE (lhs) == AND
9787 && paradoxical_subreg_p (XEXP (lhs, 0))
9788 && MEM_P (SUBREG_REG (XEXP (lhs, 0)))
9789 && CONST_INT_P (XEXP (lhs, 1))
9790 && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (lhs, 0)),
9791 dest, true))
9792 c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9793 else
9794 return x;
9795
9796 pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (mode), &len);
9797 if (pos < 0
9798 || pos + len > GET_MODE_PRECISION (mode)
9799 || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
9800 || (c1 & nonzero_bits (other, mode)) != 0)
9801 return x;
9802
9803 assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0);
9804 if (assign == 0)
9805 return x;
9806
9807 /* The mode to use for the source is the mode of the assignment, or of
9808 what is inside a possible STRICT_LOW_PART. */
9809 machine_mode new_mode = (GET_CODE (assign) == STRICT_LOW_PART
9810 ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));
9811
9812 /* Shift OTHER right POS places and make it the source, restricting it
9813 to the proper length and mode. */
9814
9815 src = canon_reg_for_combine (simplify_shift_const (NULL_RTX, LSHIFTRT,
9816 src_mode, other, pos),
9817 dest);
9818 src = force_to_mode (src, new_mode,
9819 len >= HOST_BITS_PER_WIDE_INT
9820 ? HOST_WIDE_INT_M1U
9821 : (HOST_WIDE_INT_1U << len) - 1,
9822 0);
9823
9824 /* If SRC is masked by an AND that does not make a difference in
9825 the value being stored, strip it. */
9826 if (GET_CODE (assign) == ZERO_EXTRACT
9827 && CONST_INT_P (XEXP (assign, 1))
9828 && INTVAL (XEXP (assign, 1)) < HOST_BITS_PER_WIDE_INT
9829 && GET_CODE (src) == AND
9830 && CONST_INT_P (XEXP (src, 1))
9831 && UINTVAL (XEXP (src, 1))
9832 == (HOST_WIDE_INT_1U << INTVAL (XEXP (assign, 1))) - 1)
9833 src = XEXP (src, 0);
9834
9835 return gen_rtx_SET (assign, src);
9836 }
9837
9838 /* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c)
9839 if so. */
9840
9841 static rtx
9842 apply_distributive_law (rtx x)
9843 {
9844 enum rtx_code code = GET_CODE (x);
9845 enum rtx_code inner_code;
9846 rtx lhs, rhs, other;
9847 rtx tem;
9848
9849 /* Distributivity is not true for floating point as it can change the
9850 value. So we don't do it unless -funsafe-math-optimizations. */
9851 if (FLOAT_MODE_P (GET_MODE (x))
9852 && ! flag_unsafe_math_optimizations)
9853 return x;
9854
9855 /* The outer operation can only be one of the following: */
9856 if (code != IOR && code != AND && code != XOR
9857 && code != PLUS && code != MINUS)
9858 return x;
9859
9860 lhs = XEXP (x, 0);
9861 rhs = XEXP (x, 1);
9862
9863 /* If either operand is a primitive we can't do anything, so get out
9864 fast. */
9865 if (OBJECT_P (lhs) || OBJECT_P (rhs))
9866 return x;
9867
9868 lhs = expand_compound_operation (lhs);
9869 rhs = expand_compound_operation (rhs);
9870 inner_code = GET_CODE (lhs);
9871 if (inner_code != GET_CODE (rhs))
9872 return x;
9873
9874 /* See if the inner and outer operations distribute. */
9875 switch (inner_code)
9876 {
9877 case LSHIFTRT:
9878 case ASHIFTRT:
9879 case AND:
9880 case IOR:
9881 /* These all distribute except over PLUS and MINUS. */
9882 if (code == PLUS || code == MINUS)
9883 return x;
9884 break;
9885
9886 case MULT:
9887 if (code != PLUS && code != MINUS)
9888 return x;
9889 break;
9890
9891 case ASHIFT:
9892 /* This is also a multiply, so it distributes over everything. */
9893 break;
9894
9895 /* This used to handle SUBREG, but this turned out to be counter-
9896 productive, since (subreg (op ...)) usually is not handled by
9897 insn patterns, and this "optimization" therefore transformed
9898 recognizable patterns into unrecognizable ones. Therefore the
9899 SUBREG case was removed from here.
9900
9901 It is possible that distributing SUBREG over arithmetic operations
9902 leads to an intermediate result that can then be optimized further,
9903 e.g. by moving the outer SUBREG to the other side of a SET as done
9904 in simplify_set. This seems to have been the original intent of
9905 handling SUBREGs here.
9906
9907 However, with current GCC this does not appear to actually happen,
9908 at least on major platforms. If some case is found where removing
9909 the SUBREG case here prevents follow-on optimizations, distributing
9910 SUBREGs ought to be re-added at that place, e.g. in simplify_set. */
9911
9912 default:
9913 return x;
9914 }
9915
9916 /* Set LHS and RHS to the inner operands (A and B in the example
9917 above) and set OTHER to the common operand (C in the example).
9918 There is only one way to do this unless the inner operation is
9919 commutative. */
9920 if (COMMUTATIVE_ARITH_P (lhs)
9921 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 0)))
9922 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 1);
9923 else if (COMMUTATIVE_ARITH_P (lhs)
9924 && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 1)))
9925 other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 0);
9926 else if (COMMUTATIVE_ARITH_P (lhs)
9927 && rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 0)))
9928 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 1);
9929 else if (rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 1)))
9930 other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 0);
9931 else
9932 return x;
9933
9934 /* Form the new inner operation, seeing if it simplifies first. */
9935 tem = simplify_gen_binary (code, GET_MODE (x), lhs, rhs);
9936
9937 /* There is one exception to the general way of distributing:
9938 (a | c) ^ (b | c) -> (a ^ b) & ~c */
9939 if (code == XOR && inner_code == IOR)
9940 {
9941 inner_code = AND;
9942 other = simplify_gen_unary (NOT, GET_MODE (x), other, GET_MODE (x));
9943 }
9944
9945 /* We may be able to continue distributing the result, so call
9946 ourselves recursively on the inner operation before forming the
9947 outer operation, which we return. */
9948 return simplify_gen_binary (inner_code, GET_MODE (x),
9949 apply_distributive_law (tem), other);
9950 }
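/* Illustrative sketch (editor's addition): (ior (and A C) (and B C))
   is refolded by this routine into (and (ior A B) C).  The one
   special case applies when XOR is distributed over IOR:
   (xor (ior A C) (ior B C)) becomes (and (xor A B) (not C)).  */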
9951
9952 /* See if X is of the form (* (+ A B) C), and if so convert to
9953 (+ (* A C) (* B C)) and try to simplify.
9954
9955 Most of the time, this results in no change. However, if some of
9956 the operands are the same or inverses of each other, simplifications
9957 will result.
9958
9959 For example, (and (ior A B) (not B)) can occur as the result of
9960 expanding a bit field assignment. When we apply the distributive
9961 law to this, we get (ior (and A (not B)) (and B (not B))),
9962 which then simplifies to (and A (not B)).
9963
9964 Note that this routine makes no checks on the validity of applying
9965 the inverse distributive law; such checks would be pointless here
9966 since the few places that call this routine can make them instead.
9967
9968 N is the index of the term that is decomposed (the arithmetic operation,
9969 i.e. (+ A B) in the first example above). !N is the index of the term that
9970 is distributed, i.e. of C in the first example above. */
9971 static rtx
9972 distribute_and_simplify_rtx (rtx x, int n)
9973 {
9974 machine_mode mode;
9975 enum rtx_code outer_code, inner_code;
9976 rtx decomposed, distributed, inner_op0, inner_op1, new_op0, new_op1, tmp;
9977
9978 /* Distributivity is not true for floating point as it can change the
9979 value. So we don't do it unless -funsafe-math-optimizations. */
9980 if (FLOAT_MODE_P (GET_MODE (x))
9981 && ! flag_unsafe_math_optimizations)
9982 return NULL_RTX;
9983
9984 decomposed = XEXP (x, n);
9985 if (!ARITHMETIC_P (decomposed))
9986 return NULL_RTX;
9987
9988 mode = GET_MODE (x);
9989 outer_code = GET_CODE (x);
9990 distributed = XEXP (x, !n);
9991
9992 inner_code = GET_CODE (decomposed);
9993 inner_op0 = XEXP (decomposed, 0);
9994 inner_op1 = XEXP (decomposed, 1);
9995
9996 /* Special case (and (xor B C) (not A)), which is equivalent to
9997 (xor (ior A B) (ior A C)) */
9998 if (outer_code == AND && inner_code == XOR && GET_CODE (distributed) == NOT)
9999 {
10000 distributed = XEXP (distributed, 0);
10001 outer_code = IOR;
10002 }
10003
10004 if (n == 0)
10005 {
10006 /* Distribute the second term. */
10007 new_op0 = simplify_gen_binary (outer_code, mode, inner_op0, distributed);
10008 new_op1 = simplify_gen_binary (outer_code, mode, inner_op1, distributed);
10009 }
10010 else
10011 {
10012 /* Distribute the first term. */
10013 new_op0 = simplify_gen_binary (outer_code, mode, distributed, inner_op0);
10014 new_op1 = simplify_gen_binary (outer_code, mode, distributed, inner_op1);
10015 }
10016
10017 tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
10018 new_op0, new_op1));
10019 if (GET_CODE (tmp) != outer_code
10020 && (set_src_cost (tmp, mode, optimize_this_for_speed_p)
10021 < set_src_cost (x, mode, optimize_this_for_speed_p)))
10022 return tmp;
10023
10024 return NULL_RTX;
10025 }
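/* Illustrative sketch (editor's addition): with X = (ior (and A B) C)
   and N = 0, DECOMPOSED is (and A B) and DISTRIBUTED is C, so the
   candidate computed above is
   apply_distributive_law ((and (ior A C) (ior B C))), i.e. C is
   pushed into both arms and the inverse law is given a chance to
   refold the result.  The candidate replaces X only if its top-level
   code changed and set_src_cost reports it as cheaper; otherwise
   NULL_RTX is returned and X is left alone.  */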
10026
10027 /* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done
10028 in MODE. Return an equivalent form, if different from (and VAROP
10029 (const_int CONSTOP)). Otherwise, return NULL_RTX. */
10030
10031 static rtx
10032 simplify_and_const_int_1 (scalar_int_mode mode, rtx varop,
10033 unsigned HOST_WIDE_INT constop)
10034 {
10035 unsigned HOST_WIDE_INT nonzero;
10036 unsigned HOST_WIDE_INT orig_constop;
10037 rtx orig_varop;
10038 int i;
10039
10040 orig_varop = varop;
10041 orig_constop = constop;
10042 if (GET_CODE (varop) == CLOBBER)
10043 return NULL_RTX;
10044
10045 /* Simplify VAROP knowing that we will be only looking at some of the
10046 bits in it.
10047
10048 Note by passing in CONSTOP, we guarantee that the bits not set in
10049 CONSTOP are not significant and will never be examined. We must
10050 ensure that is the case by explicitly masking out those bits
10051 before returning. */
10052 varop = force_to_mode (varop, mode, constop, 0);
10053
10054 /* If VAROP is a CLOBBER, we will fail so return it. */
10055 if (GET_CODE (varop) == CLOBBER)
10056 return varop;
10057
10058 /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
10059 to VAROP and return the new constant. */
10060 if (CONST_INT_P (varop))
10061 return gen_int_mode (INTVAL (varop) & constop, mode);
10062
10063 /* See what bits may be nonzero in VAROP. Unlike the general case of
10064 a call to nonzero_bits, here we don't care about bits outside
10065 MODE. */
10066
10067 nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);
10068
10069 /* Turn off all bits in the constant that are known to already be zero.
10070 Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
10071 which is tested below. */
10072
10073 constop &= nonzero;
10074
10075 /* If we don't have any bits left, return zero. */
10076 if (constop == 0)
10077 return const0_rtx;
10078
10079 /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
10080 a power of two, we can replace this with an ASHIFT. */
10081 if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
10082 && (i = exact_log2 (constop)) >= 0)
10083 return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);
10084
10085 /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
10086 or XOR, then try to apply the distributive law. This may eliminate
10087 operations if either branch can be simplified because of the AND.
10088 It may also make some cases more complex, but those cases probably
10089 won't match a pattern either with or without this. */
10090
10091 if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR)
10092 {
10093 scalar_int_mode varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10094 return
10095 gen_lowpart
10096 (mode,
10097 apply_distributive_law
10098 (simplify_gen_binary (GET_CODE (varop), varop_mode,
10099 simplify_and_const_int (NULL_RTX, varop_mode,
10100 XEXP (varop, 0),
10101 constop),
10102 simplify_and_const_int (NULL_RTX, varop_mode,
10103 XEXP (varop, 1),
10104 constop))));
10105 }
10106
10107 /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
10108 the AND and see if one of the operands simplifies to zero. If so, we
10109 may eliminate it. */
10110
10111 if (GET_CODE (varop) == PLUS
10112 && pow2p_hwi (constop + 1))
10113 {
10114 rtx o0, o1;
10115
10116 o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop);
10117 o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop);
10118 if (o0 == const0_rtx)
10119 return o1;
10120 if (o1 == const0_rtx)
10121 return o0;
10122 }
10123
10124 /* Make a SUBREG if necessary. If we can't make it, fail. */
10125 varop = gen_lowpart (mode, varop);
10126 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
10127 return NULL_RTX;
10128
10129 /* If we are only masking insignificant bits, return VAROP. */
10130 if (constop == nonzero)
10131 return varop;
10132
10133 if (varop == orig_varop && constop == orig_constop)
10134 return NULL_RTX;
10135
10136 /* Otherwise, return an AND. */
10137 return simplify_gen_binary (AND, mode, varop, gen_int_mode (constop, mode));
10138 }
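/* Illustrative sketch (editor's addition): simplifying
   (and (plus X (const_int 4)) (const_int 3)) takes the PLUS branch
   above, since 3 + 1 is a power of two.  Distributing gives
   O0 = (and X 3) and O1 = (and 4 3) = 0, so the whole expression
   reduces to (and X (const_int 3)).  Conversely, if nonzero_bits
   shows VAROP can only have bits inside CONSTOP set, CONSTOP equals
   NONZERO and the AND is dropped entirely.  */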
10139
10140
10141 /* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
10142 in MODE.
10143
10144 Return an equivalent form, if different from X. Otherwise, return X. If
10145 X is zero, we are to always construct the equivalent form. */
10146
10147 static rtx
10148 simplify_and_const_int (rtx x, scalar_int_mode mode, rtx varop,
10149 unsigned HOST_WIDE_INT constop)
10150 {
10151 rtx tem = simplify_and_const_int_1 (mode, varop, constop);
10152 if (tem)
10153 return tem;
10154
10155 if (!x)
10156 x = simplify_gen_binary (AND, GET_MODE (varop), varop,
10157 gen_int_mode (constop, mode));
10158 if (GET_MODE (x) != mode)
10159 x = gen_lowpart (mode, x);
10160 return x;
10161 }
10162
10163 /* Given a REG X of mode XMODE, compute which bits in X can be nonzero.
10164 We don't care about bits outside of those defined in MODE.
10165
10166 For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X is
10167 a shift, AND, or zero_extract, we can do better. */
10168
10169 static rtx
10170 reg_nonzero_bits_for_combine (const_rtx x, scalar_int_mode xmode,
10171 scalar_int_mode mode,
10172 unsigned HOST_WIDE_INT *nonzero)
10173 {
10174 rtx tem;
10175 reg_stat_type *rsp;
10176
10177 /* If X is a register whose nonzero bits value is current, use it.
10178 Otherwise, if X is a register whose value we can find, use that
10179 value. Otherwise, use the previously-computed global nonzero bits
10180 for this register. */
10181
10182 rsp = &reg_stat[REGNO (x)];
10183 if (rsp->last_set_value != 0
10184 && (rsp->last_set_mode == mode
10185 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10186 && GET_MODE_CLASS (rsp->last_set_mode) == MODE_INT
10187 && GET_MODE_CLASS (mode) == MODE_INT))
10188 && ((rsp->last_set_label >= label_tick_ebb_start
10189 && rsp->last_set_label < label_tick)
10190 || (rsp->last_set_label == label_tick
10191 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
10192 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10193 && REGNO (x) < reg_n_sets_max
10194 && REG_N_SETS (REGNO (x)) == 1
10195 && !REGNO_REG_SET_P
10196 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
10197 REGNO (x)))))
10198 {
10199 /* Note that, even if the precision of last_set_mode is lower than that
10200 of mode, record_value_for_reg invoked nonzero_bits on the register
10201 with nonzero_bits_mode (because last_set_mode is necessarily integral
10202 and HWI_COMPUTABLE_MODE_P in this case) so bits in nonzero_bits_mode
10203 are all valid, hence in mode too since nonzero_bits_mode is defined
10204 to the largest HWI_COMPUTABLE_MODE_P mode. */
10205 *nonzero &= rsp->last_set_nonzero_bits;
10206 return NULL;
10207 }
10208
10209 tem = get_last_value (x);
10210 if (tem)
10211 {
10212 if (SHORT_IMMEDIATES_SIGN_EXTEND)
10213 tem = sign_extend_short_imm (tem, xmode, GET_MODE_PRECISION (mode));
10214
10215 return tem;
10216 }
10217
10218 if (nonzero_sign_valid && rsp->nonzero_bits)
10219 {
10220 unsigned HOST_WIDE_INT mask = rsp->nonzero_bits;
10221
10222 if (GET_MODE_PRECISION (xmode) < GET_MODE_PRECISION (mode))
10223 /* We don't know anything about the upper bits. */
10224 mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (xmode);
10225
10226 *nonzero &= mask;
10227 }
10228
10229 return NULL;
10230 }
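/* Illustrative sketch (editor's addition): if pseudo 100 was last set
   within the current extended basic block by
   (set (reg:SI 100) (const_int 5)), then last_set_nonzero_bits is
   0x5 and a query about (reg:SI 100) narrows *NONZERO to at most
   bits 0 and 2.  */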
10231
10232 /* Given a reg X of mode XMODE, return the number of bits at the high-order
10233 end of X that are known to be equal to the sign bit. X will be used
10234 in mode MODE; the returned value will always be between 1 and the
10235 number of bits in MODE. */
10236
10237 static rtx
10238 reg_num_sign_bit_copies_for_combine (const_rtx x, scalar_int_mode xmode,
10239 scalar_int_mode mode,
10240 unsigned int *result)
10241 {
10242 rtx tem;
10243 reg_stat_type *rsp;
10244
10245 rsp = &reg_stat[REGNO (x)];
10246 if (rsp->last_set_value != 0
10247 && rsp->last_set_mode == mode
10248 && ((rsp->last_set_label >= label_tick_ebb_start
10249 && rsp->last_set_label < label_tick)
10250 || (rsp->last_set_label == label_tick
10251 && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
10252 || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10253 && REGNO (x) < reg_n_sets_max
10254 && REG_N_SETS (REGNO (x)) == 1
10255 && !REGNO_REG_SET_P
10256 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
10257 REGNO (x)))))
10258 {
10259 *result = rsp->last_set_sign_bit_copies;
10260 return NULL;
10261 }
10262
10263 tem = get_last_value (x);
10264 if (tem != 0)
10265 return tem;
10266
10267 if (nonzero_sign_valid && rsp->sign_bit_copies != 0
10268 && GET_MODE_PRECISION (xmode) == GET_MODE_PRECISION (mode))
10269 *result = rsp->sign_bit_copies;
10270
10271 return NULL;
10272 }
10273
10274 /* Return the number of "extended" bits there are in X, when interpreted
10275 as a quantity in MODE whose signedness is indicated by UNSIGNEDP. For
10276 unsigned quantities, this is the number of high-order zero bits.
10277 For signed quantities, this is the number of copies of the sign bit
10278 minus 1. In both cases, this function returns the number of "spare"
10279 bits. For example, if two quantities for which this function returns
10280 at least 1 are added, the addition is known not to overflow.
10281
10282 This function will always return 0 unless called during combine, which
10283 implies that it must be called from a define_split. */
10284
10285 unsigned int
10286 extended_count (const_rtx x, machine_mode mode, int unsignedp)
10287 {
10288 if (nonzero_sign_valid == 0)
10289 return 0;
10290
10291 scalar_int_mode int_mode;
10292 return (unsignedp
10293 ? (is_a <scalar_int_mode> (mode, &int_mode)
10294 && HWI_COMPUTABLE_MODE_P (int_mode)
10295 ? (unsigned int) (GET_MODE_PRECISION (int_mode) - 1
10296 - floor_log2 (nonzero_bits (x, int_mode)))
10297 : 0)
10298 : num_sign_bit_copies (x, mode) - 1);
10299 }
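/* Illustrative sketch (editor's addition): if nonzero_bits (X, SImode)
   is 0xff, the unsigned count is 31 - floor_log2 (0xff) = 24 spare
   high-order zero bits; for a signed value known to have 9 sign-bit
   copies, the count is 8.  Two values that each report a count of at
   least 1 can be added without overflow.  */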
10300
10301 /* This function is called from `simplify_shift_const' to merge two
10302 outer operations. Specifically, we have already found that we need
10303 to perform operation *POP0 with constant *PCONST0 at the outermost
10304 position. We would now like to also perform OP1 with constant CONST1
10305 (with *POP0 being done last).
10306
10307 Return 1 if we can do the operation and update *POP0 and *PCONST0 with
10308 the resulting operation. *PCOMP_P is set to 1 if we would need to
10309 complement the innermost operand, otherwise it is unchanged.
10310
10311 MODE is the mode in which the operation will be done. No bits outside
10312 the width of this mode matter. It is assumed that the width of this mode
10313 is smaller than or equal to HOST_BITS_PER_WIDE_INT.
10314
10315 If *POP0 or OP1 is UNKNOWN, it means no operation is required. Only NEG, PLUS,
10316 IOR, XOR, and AND are supported. We may set *POP0 to SET if the proper
10317 result is simply *PCONST0.
10318
10319 If the resulting operation cannot be expressed as one operation, we
10320 return 0 and do not change *POP0, *PCONST0, and *PCOMP_P. */
10321
10322 static int
10323 merge_outer_ops (enum rtx_code *pop0, HOST_WIDE_INT *pconst0, enum rtx_code op1, HOST_WIDE_INT const1, machine_mode mode, int *pcomp_p)
10324 {
10325 enum rtx_code op0 = *pop0;
10326 HOST_WIDE_INT const0 = *pconst0;
10327
10328 const0 &= GET_MODE_MASK (mode);
10329 const1 &= GET_MODE_MASK (mode);
10330
10331 /* If OP0 is an AND, clear unimportant bits in CONST1. */
10332 if (op0 == AND)
10333 const1 &= const0;
10334
10335 /* If OP0 or OP1 is UNKNOWN, this is easy. Similarly if they are the same or
10336 if OP0 is SET. */
10337
10338 if (op1 == UNKNOWN || op0 == SET)
10339 return 1;
10340
10341 else if (op0 == UNKNOWN)
10342 op0 = op1, const0 = const1;
10343
10344 else if (op0 == op1)
10345 {
10346 switch (op0)
10347 {
10348 case AND:
10349 const0 &= const1;
10350 break;
10351 case IOR:
10352 const0 |= const1;
10353 break;
10354 case XOR:
10355 const0 ^= const1;
10356 break;
10357 case PLUS:
10358 const0 += const1;
10359 break;
10360 case NEG:
10361 op0 = UNKNOWN;
10362 break;
10363 default:
10364 break;
10365 }
10366 }
10367
10368 /* Otherwise, if either is a PLUS or NEG, we can't do anything. */
10369 else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG)
10370 return 0;
10371
10372 /* If the two constants aren't the same, we can't do anything. The
10373 remaining six cases can all be done. */
10374 else if (const0 != const1)
10375 return 0;
10376
10377 else
10378 switch (op0)
10379 {
10380 case IOR:
10381 if (op1 == AND)
10382 /* (a & b) | b == b */
10383 op0 = SET;
10384 else /* op1 == XOR */
10385 /* (a ^ b) | b == a | b */
10386 {;}
10387 break;
10388
10389 case XOR:
10390 if (op1 == AND)
10391 /* (a & b) ^ b == (~a) & b */
10392 op0 = AND, *pcomp_p = 1;
10393 else /* op1 == IOR */
10394 /* (a | b) ^ b == a & ~b */
10395 op0 = AND, const0 = ~const0;
10396 break;
10397
10398 case AND:
10399 if (op1 == IOR)
10400 /* (a | b) & b == b */
10401 op0 = SET;
10402 else /* op1 == XOR */
10403 /* (a ^ b) & b == (~a) & b */
10404 *pcomp_p = 1;
10405 break;
10406 default:
10407 break;
10408 }
10409
10410 /* Check for NO-OP cases. */
10411 const0 &= GET_MODE_MASK (mode);
10412 if (const0 == 0
10413 && (op0 == IOR || op0 == XOR || op0 == PLUS))
10414 op0 = UNKNOWN;
10415 else if (const0 == 0 && op0 == AND)
10416 op0 = SET;
10417 else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode)
10418 && op0 == AND)
10419 op0 = UNKNOWN;
10420
10421 *pop0 = op0;
10422
10423 /* ??? Slightly redundant with the above mask, but not entirely.
10424 Moving this above means we'd have to sign-extend the mode mask
10425 for the final test. */
10426 if (op0 != UNKNOWN && op0 != NEG)
10427 *pconst0 = trunc_int_for_mode (const0, mode);
10428
10429 return 1;
10430 }
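/* Illustrative sketch (editor's addition): suppose *POP0 = IOR with
   *PCONST0 = 0xf0 and we need OP1 = AND with CONST1 = 0xf0 beneath
   it.  The constants match, so the IOR/AND row above applies:
   (a & 0xf0) | 0xf0 == 0xf0, *POP0 becomes SET, and the combined
   result is simply the constant 0xf0.  */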
10431
10432 /* A helper to simplify_shift_const_1 to determine the mode we can perform
10433 the shift in. The original shift operation CODE is performed on OP in
10434 ORIG_MODE. Return the wider mode MODE if we can perform the operation
10435 in that mode. Return ORIG_MODE otherwise. We can also assume that the
10436 result of the shift is subject to operation OUTER_CODE with operand
10437 OUTER_CONST. */
10438
10439 static scalar_int_mode
10440 try_widen_shift_mode (enum rtx_code code, rtx op, int count,
10441 scalar_int_mode orig_mode, scalar_int_mode mode,
10442 enum rtx_code outer_code, HOST_WIDE_INT outer_const)
10443 {
10444 gcc_assert (GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (orig_mode));
10445
10446 /* In general we can't perform the operation in a wider mode for right shifts and rotates. */
10447 switch (code)
10448 {
10449 case ASHIFTRT:
10450 /* We can still widen if the bits brought in from the left are identical
10451 to the sign bit of ORIG_MODE. */
10452 if (num_sign_bit_copies (op, mode)
10453 > (unsigned) (GET_MODE_PRECISION (mode)
10454 - GET_MODE_PRECISION (orig_mode)))
10455 return mode;
10456 return orig_mode;
10457
10458 case LSHIFTRT:
10459 /* Similarly here but with zero bits. */
10460 if (HWI_COMPUTABLE_MODE_P (mode)
10461 && (nonzero_bits (op, mode) & ~GET_MODE_MASK (orig_mode)) == 0)
10462 return mode;
10463
10464 /* We can also widen if the bits brought in will be masked off. This
10465 operation is performed in ORIG_MODE. */
10466 if (outer_code == AND)
10467 {
10468 int care_bits = low_bitmask_len (orig_mode, outer_const);
10469
10470 if (care_bits >= 0
10471 && GET_MODE_PRECISION (orig_mode) - care_bits >= count)
10472 return mode;
10473 }
10474 /* fall through */
10475
10476 case ROTATE:
10477 return orig_mode;
10478
10479 case ROTATERT:
10480 gcc_unreachable ();
10481
10482 default:
10483 return mode;
10484 }
10485 }
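/* Illustrative sketch (editor's addition): widening an LSHIFTRT from
   QImode to SImode is safe under an outer AND of 0x1f, because
   low_bitmask_len gives CARE_BITS = 5 and 8 - 5 = 3, so any count up
   to 3 only shifts in bits that the mask discards anyway.  */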
10486
10487 /* Simplify a shift of VAROP by ORIG_COUNT bits. CODE says what kind
10488 of shift. The result of the shift is RESULT_MODE. Return NULL_RTX
10489 if we cannot simplify it. Otherwise, return a simplified value.
10490
10491 The shift is normally computed in the widest mode we find in VAROP, as
10492 long as it isn't a different number of words than RESULT_MODE. Exceptions
10493 are ASHIFTRT and ROTATE, which are always done in their original mode. */
10494
10495 static rtx
10496 simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
10497 rtx varop, int orig_count)
10498 {
10499 enum rtx_code orig_code = code;
10500 rtx orig_varop = varop;
10501 int count, log2;
10502 machine_mode mode = result_mode;
10503 machine_mode shift_mode;
10504 scalar_int_mode tmode, inner_mode, int_mode, int_varop_mode, int_result_mode;
10505 /* We form (outer_op (code varop count) (outer_const)). */
10506 enum rtx_code outer_op = UNKNOWN;
10507 HOST_WIDE_INT outer_const = 0;
10508 int complement_p = 0;
10509 rtx new_rtx, x;
10510
10511 /* Make sure to truncate the "natural" shift on the way in. We don't
10512 want to do this inside the loop as it makes it more difficult to
10513 combine shifts. */
10514 if (SHIFT_COUNT_TRUNCATED)
10515 orig_count &= GET_MODE_UNIT_BITSIZE (mode) - 1;
10516
10517 /* If we were given an invalid count, don't do anything except exactly
10518 what was requested. */
10519
10520 if (orig_count < 0 || orig_count >= (int) GET_MODE_UNIT_PRECISION (mode))
10521 return NULL_RTX;
10522
10523 count = orig_count;
10524
10525 /* Unless one of the branches of the `if' in this loop does a `continue',
10526 we will `break' the loop after the `if'. */
10527
10528 while (count != 0)
10529 {
10530 /* If we have an operand of (clobber (const_int 0)), fail. */
10531 if (GET_CODE (varop) == CLOBBER)
10532 return NULL_RTX;
10533
10534 /* Convert ROTATERT to ROTATE. */
10535 if (code == ROTATERT)
10536 {
10537 unsigned int bitsize = GET_MODE_UNIT_PRECISION (result_mode);
10538 code = ROTATE;
10539 count = bitsize - count;
10540 }
10541
10542 shift_mode = result_mode;
10543 if (shift_mode != mode)
10544 {
10545 /* We only change the modes of scalar shifts. */
10546 int_mode = as_a <scalar_int_mode> (mode);
10547 int_result_mode = as_a <scalar_int_mode> (result_mode);
10548 shift_mode = try_widen_shift_mode (code, varop, count,
10549 int_result_mode, int_mode,
10550 outer_op, outer_const);
10551 }
10552
10553 scalar_int_mode shift_unit_mode
10554 = as_a <scalar_int_mode> (GET_MODE_INNER (shift_mode));
10555
10556 /* Handle cases where the count is greater than the size of the mode
10557 minus 1. For ASHIFT, use the size minus one as the count (this can
10558 occur when simplifying (lshiftrt (ashiftrt ..))). For rotates,
10559 take the count modulo the size. For other shifts, the result is
10560 zero.
10561
10562 Since these shifts are being produced by the compiler by combining
10563 multiple operations, each of which are defined, we know what the
10564 result is supposed to be. */
10565
10566 if (count > (GET_MODE_PRECISION (shift_unit_mode) - 1))
10567 {
10568 if (code == ASHIFTRT)
10569 count = GET_MODE_PRECISION (shift_unit_mode) - 1;
10570 else if (code == ROTATE || code == ROTATERT)
10571 count %= GET_MODE_PRECISION (shift_unit_mode);
10572 else
10573 {
10574 /* We can't simply return zero because there may be an
10575 outer op. */
10576 varop = const0_rtx;
10577 count = 0;
10578 break;
10579 }
10580 }
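/* Illustrative sketch (editor's addition): in SImode a count of 33
   becomes 31 for ASHIFTRT (the result is all copies of the sign
   bit), 33 % 32 = 1 for rotates, and the constant zero for the
   other shifts.  */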
10581
10582 /* If we discovered we had to complement VAROP, leave. Making a NOT
10583 here would cause an infinite loop. */
10584 if (complement_p)
10585 break;
10586
10587 if (shift_mode == shift_unit_mode)
10588 {
10589 /* An arithmetic right shift of a quantity known to be -1 or 0
10590 is a no-op. */
10591 if (code == ASHIFTRT
10592 && (num_sign_bit_copies (varop, shift_unit_mode)
10593 == GET_MODE_PRECISION (shift_unit_mode)))
10594 {
10595 count = 0;
10596 break;
10597 }
10598
10599 /* If we are doing an arithmetic right shift and discarding all but
10600 the sign bit copies, this is equivalent to doing a shift by the
10601 bitsize minus one. Convert it into that shift because it will
10602 often allow other simplifications. */
10603
10604 if (code == ASHIFTRT
10605 && (count + num_sign_bit_copies (varop, shift_unit_mode)
10606 >= GET_MODE_PRECISION (shift_unit_mode)))
10607 count = GET_MODE_PRECISION (shift_unit_mode) - 1;
10608
10609 /* We simplify the tests below and elsewhere by converting
10610 ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
10611 `make_compound_operation' will convert it to an ASHIFTRT for
10612 those machines (such as VAX) that don't have an LSHIFTRT. */
10613 if (code == ASHIFTRT
10614 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10615 && val_signbit_known_clear_p (shift_unit_mode,
10616 nonzero_bits (varop,
10617 shift_unit_mode)))
10618 code = LSHIFTRT;
10619
10620 if (((code == LSHIFTRT
10621 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10622 && !(nonzero_bits (varop, shift_unit_mode) >> count))
10623 || (code == ASHIFT
10624 && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10625 && !((nonzero_bits (varop, shift_unit_mode) << count)
10626 & GET_MODE_MASK (shift_unit_mode))))
10627 && !side_effects_p (varop))
10628 varop = const0_rtx;
10629 }
10630
10631 switch (GET_CODE (varop))
10632 {
10633 case SIGN_EXTEND:
10634 case ZERO_EXTEND:
10635 case SIGN_EXTRACT:
10636 case ZERO_EXTRACT:
10637 new_rtx = expand_compound_operation (varop);
10638 if (new_rtx != varop)
10639 {
10640 varop = new_rtx;
10641 continue;
10642 }
10643 break;
10644
10645 case MEM:
10646 /* The following rules apply only to scalars. */
10647 if (shift_mode != shift_unit_mode)
10648 break;
10649 int_mode = as_a <scalar_int_mode> (mode);
10650
10651 /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
10652 minus the width of a smaller mode, we can do this with a
10653 SIGN_EXTEND or ZERO_EXTEND from the narrower memory location. */
10654 if ((code == ASHIFTRT || code == LSHIFTRT)
10655 && ! mode_dependent_address_p (XEXP (varop, 0),
10656 MEM_ADDR_SPACE (varop))
10657 && ! MEM_VOLATILE_P (varop)
10658 && (int_mode_for_size (GET_MODE_BITSIZE (int_mode) - count, 1)
10659 .exists (&tmode)))
10660 {
10661 new_rtx = adjust_address_nv (varop, tmode,
10662 BYTES_BIG_ENDIAN ? 0
10663 : count / BITS_PER_UNIT);
10664
10665 varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND
10666 : ZERO_EXTEND, int_mode, new_rtx);
10667 count = 0;
10668 continue;
10669 }
10670 break;
10671
10672 case SUBREG:
10673 /* The following rules apply only to scalars. */
10674 if (shift_mode != shift_unit_mode)
10675 break;
10676 int_mode = as_a <scalar_int_mode> (mode);
10677 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10678
10679 /* If VAROP is a SUBREG, strip it as long as the inner operand has
10680 the same number of words as what we've seen so far. Then store
10681 the widest mode in MODE. */
10682 if (subreg_lowpart_p (varop)
10683 && is_int_mode (GET_MODE (SUBREG_REG (varop)), &inner_mode)
10684 && GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_varop_mode)
10685 && (CEIL (GET_MODE_SIZE (inner_mode), UNITS_PER_WORD)
10686 == CEIL (GET_MODE_SIZE (int_mode), UNITS_PER_WORD))
10687 && GET_MODE_CLASS (int_varop_mode) == MODE_INT)
10688 {
10689 varop = SUBREG_REG (varop);
10690 if (GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_mode))
10691 mode = inner_mode;
10692 continue;
10693 }
10694 break;
10695
10696 case MULT:
10697 /* Some machines use MULT instead of ASHIFT because MULT
10698 is cheaper. But it is still better on those machines to
10699 merge two shifts into one. */
10700 if (CONST_INT_P (XEXP (varop, 1))
10701 && (log2 = exact_log2 (UINTVAL (XEXP (varop, 1)))) >= 0)
10702 {
10703 rtx log2_rtx = gen_int_shift_amount (GET_MODE (varop), log2);
10704 varop = simplify_gen_binary (ASHIFT, GET_MODE (varop),
10705 XEXP (varop, 0), log2_rtx);
10706 continue;
10707 }
10708 break;
10709
10710 case UDIV:
10711 /* Similar, for when divides are cheaper. */
10712 if (CONST_INT_P (XEXP (varop, 1))
10713 && (log2 = exact_log2 (UINTVAL (XEXP (varop, 1)))) >= 0)
10714 {
10715 rtx log2_rtx = gen_int_shift_amount (GET_MODE (varop), log2);
10716 varop = simplify_gen_binary (LSHIFTRT, GET_MODE (varop),
10717 XEXP (varop, 0), log2_rtx);
10718 continue;
10719 }
10720 break;
10721
10722 case ASHIFTRT:
10723 /* If we are extracting just the sign bit of an arithmetic
10724 right shift, that shift is not needed. However, the sign
10725 bit of a wider mode may be different from what would be
10726 interpreted as the sign bit in a narrower mode, so, if
10727 the result is narrower, don't discard the shift. */
10728 if (code == LSHIFTRT
10729 && count == (GET_MODE_UNIT_BITSIZE (result_mode) - 1)
10730 && (GET_MODE_UNIT_BITSIZE (result_mode)
10731 >= GET_MODE_UNIT_BITSIZE (GET_MODE (varop))))
10732 {
10733 varop = XEXP (varop, 0);
10734 continue;
10735 }
10736
10737 /* fall through */
10738
10739 case LSHIFTRT:
10740 case ASHIFT:
10741 case ROTATE:
10742 /* The following rules apply only to scalars. */
10743 if (shift_mode != shift_unit_mode)
10744 break;
10745 int_mode = as_a <scalar_int_mode> (mode);
10746 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10747 int_result_mode = as_a <scalar_int_mode> (result_mode);
10748
10749 /* Here we have two nested shifts. The result is usually the
10750 AND of a new shift with a mask. We compute the result below. */
10751 if (CONST_INT_P (XEXP (varop, 1))
10752 && INTVAL (XEXP (varop, 1)) >= 0
10753 && INTVAL (XEXP (varop, 1)) < GET_MODE_PRECISION (int_varop_mode)
10754 && HWI_COMPUTABLE_MODE_P (int_result_mode)
10755 && HWI_COMPUTABLE_MODE_P (int_mode))
10756 {
10757 enum rtx_code first_code = GET_CODE (varop);
10758 unsigned int first_count = INTVAL (XEXP (varop, 1));
10759 unsigned HOST_WIDE_INT mask;
10760 rtx mask_rtx;
10761
10762 /* We have one common special case. We can't do any merging if
10763 the inner code is an ASHIFTRT of a smaller mode. However, if
10764 we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
10765 with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
10766 we can convert it to
10767 (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
10768 This simplifies certain SIGN_EXTEND operations. */
10769 if (code == ASHIFT && first_code == ASHIFTRT
10770 && count == (GET_MODE_PRECISION (int_result_mode)
10771 - GET_MODE_PRECISION (int_varop_mode)))
10772 {
10773 /* C3 has the low-order C1 bits zero. */
10774
10775 mask = GET_MODE_MASK (int_mode)
10776 & ~((HOST_WIDE_INT_1U << first_count) - 1);
10777
10778 varop = simplify_and_const_int (NULL_RTX, int_result_mode,
10779 XEXP (varop, 0), mask);
10780 varop = simplify_shift_const (NULL_RTX, ASHIFT,
10781 int_result_mode, varop, count);
10782 count = first_count;
10783 code = ASHIFTRT;
10784 continue;
10785 }
10786
10787 /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
10788 than C1 high-order bits equal to the sign bit, we can convert
10789 this to either an ASHIFT or an ASHIFTRT depending on the
10790 two counts.
10791
10792 We cannot do this if VAROP's mode is not SHIFT_UNIT_MODE. */
10793
10794 if (code == ASHIFTRT && first_code == ASHIFT
10795 && int_varop_mode == shift_unit_mode
10796 && (num_sign_bit_copies (XEXP (varop, 0), shift_unit_mode)
10797 > first_count))
10798 {
10799 varop = XEXP (varop, 0);
10800 count -= first_count;
10801 if (count < 0)
10802 {
10803 count = -count;
10804 code = ASHIFT;
10805 }
10806
10807 continue;
10808 }
10809
10810 /* There are some cases we can't do. If CODE is ASHIFTRT,
10811 we can only do this if FIRST_CODE is also ASHIFTRT.
10812
10813 We can't do the case when CODE is ROTATE and FIRST_CODE is
10814 ASHIFTRT.
10815
10816 If the mode of this shift is not the mode of the outer shift,
10817 we can't do this if either shift is a right shift or ROTATE.
10818
10819 Finally, we can't do any of these if the mode is too wide
10820 unless the codes are the same.
10821
10822 Handle the case where the shift codes are the same
10823 first. */
10824
10825 if (code == first_code)
10826 {
10827 if (int_varop_mode != int_result_mode
10828 && (code == ASHIFTRT || code == LSHIFTRT
10829 || code == ROTATE))
10830 break;
10831
10832 count += first_count;
10833 varop = XEXP (varop, 0);
10834 continue;
10835 }
10836
10837 if (code == ASHIFTRT
10838 || (code == ROTATE && first_code == ASHIFTRT)
10839 || GET_MODE_PRECISION (int_mode) > HOST_BITS_PER_WIDE_INT
10840 || (int_varop_mode != int_result_mode
10841 && (first_code == ASHIFTRT || first_code == LSHIFTRT
10842 || first_code == ROTATE
10843 || code == ROTATE)))
10844 break;
10845
10846 /* To compute the mask to apply after the shift, shift the
10847 nonzero bits of the inner shift the same way the
10848 outer shift will. */
10849
10850 mask_rtx = gen_int_mode (nonzero_bits (varop, int_varop_mode),
10851 int_result_mode);
10852 rtx count_rtx = gen_int_shift_amount (int_result_mode, count);
10853 mask_rtx
10854 = simplify_const_binary_operation (code, int_result_mode,
10855 mask_rtx, count_rtx);
10856
10857 /* Give up if we can't compute an outer operation to use. */
10858 if (mask_rtx == 0
10859 || !CONST_INT_P (mask_rtx)
10860 || ! merge_outer_ops (&outer_op, &outer_const, AND,
10861 INTVAL (mask_rtx),
10862 int_result_mode, &complement_p))
10863 break;
10864
10865 /* If the shifts are in the same direction, we add the
10866 counts. Otherwise, we subtract them. */
10867 if ((code == ASHIFTRT || code == LSHIFTRT)
10868 == (first_code == ASHIFTRT || first_code == LSHIFTRT))
10869 count += first_count;
10870 else
10871 count -= first_count;
10872
10873 /* If COUNT is positive, the new shift is usually CODE,
10874 except for the two exceptions below, in which case it is
10875 FIRST_CODE. If the count is negative, FIRST_CODE should
10876 always be used.  */
10877 if (count > 0
10878 && ((first_code == ROTATE && code == ASHIFT)
10879 || (first_code == ASHIFTRT && code == LSHIFTRT)))
10880 code = first_code;
10881 else if (count < 0)
10882 code = first_code, count = -count;
10883
10884 varop = XEXP (varop, 0);
10885 continue;
10886 }
10887
10888 /* If we have (A << B << C) for any shift, we can convert this to
10889 (A << C << B). This wins if A is a constant. Only try this if
10890 B is not a constant. */
10891
10892 else if (GET_CODE (varop) == code
10893 && CONST_INT_P (XEXP (varop, 0))
10894 && !CONST_INT_P (XEXP (varop, 1)))
10895 {
10896 /* For ((unsigned) (cstULL >> count)) >> cst2 we have to make
10897 sure the result will be masked. See PR70222. */
10898 if (code == LSHIFTRT
10899 && int_mode != int_result_mode
10900 && !merge_outer_ops (&outer_op, &outer_const, AND,
10901 GET_MODE_MASK (int_result_mode)
10902 >> orig_count, int_result_mode,
10903 &complement_p))
10904 break;
10905 /* For ((int) (cstLL >> count)) >> cst2 just give up. Queuing
10906 up outer sign extension (often left and right shift) is
10907 hardly more efficient than the original. See PR70429. */
10908 if (code == ASHIFTRT && int_mode != int_result_mode)
10909 break;
10910
10911 rtx count_rtx = gen_int_shift_amount (int_result_mode, count);
10912 rtx new_rtx = simplify_const_binary_operation (code, int_mode,
10913 XEXP (varop, 0),
10914 count_rtx);
10915 varop = gen_rtx_fmt_ee (code, int_mode, new_rtx, XEXP (varop, 1));
10916 count = 0;
10917 continue;
10918 }
10919 break;
10920
10921 case NOT:
10922 /* The following rules apply only to scalars. */
10923 if (shift_mode != shift_unit_mode)
10924 break;
10925
10926 /* Make this fit the case below. */
10927 varop = gen_rtx_XOR (mode, XEXP (varop, 0), constm1_rtx);
10928 continue;
10929
10930 case IOR:
10931 case AND:
10932 case XOR:
10933 /* The following rules apply only to scalars. */
10934 if (shift_mode != shift_unit_mode)
10935 break;
10936 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10937 int_result_mode = as_a <scalar_int_mode> (result_mode);
10938
10939 /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
10940 with C the size of VAROP - 1 and the shift is logical if
10941 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
10942 we have an (le X 0) operation. If we have an arithmetic shift
10943 and STORE_FLAG_VALUE is 1 or we have a logical shift with
10944 STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation. */
10945
10946 if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS
10947 && XEXP (XEXP (varop, 0), 1) == constm1_rtx
10948 && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
10949 && (code == LSHIFTRT || code == ASHIFTRT)
10950 && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
10951 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
10952 {
10953 count = 0;
10954 varop = gen_rtx_LE (int_varop_mode, XEXP (varop, 1),
10955 const0_rtx);
10956
10957 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
10958 varop = gen_rtx_NEG (int_varop_mode, varop);
10959
10960 continue;
10961 }
10962
10963 /* If we have (shift (logical)), move the logical to the outside
10964 to allow it to possibly combine with another logical and the
10965 shift to combine with another shift. This also canonicalizes to
10966 what a ZERO_EXTRACT looks like. Also, some machines have
10967 (and (shift)) insns. */
10968
10969 if (CONST_INT_P (XEXP (varop, 1))
10970 /* We can't do this if we have (ashiftrt (xor)) and the
10971 constant has its sign bit set in shift_unit_mode with
10972 shift_unit_mode wider than result_mode. */
10973 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
10974 && int_result_mode != shift_unit_mode
10975 && trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
10976 shift_unit_mode) < 0)
10977 && (new_rtx = simplify_const_binary_operation
10978 (code, int_result_mode,
10979 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
10980 gen_int_shift_amount (int_result_mode, count))) != 0
10981 && CONST_INT_P (new_rtx)
10982 && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
10983 INTVAL (new_rtx), int_result_mode,
10984 &complement_p))
10985 {
10986 varop = XEXP (varop, 0);
10987 continue;
10988 }
10989
10990 /* If we can't do that, try to simplify the shift in each arm of the
10991 logical expression, make a new logical expression, and apply
10992 the inverse distributive law. This also can't be done for
10993 (ashiftrt (xor)) where we've widened the shift and the constant
10994 changes the sign bit. */
10995 if (CONST_INT_P (XEXP (varop, 1))
10996 && !(code == ASHIFTRT && GET_CODE (varop) == XOR
10997 && int_result_mode != shift_unit_mode
10998 && trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
10999 shift_unit_mode) < 0))
11000 {
11001 rtx lhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
11002 XEXP (varop, 0), count);
11003 rtx rhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
11004 XEXP (varop, 1), count);
11005
11006 varop = simplify_gen_binary (GET_CODE (varop), shift_unit_mode,
11007 lhs, rhs);
11008 varop = apply_distributive_law (varop);
11009
11010 count = 0;
11011 continue;
11012 }
11013 break;
11014
11015 case EQ:
11016 /* The following rules apply only to scalars. */
11017 if (shift_mode != shift_unit_mode)
11018 break;
11019 int_result_mode = as_a <scalar_int_mode> (result_mode);
11020
11021 /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
11022 says that the sign bit can be tested, FOO has mode MODE, C is
11023 GET_MODE_PRECISION (MODE) - 1, and FOO has only its low-order bit
11024 that may be nonzero. */
11025 if (code == LSHIFTRT
11026 && XEXP (varop, 1) == const0_rtx
11027 && GET_MODE (XEXP (varop, 0)) == int_result_mode
11028 && count == (GET_MODE_PRECISION (int_result_mode) - 1)
11029 && HWI_COMPUTABLE_MODE_P (int_result_mode)
11030 && STORE_FLAG_VALUE == -1
11031 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1
11032 && merge_outer_ops (&outer_op, &outer_const, XOR, 1,
11033 int_result_mode, &complement_p))
11034 {
11035 varop = XEXP (varop, 0);
11036 count = 0;
11037 continue;
11038 }
11039 break;
11040
11041 case NEG:
11042 /* The following rules apply only to scalars. */
11043 if (shift_mode != shift_unit_mode)
11044 break;
11045 int_result_mode = as_a <scalar_int_mode> (result_mode);
11046
11047 /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
11048 than the number of bits in the mode is equivalent to A. */
11049 if (code == LSHIFTRT
11050 && count == (GET_MODE_PRECISION (int_result_mode) - 1)
11051 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1)
11052 {
11053 varop = XEXP (varop, 0);
11054 count = 0;
11055 continue;
11056 }
11057
11058 /* NEG commutes with ASHIFT since it is multiplication. Move the
11059 NEG outside to allow shifts to combine. */
11060 if (code == ASHIFT
11061 && merge_outer_ops (&outer_op, &outer_const, NEG, 0,
11062 int_result_mode, &complement_p))
11063 {
11064 varop = XEXP (varop, 0);
11065 continue;
11066 }
11067 break;
11068
11069 case PLUS:
11070 /* The following rules apply only to scalars. */
11071 if (shift_mode != shift_unit_mode)
11072 break;
11073 int_result_mode = as_a <scalar_int_mode> (result_mode);
11074
11075 /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
11076 is one less than the number of bits in the mode is
11077 equivalent to (xor A 1). */
11078 if (code == LSHIFTRT
11079 && count == (GET_MODE_PRECISION (int_result_mode) - 1)
11080 && XEXP (varop, 1) == constm1_rtx
11081 && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1
11082 && merge_outer_ops (&outer_op, &outer_const, XOR, 1,
11083 int_result_mode, &complement_p))
11084 {
11085 count = 0;
11086 varop = XEXP (varop, 0);
11087 continue;
11088 }
11089
11090 /* If we have (xshiftrt (plus FOO BAR) C), and the only bits
11091 that might be nonzero in BAR are those being shifted out and those
11092 bits are known zero in FOO, we can replace the PLUS with FOO.
11093 Similarly in the other operand order. This code occurs when
11094 we are computing the size of a variable-size array. */
11095
11096 if ((code == ASHIFTRT || code == LSHIFTRT)
11097 && count < HOST_BITS_PER_WIDE_INT
11098 && nonzero_bits (XEXP (varop, 1), int_result_mode) >> count == 0
11099 && (nonzero_bits (XEXP (varop, 1), int_result_mode)
11100 & nonzero_bits (XEXP (varop, 0), int_result_mode)) == 0)
11101 {
11102 varop = XEXP (varop, 0);
11103 continue;
11104 }
11105 else if ((code == ASHIFTRT || code == LSHIFTRT)
11106 && count < HOST_BITS_PER_WIDE_INT
11107 && HWI_COMPUTABLE_MODE_P (int_result_mode)
11108 && (nonzero_bits (XEXP (varop, 0), int_result_mode)
11109 >> count) == 0
11110 && (nonzero_bits (XEXP (varop, 0), int_result_mode)
11111 & nonzero_bits (XEXP (varop, 1), int_result_mode)) == 0)
11112 {
11113 varop = XEXP (varop, 1);
11114 continue;
11115 }
11116
11117 /* (ashift (plus foo C) N) is (plus (ashift foo N) C'). */
11118 if (code == ASHIFT
11119 && CONST_INT_P (XEXP (varop, 1))
11120 && (new_rtx = simplify_const_binary_operation
11121 (ASHIFT, int_result_mode,
11122 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
11123 gen_int_shift_amount (int_result_mode, count))) != 0
11124 && CONST_INT_P (new_rtx)
11125 && merge_outer_ops (&outer_op, &outer_const, PLUS,
11126 INTVAL (new_rtx), int_result_mode,
11127 &complement_p))
11128 {
11129 varop = XEXP (varop, 0);
11130 continue;
11131 }
11132
11133 /* Check for 'PLUS signbit', which is the canonical form of 'XOR
11134 signbit', and attempt to change the PLUS to an XOR and move it to
11135 the outer operation, as is done above in the AND/IOR/XOR case
11136 for logical shifts. See the logical handling above for the
11137 reasoning behind doing so. */
11138 if (code == LSHIFTRT
11139 && CONST_INT_P (XEXP (varop, 1))
11140 && mode_signbit_p (int_result_mode, XEXP (varop, 1))
11141 && (new_rtx = simplify_const_binary_operation
11142 (code, int_result_mode,
11143 gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
11144 gen_int_shift_amount (int_result_mode, count))) != 0
11145 && CONST_INT_P (new_rtx)
11146 && merge_outer_ops (&outer_op, &outer_const, XOR,
11147 INTVAL (new_rtx), int_result_mode,
11148 &complement_p))
11149 {
11150 varop = XEXP (varop, 0);
11151 continue;
11152 }
11153
11154 break;
11155
11156 case MINUS:
11157 /* The following rules apply only to scalars. */
11158 if (shift_mode != shift_unit_mode)
11159 break;
11160 int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
11161
11162 /* If we have (xshiftrt (minus (ashiftrt X C) X) C)
11163 with C the size of VAROP - 1 and the shift is logical if
11164 STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
11165 we have a (gt X 0) operation. If the shift is arithmetic with
11166 STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1,
11167 we have a (neg (gt X 0)) operation. */
11168
11169 if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
11170 && GET_CODE (XEXP (varop, 0)) == ASHIFTRT
11171 && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
11172 && (code == LSHIFTRT || code == ASHIFTRT)
11173 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
11174 && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
11175 && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
11176 {
11177 count = 0;
11178 varop = gen_rtx_GT (int_varop_mode, XEXP (varop, 1),
11179 const0_rtx);
11180
11181 if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
11182 varop = gen_rtx_NEG (int_varop_mode, varop);
11183
11184 continue;
11185 }
11186 break;
11187
11188 case TRUNCATE:
11189 /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt))
11190 if the truncate does not affect the value. */
11191 if (code == LSHIFTRT
11192 && GET_CODE (XEXP (varop, 0)) == LSHIFTRT
11193 && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
11194 && (INTVAL (XEXP (XEXP (varop, 0), 1))
11195 >= (GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (varop, 0)))
11196 - GET_MODE_UNIT_PRECISION (GET_MODE (varop)))))
11197 {
11198 rtx varop_inner = XEXP (varop, 0);
11199 int new_count = count + INTVAL (XEXP (varop_inner, 1));
11200 rtx new_count_rtx = gen_int_shift_amount (GET_MODE (varop_inner),
11201 new_count);
11202 varop_inner = gen_rtx_LSHIFTRT (GET_MODE (varop_inner),
11203 XEXP (varop_inner, 0),
11204 new_count_rtx);
11205 varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner);
11206 count = 0;
11207 continue;
11208 }
11209 break;
11210
11211 default:
11212 break;
11213 }
11214
11215 break;
11216 }
11217
11218 shift_mode = result_mode;
11219 if (shift_mode != mode)
11220 {
11221 /* We only change the modes of scalar shifts. */
11222 int_mode = as_a <scalar_int_mode> (mode);
11223 int_result_mode = as_a <scalar_int_mode> (result_mode);
11224 shift_mode = try_widen_shift_mode (code, varop, count, int_result_mode,
11225 int_mode, outer_op, outer_const);
11226 }
11227
11228 /* We have now finished analyzing the shift. The result should be
11229 a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places. If
11230 OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied
11231 to the result of the shift. OUTER_CONST is the relevant constant,
11232 but we must turn off all bits turned off in the shift. */
11233
11234 if (outer_op == UNKNOWN
11235 && orig_code == code && orig_count == count
11236 && varop == orig_varop
11237 && shift_mode == GET_MODE (varop))
11238 return NULL_RTX;
11239
11240 /* Make a SUBREG if necessary. If we can't make it, fail. */
11241 varop = gen_lowpart (shift_mode, varop);
11242 if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
11243 return NULL_RTX;
11244
11245 /* If we have an outer operation and we just made a shift, it is
11246 possible that we could have simplified the shift were it not
11247 for the outer operation. So try to do the simplification
11248 recursively. */
11249
11250 if (outer_op != UNKNOWN)
11251 x = simplify_shift_const_1 (code, shift_mode, varop, count);
11252 else
11253 x = NULL_RTX;
11254
11255 if (x == NULL_RTX)
11256 x = simplify_gen_binary (code, shift_mode, varop,
11257 gen_int_shift_amount (shift_mode, count));
11258
11259 /* If we were doing an LSHIFTRT in a wider mode than it was originally,
11260 turn off all the bits that the shift would have turned off. */
11261 if (orig_code == LSHIFTRT && result_mode != shift_mode)
11262 /* We only change the modes of scalar shifts. */
11263 x = simplify_and_const_int (NULL_RTX, as_a <scalar_int_mode> (shift_mode),
11264 x, GET_MODE_MASK (result_mode) >> orig_count);
11265
11266 /* Do the remainder of the processing in RESULT_MODE. */
11267 x = gen_lowpart_or_truncate (result_mode, x);
11268
11269 /* If COMPLEMENT_P is set, we have to complement X before doing the outer
11270 operation. */
11271 if (complement_p)
11272 x = simplify_gen_unary (NOT, result_mode, x, result_mode);
11273
11274 if (outer_op != UNKNOWN)
11275 {
11276 int_result_mode = as_a <scalar_int_mode> (result_mode);
11277
11278 if (GET_RTX_CLASS (outer_op) != RTX_UNARY
11279 && GET_MODE_PRECISION (int_result_mode) < HOST_BITS_PER_WIDE_INT)
11280 outer_const = trunc_int_for_mode (outer_const, int_result_mode);
11281
11282 if (outer_op == AND)
11283 x = simplify_and_const_int (NULL_RTX, int_result_mode, x, outer_const);
11284 else if (outer_op == SET)
11285 {
11286 /* This means that we have determined that the result is
11287 equivalent to a constant. This should be rare. */
11288 if (!side_effects_p (x))
11289 x = GEN_INT (outer_const);
11290 }
11291 else if (GET_RTX_CLASS (outer_op) == RTX_UNARY)
11292 x = simplify_gen_unary (outer_op, int_result_mode, x, int_result_mode);
11293 else
11294 x = simplify_gen_binary (outer_op, int_result_mode, x,
11295 GEN_INT (outer_const));
11296 }
11297
11298 return x;
11299 }
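/* Illustrative sketch (editor's addition): the nested-shift rules
   above turn (lshiftrt:SI (ashift:SI X 3) 3) into a masked X.  The
   opposite directions subtract to a count of 0 and the mask is the
   nonzero bits of the inner shift moved right 3 places, so the final
   result is (and:SI X (const_int 0x1fffffff)).  */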
11300
11301 /* Simplify a shift of VAROP by COUNT bits. CODE says what kind of shift.
11302 The result of the shift is RESULT_MODE. If we cannot simplify it,
11303 return X or, if it is NULL, synthesize the expression with
11304 simplify_gen_binary. Otherwise, return a simplified value.
11305
11306 The shift is normally computed in the widest mode we find in VAROP, as
11307 long as it isn't a different number of words than RESULT_MODE. Exceptions
11308 are ASHIFTRT and ROTATE, which are always done in their original mode. */
11309
11310 static rtx
11311 simplify_shift_const (rtx x, enum rtx_code code, machine_mode result_mode,
11312 rtx varop, int count)
11313 {
11314 rtx tem = simplify_shift_const_1 (code, result_mode, varop, count);
11315 if (tem)
11316 return tem;
11317
11318 if (!x)
11319 x = simplify_gen_binary (code, GET_MODE (varop), varop,
11320 gen_int_shift_amount (GET_MODE (varop), count));
11321 if (GET_MODE (x) != result_mode)
11322 x = gen_lowpart (result_mode, x);
11323 return x;
11324 }
11325
11326
11327 /* A subroutine of recog_for_combine. See there for arguments and
11328 return value. */
11329
11330 static int
11331 recog_for_combine_1 (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
11332 {
11333 rtx pat = *pnewpat;
11334 rtx pat_without_clobbers;
11335 int insn_code_number;
11336 int num_clobbers_to_add = 0;
11337 int i;
11338 rtx notes = NULL_RTX;
11339 rtx old_notes, old_pat;
11340 int old_icode;
11341
11342 /* If PAT is a PARALLEL, check to see if it contains the CLOBBER
11343 we use to indicate that something didn't match. If we find such a
11344 thing, force rejection. */
11345 if (GET_CODE (pat) == PARALLEL)
11346 for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
11347 if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER
11348 && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
11349 return -1;
11350
11351 old_pat = PATTERN (insn);
11352 old_notes = REG_NOTES (insn);
11353 PATTERN (insn) = pat;
11354 REG_NOTES (insn) = NULL_RTX;
11355
11356 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
11357 if (dump_file && (dump_flags & TDF_DETAILS))
11358 {
11359 if (insn_code_number < 0)
11360 fputs ("Failed to match this instruction:\n", dump_file);
11361 else
11362 fputs ("Successfully matched this instruction:\n", dump_file);
11363 print_rtl_single (dump_file, pat);
11364 }
11365
11366 /* If the pattern didn't match, it is possible that we previously had an
11367 insn that clobbered some register as a side effect, but the combined
11368 insn doesn't need to do that. So try once more without the clobbers,
11369 unless this represents an ASM insn. */
11370
11371 if (insn_code_number < 0 && ! check_asm_operands (pat)
11372 && GET_CODE (pat) == PARALLEL)
11373 {
11374 int pos;
11375
11376 for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++)
11377 if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
11378 {
11379 if (i != pos)
11380 SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i));
11381 pos++;
11382 }
11383
11384 SUBST_INT (XVECLEN (pat, 0), pos);
11385
11386 if (pos == 1)
11387 pat = XVECEXP (pat, 0, 0);
11388
11389 PATTERN (insn) = pat;
11390 insn_code_number = recog (pat, insn, &num_clobbers_to_add);
11391 if (dump_file && (dump_flags & TDF_DETAILS))
11392 {
11393 if (insn_code_number < 0)
11394 fputs ("Failed to match this instruction:\n", dump_file);
11395 else
11396 fputs ("Successfully matched this instruction:\n", dump_file);
11397 print_rtl_single (dump_file, pat);
11398 }
11399 }
11400
11401 pat_without_clobbers = pat;
11402
11403 PATTERN (insn) = old_pat;
11404 REG_NOTES (insn) = old_notes;
11405
11406 /* Recognize all noop sets; these will be killed by a follow-up pass. */
11407 if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat))
11408 insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0;
11409
11410 /* If we had any clobbers to add, make a new pattern that contains
11411 them. Then check to make sure that all of them are dead. */
11412 if (num_clobbers_to_add)
11413 {
11414 rtx newpat = gen_rtx_PARALLEL (VOIDmode,
11415 rtvec_alloc (GET_CODE (pat) == PARALLEL
11416 ? (XVECLEN (pat, 0)
11417 + num_clobbers_to_add)
11418 : num_clobbers_to_add + 1));
11419
11420 if (GET_CODE (pat) == PARALLEL)
11421 for (i = 0; i < XVECLEN (pat, 0); i++)
11422 XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i);
11423 else
11424 XVECEXP (newpat, 0, 0) = pat;
11425
11426 add_clobbers (newpat, insn_code_number);
11427
11428 for (i = XVECLEN (newpat, 0) - num_clobbers_to_add;
11429 i < XVECLEN (newpat, 0); i++)
11430 {
11431 if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0))
11432 && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn))
11433 return -1;
11434 if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH)
11435 {
11436 gcc_assert (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)));
11437 notes = alloc_reg_note (REG_UNUSED,
11438 XEXP (XVECEXP (newpat, 0, i), 0), notes);
11439 }
11440 }
11441 pat = newpat;
11442 }
11443
11444 if (insn_code_number >= 0
11445 && insn_code_number != NOOP_MOVE_INSN_CODE)
11446 {
11447 old_pat = PATTERN (insn);
11448 old_notes = REG_NOTES (insn);
11449 old_icode = INSN_CODE (insn);
11450 PATTERN (insn) = pat;
11451 REG_NOTES (insn) = notes;
11452 INSN_CODE (insn) = insn_code_number;
11453
11454 /* Allow targets to reject combined insn. */
11455 if (!targetm.legitimate_combined_insn (insn))
11456 {
11457 if (dump_file && (dump_flags & TDF_DETAILS))
11458 fputs ("Instruction not appropriate for target.",
11459 dump_file);
11460
11461 /* Callers expect recog_for_combine to strip
11462 clobbers from the pattern on failure. */
11463 pat = pat_without_clobbers;
11464 notes = NULL_RTX;
11465
11466 insn_code_number = -1;
11467 }
11468
11469 PATTERN (insn) = old_pat;
11470 REG_NOTES (insn) = old_notes;
11471 INSN_CODE (insn) = old_icode;
11472 }
11473
11474 *pnewpat = pat;
11475 *pnotes = notes;
11476
11477 return insn_code_number;
11478 }
11479
11480 /* Change every ZERO_EXTRACT and ZERO_EXTEND of a SUBREG that can be
11481 expressed as an AND and maybe an LSHIFTRT, to that formulation.
11482 Return whether anything was so changed. */
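/* For example, on a !BITS_BIG_ENDIAN target (illustrative only):
     (zero_extract:SI (reg:SI 100) (const_int 8) (const_int 4))
   is rewritten by the loop below as
     (and:SI (lshiftrt:SI (reg:SI 100) (const_int 4)) (const_int 255)).  */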
11483
11484 static bool
11485 change_zero_ext (rtx pat)
11486 {
11487 bool changed = false;
11488 rtx *src = &SET_SRC (pat);
11489
11490 subrtx_ptr_iterator::array_type array;
11491 FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
11492 {
11493 rtx x = **iter;
11494 scalar_int_mode mode, inner_mode;
11495 if (!is_a <scalar_int_mode> (GET_MODE (x), &mode))
11496 continue;
11497 int size;
11498
11499 if (GET_CODE (x) == ZERO_EXTRACT
11500 && CONST_INT_P (XEXP (x, 1))
11501 && CONST_INT_P (XEXP (x, 2))
11502 && is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode)
11503 && GET_MODE_PRECISION (inner_mode) <= GET_MODE_PRECISION (mode))
11504 {
11505 size = INTVAL (XEXP (x, 1));
11506
11507 int start = INTVAL (XEXP (x, 2));
11508 if (BITS_BIG_ENDIAN)
11509 start = GET_MODE_PRECISION (inner_mode) - size - start;
11510
11511 if (start != 0)
11512 x = gen_rtx_LSHIFTRT (inner_mode, XEXP (x, 0),
11513 gen_int_shift_amount (inner_mode, start));
11514 else
11515 x = XEXP (x, 0);
11516
11517 if (mode != inner_mode)
11518 {
11519 if (REG_P (x) && HARD_REGISTER_P (x)
11520 && !can_change_dest_mode (x, 0, mode))
11521 continue;
11522
11523 x = gen_lowpart_SUBREG (mode, x);
11524 }
11525 }
11526 else if (GET_CODE (x) == ZERO_EXTEND
11527 && GET_CODE (XEXP (x, 0)) == SUBREG
11528 && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (XEXP (x, 0))))
11529 && !paradoxical_subreg_p (XEXP (x, 0))
11530 && subreg_lowpart_p (XEXP (x, 0)))
11531 {
11532 inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
11533 size = GET_MODE_PRECISION (inner_mode);
11534 x = SUBREG_REG (XEXP (x, 0));
11535 if (GET_MODE (x) != mode)
11536 {
11537 if (REG_P (x) && HARD_REGISTER_P (x)
11538 && !can_change_dest_mode (x, 0, mode))
11539 continue;
11540
11541 x = gen_lowpart_SUBREG (mode, x);
11542 }
11543 }
11544 else if (GET_CODE (x) == ZERO_EXTEND
11545 && REG_P (XEXP (x, 0))
11546 && HARD_REGISTER_P (XEXP (x, 0))
11547 && can_change_dest_mode (XEXP (x, 0), 0, mode))
11548 {
11549 inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
11550 size = GET_MODE_PRECISION (inner_mode);
11551 x = gen_rtx_REG (mode, REGNO (XEXP (x, 0)));
11552 }
11553 else
11554 continue;
11555
11556 if (!(GET_CODE (x) == LSHIFTRT
11557 && CONST_INT_P (XEXP (x, 1))
11558 && size + INTVAL (XEXP (x, 1)) == GET_MODE_PRECISION (mode)))
11559 {
11560 wide_int mask = wi::mask (size, false, GET_MODE_PRECISION (mode));
11561 x = gen_rtx_AND (mode, x, immed_wide_int_const (mask, mode));
11562 }
11563
11564 SUBST (**iter, x);
11565 changed = true;
11566 }
11567
11568 if (changed)
11569 FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
11570 maybe_swap_commutative_operands (**iter);
11571
11572 rtx *dst = &SET_DEST (pat);
11573 scalar_int_mode mode;
11574 if (GET_CODE (*dst) == ZERO_EXTRACT
11575 && REG_P (XEXP (*dst, 0))
11576 && is_a <scalar_int_mode> (GET_MODE (XEXP (*dst, 0)), &mode)
11577 && CONST_INT_P (XEXP (*dst, 1))
11578 && CONST_INT_P (XEXP (*dst, 2)))
11579 {
11580 rtx reg = XEXP (*dst, 0);
11581 int width = INTVAL (XEXP (*dst, 1));
11582 int offset = INTVAL (XEXP (*dst, 2));
11583 int reg_width = GET_MODE_PRECISION (mode);
11584 if (BITS_BIG_ENDIAN)
11585 offset = reg_width - width - offset;
11586
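/* Build the replacement bottom-up (an illustrative sketch): for
   (set (zero_extract R W O) SRC) we construct
   (set R (ior (and R ~M) (and (ashift SRC O) M)))
   where M is the W-bit mask at offset O, keeping the bits of R outside
   the field and merging SRC into it.  */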
11587 rtx x, y, z, w;
11588 wide_int mask = wi::shifted_mask (offset, width, true, reg_width);
11589 wide_int mask2 = wi::shifted_mask (offset, width, false, reg_width);
11590 x = gen_rtx_AND (mode, reg, immed_wide_int_const (mask, mode));
11591 if (offset)
11592 y = gen_rtx_ASHIFT (mode, SET_SRC (pat), GEN_INT (offset));
11593 else
11594 y = SET_SRC (pat);
11595 z = gen_rtx_AND (mode, y, immed_wide_int_const (mask2, mode));
11596 w = gen_rtx_IOR (mode, x, z);
11597 SUBST (SET_DEST (pat), reg);
11598 SUBST (SET_SRC (pat), w);
11599
11600 changed = true;
11601 }
11602
11603 return changed;
11604 }
11605
11606 /* Like recog, but we receive the address of a pointer to a new pattern.
11607 We try to match the rtx that the pointer points to.
11608 If that fails, we may try to modify or replace the pattern,
11609 storing the replacement into the same pointer object.
11610
11611 Modifications include deletion or addition of CLOBBERs. If the
11612 instruction will still not match, we change ZERO_EXTEND and ZERO_EXTRACT
11613 to the equivalent AND and perhaps LSHIFTRT patterns, and try with that
11614 (and undo if that fails).
11615
11616 PNOTES is a pointer to a location where any REG_UNUSED notes added for
11617 the CLOBBERs are placed.
11618
11619 The value is the final insn code from the pattern ultimately matched,
11620 or -1. */
11621
11622 static int
11623 recog_for_combine (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
11624 {
11625 rtx pat = *pnewpat;
11626 int insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
11627 if (insn_code_number >= 0 || check_asm_operands (pat))
11628 return insn_code_number;
11629
11630 void *marker = get_undo_marker ();
11631 bool changed = false;
11632
11633 if (GET_CODE (pat) == SET)
11634 changed = change_zero_ext (pat);
11635 else if (GET_CODE (pat) == PARALLEL)
11636 {
11637 int i;
11638 for (i = 0; i < XVECLEN (pat, 0); i++)
11639 {
11640 rtx set = XVECEXP (pat, 0, i);
11641 if (GET_CODE (set) == SET)
11642 changed |= change_zero_ext (set);
11643 }
11644 }
11645
11646 if (changed)
11647 {
11648 insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
11649
11650 if (insn_code_number < 0)
11651 undo_to_marker (marker);
11652 }
11653
11654 return insn_code_number;
11655 }
11656
11657 /* Like gen_lowpart_general but for use by combine. In combine it
11658 is not possible to create any new pseudoregs. However, it is
11659 safe to create invalid memory addresses, because combine will
11660 try to recognize them and all they will do is make the combine
11661 attempt fail.
11662
11663 If for some reason this cannot do its job, an rtx
11664 (clobber (const_int 0)) is returned.
11665 An insn containing that will not be recognized. */
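/* As an illustration, gen_lowpart_for_combine (QImode, (reg:SI 100))
   normally yields (subreg:QI (reg:SI 100) 0) on a little-endian target,
   whereas an unsupported request yields the CLOBBER described above.  */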
11666
11667 static rtx
11668 gen_lowpart_for_combine (machine_mode omode, rtx x)
11669 {
11670 machine_mode imode = GET_MODE (x);
11671 rtx result;
11672
11673 if (omode == imode)
11674 return x;
11675
11676 /* We can only support MODE being wider than a word if X is a
11677 constant integer or has a mode the same size. */
11678 if (maybe_gt (GET_MODE_SIZE (omode), UNITS_PER_WORD)
11679 && ! (CONST_SCALAR_INT_P (x)
11680 || known_eq (GET_MODE_SIZE (imode), GET_MODE_SIZE (omode))))
11681 goto fail;
11682
11683 /* X might be a paradoxical (subreg (mem)). In that case, gen_lowpart
11684 won't know what to do. So we will strip off the SUBREG here and
11685 process normally. */
11686 if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x)))
11687 {
11688 x = SUBREG_REG (x);
11689
11690 /* For use in case we fall down into the address adjustments
11691 further below, we need to update the known mode of x (imode),
11692 since we just stripped the SUBREG. */
11693 imode = GET_MODE (x);
11694
11695 if (imode == omode)
11696 return x;
11697 }
11698
11699 result = gen_lowpart_common (omode, x);
11700
11701 if (result)
11702 return result;
11703
11704 if (MEM_P (x))
11705 {
11706 /* Refuse to work on a volatile memory ref or one with a mode-dependent
11707 address. */
11708 if (MEM_VOLATILE_P (x)
11709 || mode_dependent_address_p (XEXP (x, 0), MEM_ADDR_SPACE (x)))
11710 goto fail;
11711
11712 /* If we want to refer to something bigger than the original memref,
11713 generate a paradoxical subreg instead. That will force a reload
11714 of the original memref X. */
11715 if (paradoxical_subreg_p (omode, imode))
11716 return gen_rtx_SUBREG (omode, x, 0);
11717
11718 poly_int64 offset = byte_lowpart_offset (omode, imode);
11719 return adjust_address_nv (x, omode, offset);
11720 }
11721
11722 /* If X is a comparison operator, rewrite it in a new mode. This
11723 probably won't match, but may allow further simplifications. */
11724 else if (COMPARISON_P (x))
11725 return gen_rtx_fmt_ee (GET_CODE (x), omode, XEXP (x, 0), XEXP (x, 1));
11726
11727 /* If we couldn't simplify X any other way, just enclose it in a
11728 SUBREG. Normally, this SUBREG won't match, but some patterns may
11729 include an explicit SUBREG or we may simplify it further in combine. */
11730 else
11731 {
11732 rtx res;
11733
11734 if (imode == VOIDmode)
11735 {
11736 imode = int_mode_for_mode (omode).require ();
11737 x = gen_lowpart_common (imode, x);
11738 if (x == NULL)
11739 goto fail;
11740 }
11741 res = lowpart_subreg (omode, x, imode);
11742 if (res)
11743 return res;
11744 }
11745
11746 fail:
11747 return gen_rtx_CLOBBER (omode, const0_rtx);
11748 }
11749
11750 /* Try to simplify a comparison between OP0 and a constant OP1,
11751 where CODE is the comparison code that will be tested, into a
11752 (CODE OP0 const0_rtx) form.
11753
11754 The result is a possibly different comparison code to use.
11755 *POP1 may be updated. */
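/* For example, (ltu X (const_int 1)) canonicalizes to (eq X (const_int 0)):
   the LTU case below rewrites it as LEU 0, and the LEU case then turns
   that into an equality test against zero.  */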
11756
11757 static enum rtx_code
11758 simplify_compare_const (enum rtx_code code, machine_mode mode,
11759 rtx op0, rtx *pop1)
11760 {
11761 scalar_int_mode int_mode;
11762 HOST_WIDE_INT const_op = INTVAL (*pop1);
11763
11764 /* Get the constant we are comparing against and turn off all bits
11765 not on in our mode. */
11766 if (mode != VOIDmode)
11767 const_op = trunc_int_for_mode (const_op, mode);
11768
11769 /* If we are comparing against a constant power of two and the value
11770 being compared can only have that single bit nonzero (e.g., it was
11771 `and'ed with that bit), we can replace this with a comparison
11772 with zero. */
11773 if (const_op
11774 && (code == EQ || code == NE || code == GE || code == GEU
11775 || code == LT || code == LTU)
11776 && is_a <scalar_int_mode> (mode, &int_mode)
11777 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11778 && pow2p_hwi (const_op & GET_MODE_MASK (int_mode))
11779 && (nonzero_bits (op0, int_mode)
11780 == (unsigned HOST_WIDE_INT) (const_op & GET_MODE_MASK (int_mode))))
11781 {
11782 code = (code == EQ || code == GE || code == GEU ? NE : EQ);
11783 const_op = 0;
11784 }
11785
11786 /* Similarly, if we are comparing a value known to be either -1 or
11787 0 with -1, change it to the opposite comparison against zero. */
11788 if (const_op == -1
11789 && (code == EQ || code == NE || code == GT || code == LE
11790 || code == GEU || code == LTU)
11791 && is_a <scalar_int_mode> (mode, &int_mode)
11792 && num_sign_bit_copies (op0, int_mode) == GET_MODE_PRECISION (int_mode))
11793 {
11794 code = (code == EQ || code == LE || code == GEU ? NE : EQ);
11795 const_op = 0;
11796 }
11797
11798 /* Do some canonicalizations based on the comparison code. We prefer
11799 comparisons against zero and then prefer equality comparisons.
11800 If we can reduce the size of a constant, we will do that too. */
11801 switch (code)
11802 {
11803 case LT:
11804 /* < C is equivalent to <= (C - 1) */
11805 if (const_op > 0)
11806 {
11807 const_op -= 1;
11808 code = LE;
11809 /* ... fall through to LE case below. */
11810 gcc_fallthrough ();
11811 }
11812 else
11813 break;
11814
11815 case LE:
11816 /* <= C is equivalent to < (C + 1); we do this for C < 0. */
11817 if (const_op < 0)
11818 {
11819 const_op += 1;
11820 code = LT;
11821 }
11822
11823 /* If we are doing a <= 0 comparison on a value known to have
11824 a zero sign bit, we can replace this with == 0. */
11825 else if (const_op == 0
11826 && is_a <scalar_int_mode> (mode, &int_mode)
11827 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11828 && (nonzero_bits (op0, int_mode)
11829 & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11830 == 0)
11831 code = EQ;
11832 break;
11833
11834 case GE:
11835 /* >= C is equivalent to > (C - 1). */
11836 if (const_op > 0)
11837 {
11838 const_op -= 1;
11839 code = GT;
11840 /* ... fall through to GT below. */
11841 gcc_fallthrough ();
11842 }
11843 else
11844 break;
11845
11846 case GT:
11847 /* > C is equivalent to >= (C + 1); we do this for C < 0. */
11848 if (const_op < 0)
11849 {
11850 const_op += 1;
11851 code = GE;
11852 }
11853
11854 /* If we are doing a > 0 comparison on a value known to have
11855 a zero sign bit, we can replace this with != 0. */
11856 else if (const_op == 0
11857 && is_a <scalar_int_mode> (mode, &int_mode)
11858 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11859 && (nonzero_bits (op0, int_mode)
11860 & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11861 == 0)
11862 code = NE;
11863 break;
11864
11865 case LTU:
11866 /* < C is equivalent to <= (C - 1). */
11867 if (const_op > 0)
11868 {
11869 const_op -= 1;
11870 code = LEU;
11871 /* ... fall through ... */
11872 gcc_fallthrough ();
11873 }
11874 /* (unsigned) < 0x80000000 is equivalent to >= 0. */
11875 else if (is_a <scalar_int_mode> (mode, &int_mode)
11876 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11877 && ((unsigned HOST_WIDE_INT) const_op
11878 == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11879 {
11880 const_op = 0;
11881 code = GE;
11882 break;
11883 }
11884 else
11885 break;
11886
11887 case LEU:
11888 /* unsigned <= 0 is equivalent to == 0 */
11889 if (const_op == 0)
11890 code = EQ;
11891 /* (unsigned) <= 0x7fffffff is equivalent to >= 0. */
11892 else if (is_a <scalar_int_mode> (mode, &int_mode)
11893 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11894 && ((unsigned HOST_WIDE_INT) const_op
11895 == ((HOST_WIDE_INT_1U
11896 << (GET_MODE_PRECISION (int_mode) - 1)) - 1)))
11897 {
11898 const_op = 0;
11899 code = GE;
11900 }
11901 break;
11902
11903 case GEU:
11904 /* >= C is equivalent to > (C - 1). */
11905 if (const_op > 1)
11906 {
11907 const_op -= 1;
11908 code = GTU;
11909 /* ... fall through ... */
11910 gcc_fallthrough ();
11911 }
11912
11913 /* (unsigned) >= 0x80000000 is equivalent to < 0. */
11914 else if (is_a <scalar_int_mode> (mode, &int_mode)
11915 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11916 && ((unsigned HOST_WIDE_INT) const_op
11917 == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11918 {
11919 const_op = 0;
11920 code = LT;
11921 break;
11922 }
11923 else
11924 break;
11925
11926 case GTU:
11927 /* unsigned > 0 is equivalent to != 0 */
11928 if (const_op == 0)
11929 code = NE;
11930 /* (unsigned) > 0x7fffffff is equivalent to < 0. */
11931 else if (is_a <scalar_int_mode> (mode, &int_mode)
11932 && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11933 && ((unsigned HOST_WIDE_INT) const_op
11934 == (HOST_WIDE_INT_1U
11935 << (GET_MODE_PRECISION (int_mode) - 1)) - 1))
11936 {
11937 const_op = 0;
11938 code = LT;
11939 }
11940 break;
11941
11942 default:
11943 break;
11944 }
11945
11946 *pop1 = GEN_INT (const_op);
11947 return code;
11948 }
11949
11950 /* Simplify a comparison between *POP0 and *POP1 where CODE is the
11951 comparison code that will be tested.
11952
11953 The result is a possibly different comparison code to use. *POP0 and
11954 *POP1 may be updated.
11955
11956 It is possible that we might detect that a comparison is either always
11957 true or always false. However, we do not perform general constant
11958 folding in combine, so this knowledge isn't useful. Such tautologies
11959 should have been detected earlier. Hence we ignore all such cases. */
11960
11961 static enum rtx_code
11962 simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
11963 {
11964 rtx op0 = *pop0;
11965 rtx op1 = *pop1;
11966 rtx tem, tem1;
11967 int i;
11968 scalar_int_mode mode, inner_mode, tmode;
11969 opt_scalar_int_mode tmode_iter;
11970
11971 /* Try a few ways of applying the same transformation to both operands. */
11972 while (1)
11973 {
11974 /* The test below this one won't handle SIGN_EXTENDs on machines
11975 without WORD_REGISTER_OPERATIONS, so check them specially here. */
11976 if (!WORD_REGISTER_OPERATIONS
11977 && code != GTU && code != GEU && code != LTU && code != LEU
11978 && GET_CODE (op0) == ASHIFTRT && GET_CODE (op1) == ASHIFTRT
11979 && GET_CODE (XEXP (op0, 0)) == ASHIFT
11980 && GET_CODE (XEXP (op1, 0)) == ASHIFT
11981 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG
11982 && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG
11983 && is_a <scalar_int_mode> (GET_MODE (op0), &mode)
11984 && (is_a <scalar_int_mode>
11985 (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))), &inner_mode))
11986 && inner_mode == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0)))
11987 && CONST_INT_P (XEXP (op0, 1))
11988 && XEXP (op0, 1) == XEXP (op1, 1)
11989 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
11990 && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1)
11991 && (INTVAL (XEXP (op0, 1))
11992 == (GET_MODE_PRECISION (mode)
11993 - GET_MODE_PRECISION (inner_mode))))
11994 {
11995 op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
11996 op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0));
11997 }
11998
11999 /* If both operands are the same constant shift, see if we can ignore the
12000 shift. We can if the shift is a rotate or if the bits shifted out of
12001 this shift are known to be zero for both inputs and if the type of
12002 comparison is compatible with the shift. */
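/* E.g. (eq (lshiftrt A (const_int 2)) (lshiftrt B (const_int 2))) becomes
   (eq A B) when the low two bits of both A and B are known to be zero
   (illustrative).  */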
12003 if (GET_CODE (op0) == GET_CODE (op1)
12004 && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
12005 && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ))
12006 || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT)
12007 && (code != GT && code != LT && code != GE && code != LE))
12008 || (GET_CODE (op0) == ASHIFTRT
12009 && (code != GTU && code != LTU
12010 && code != GEU && code != LEU)))
12011 && CONST_INT_P (XEXP (op0, 1))
12012 && INTVAL (XEXP (op0, 1)) >= 0
12013 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
12014 && XEXP (op0, 1) == XEXP (op1, 1))
12015 {
12016 machine_mode mode = GET_MODE (op0);
12017 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
12018 int shift_count = INTVAL (XEXP (op0, 1));
12019
12020 if (GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFTRT)
12021 mask &= (mask >> shift_count) << shift_count;
12022 else if (GET_CODE (op0) == ASHIFT)
12023 mask = (mask & (mask << shift_count)) >> shift_count;
12024
12025 if ((nonzero_bits (XEXP (op0, 0), mode) & ~mask) == 0
12026 && (nonzero_bits (XEXP (op1, 0), mode) & ~mask) == 0)
12027 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0);
12028 else
12029 break;
12030 }
12031
12032 /* If both operands are AND's of a paradoxical SUBREG by constant, the
12033 SUBREGs are of the same mode, and, in both cases, the AND would
12034 be redundant if the comparison was done in the narrower mode,
12035 do the comparison in the narrower mode (e.g., we are AND'ing with 1
12036 and the operand's possibly nonzero bits are 0xffffff01; in that case
12037 if we only care about QImode, we don't need the AND). This case
12038 occurs if the output mode of an scc insn is not SImode and
12039 STORE_FLAG_VALUE == 1 (e.g., the 386).
12040
12041 Similarly, check for a case where the AND's are ZERO_EXTEND
12042 operations from some narrower mode even though a SUBREG is not
12043 present. */
12044
12045 else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND
12046 && CONST_INT_P (XEXP (op0, 1))
12047 && CONST_INT_P (XEXP (op1, 1)))
12048 {
12049 rtx inner_op0 = XEXP (op0, 0);
12050 rtx inner_op1 = XEXP (op1, 0);
12051 HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1));
12052 HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1));
12053 int changed = 0;
12054
12055 if (paradoxical_subreg_p (inner_op0)
12056 && GET_CODE (inner_op1) == SUBREG
12057 && HWI_COMPUTABLE_MODE_P (GET_MODE (SUBREG_REG (inner_op0)))
12058 && (GET_MODE (SUBREG_REG (inner_op0))
12059 == GET_MODE (SUBREG_REG (inner_op1)))
12060 && ((~c0) & nonzero_bits (SUBREG_REG (inner_op0),
12061 GET_MODE (SUBREG_REG (inner_op0)))) == 0
12062 && ((~c1) & nonzero_bits (SUBREG_REG (inner_op1),
12063 GET_MODE (SUBREG_REG (inner_op1)))) == 0)
12064 {
12065 op0 = SUBREG_REG (inner_op0);
12066 op1 = SUBREG_REG (inner_op1);
12067
12068 /* The resulting comparison is always unsigned since we masked
12069 off the original sign bit. */
12070 code = unsigned_condition (code);
12071
12072 changed = 1;
12073 }
12074
12075 else if (c0 == c1)
12076 FOR_EACH_MODE_UNTIL (tmode,
12077 as_a <scalar_int_mode> (GET_MODE (op0)))
12078 if ((unsigned HOST_WIDE_INT) c0 == GET_MODE_MASK (tmode))
12079 {
12080 op0 = gen_lowpart_or_truncate (tmode, inner_op0);
12081 op1 = gen_lowpart_or_truncate (tmode, inner_op1);
12082 code = unsigned_condition (code);
12083 changed = 1;
12084 break;
12085 }
12086
12087 if (! changed)
12088 break;
12089 }
12090
12091 /* If both operands are NOT, we can strip off the outer operation
12092 and adjust the comparison code for swapped operands; similarly for
12093 NEG, except that this must be an equality comparison. */
12094 else if ((GET_CODE (op0) == NOT && GET_CODE (op1) == NOT)
12095 || (GET_CODE (op0) == NEG && GET_CODE (op1) == NEG
12096 && (code == EQ || code == NE)))
12097 op0 = XEXP (op0, 0), op1 = XEXP (op1, 0), code = swap_condition (code);
12098
12099 else
12100 break;
12101 }
12102
12103 /* If the first operand is a constant, swap the operands and adjust the
12104 comparison code appropriately, but don't do this if the second operand
12105 is already a constant integer. */
12106 if (swap_commutative_operands_p (op0, op1))
12107 {
12108 std::swap (op0, op1);
12109 code = swap_condition (code);
12110 }
12111
12112 /* We now enter a loop during which we will try to simplify the comparison.
12113 For the most part, we only are concerned with comparisons with zero,
12114 but some things may really be comparisons with zero but not start
12115 out looking that way. */
12116
12117 while (CONST_INT_P (op1))
12118 {
12119 machine_mode raw_mode = GET_MODE (op0);
12120 scalar_int_mode int_mode;
12121 int equality_comparison_p;
12122 int sign_bit_comparison_p;
12123 int unsigned_comparison_p;
12124 HOST_WIDE_INT const_op;
12125
12126 /* We only want to handle integral modes. This catches VOIDmode,
12127 CCmode, and the floating-point modes. An exception is that we
12128 can handle VOIDmode if OP0 is a COMPARE or a comparison
12129 operation. */
12130
12131 if (GET_MODE_CLASS (raw_mode) != MODE_INT
12132 && ! (raw_mode == VOIDmode
12133 && (GET_CODE (op0) == COMPARE || COMPARISON_P (op0))))
12134 break;
12135
12136 /* Try to simplify the compare to constant, possibly changing the
12137 comparison op, and/or changing op1 to zero. */
12138 code = simplify_compare_const (code, raw_mode, op0, &op1);
12139 const_op = INTVAL (op1);
12140
12141 /* Compute some predicates to simplify code below. */
12142
12143 equality_comparison_p = (code == EQ || code == NE);
12144 sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0);
12145 unsigned_comparison_p = (code == LTU || code == LEU || code == GTU
12146 || code == GEU);
12147
12148 /* If this is a sign bit comparison and we can do arithmetic in
12149 MODE, say that we will only be needing the sign bit of OP0. */
12150 if (sign_bit_comparison_p
12151 && is_a <scalar_int_mode> (raw_mode, &int_mode)
12152 && HWI_COMPUTABLE_MODE_P (int_mode))
12153 op0 = force_to_mode (op0, int_mode,
12154 HOST_WIDE_INT_1U
12155 << (GET_MODE_PRECISION (int_mode) - 1),
12156 0);
12157
12158 if (COMPARISON_P (op0))
12159 {
12160 /* We can't do anything if OP0 is a condition code value, rather
12161 than an actual data value. */
12162 if (const_op != 0
12163 || CC0_P (XEXP (op0, 0))
12164 || GET_MODE_CLASS (GET_MODE (XEXP (op0, 0))) == MODE_CC)
12165 break;
12166
12167 /* Get the two operands being compared. */
12168 if (GET_CODE (XEXP (op0, 0)) == COMPARE)
12169 tem = XEXP (XEXP (op0, 0), 0), tem1 = XEXP (XEXP (op0, 0), 1);
12170 else
12171 tem = XEXP (op0, 0), tem1 = XEXP (op0, 1);
12172
12173 /* Check for the cases where we simply want the result of the
12174 earlier test or the opposite of that result. */
12175 if (code == NE || code == EQ
12176 || (val_signbit_known_set_p (raw_mode, STORE_FLAG_VALUE)
12177 && (code == LT || code == GE)))
12178 {
12179 enum rtx_code new_code;
12180 if (code == LT || code == NE)
12181 new_code = GET_CODE (op0);
12182 else
12183 new_code = reversed_comparison_code (op0, NULL);
12184
12185 if (new_code != UNKNOWN)
12186 {
12187 code = new_code;
12188 op0 = tem;
12189 op1 = tem1;
12190 continue;
12191 }
12192 }
12193 break;
12194 }
12195
12196 if (raw_mode == VOIDmode)
12197 break;
12198 scalar_int_mode mode = as_a <scalar_int_mode> (raw_mode);
12199
12200 /* Now try cases based on the opcode of OP0. If none of the cases
12201 does a "continue", we exit this loop immediately after the
12202 switch. */
12203
12204 unsigned int mode_width = GET_MODE_PRECISION (mode);
12205 unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
12206 switch (GET_CODE (op0))
12207 {
12208 case ZERO_EXTRACT:
12209 /* If we are extracting a single bit from a variable position in
12210 a constant that has only a single bit set and are comparing it
12211 with zero, we can convert this into an equality comparison
12212 between the position and the location of the single bit. */
12213 /* Except we can't if SHIFT_COUNT_TRUNCATED is set, since we might
12214 have already reduced the shift count modulo the word size. */
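/* E.g. (eq (zero_extract (const_int 4) (const_int 1) X) (const_int 0))
   becomes (ne X (const_int 2)) when !BITS_BIG_ENDIAN, since only bit 2
   of the constant is set (illustrative).  */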
12215 if (!SHIFT_COUNT_TRUNCATED
12216 && CONST_INT_P (XEXP (op0, 0))
12217 && XEXP (op0, 1) == const1_rtx
12218 && equality_comparison_p && const_op == 0
12219 && (i = exact_log2 (UINTVAL (XEXP (op0, 0)))) >= 0)
12220 {
12221 if (BITS_BIG_ENDIAN)
12222 i = BITS_PER_WORD - 1 - i;
12223
12224 op0 = XEXP (op0, 2);
12225 op1 = GEN_INT (i);
12226 const_op = i;
12227
12228 /* Result is nonzero iff shift count is equal to I. */
12229 code = reverse_condition (code);
12230 continue;
12231 }
12232
12233 /* fall through */
12234
12235 case SIGN_EXTRACT:
12236 tem = expand_compound_operation (op0);
12237 if (tem != op0)
12238 {
12239 op0 = tem;
12240 continue;
12241 }
12242 break;
12243
12244 case NOT:
12245 /* If testing for equality, we can take the NOT of the constant. */
12246 if (equality_comparison_p
12247 && (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0)
12248 {
12249 op0 = XEXP (op0, 0);
12250 op1 = tem;
12251 continue;
12252 }
12253
12254 /* If just looking at the sign bit, reverse the sense of the
12255 comparison. */
12256 if (sign_bit_comparison_p)
12257 {
12258 op0 = XEXP (op0, 0);
12259 code = (code == GE ? LT : GE);
12260 continue;
12261 }
12262 break;
12263
12264 case NEG:
12265 /* If testing for equality, we can take the NEG of the constant. */
12266 if (equality_comparison_p
12267 && (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0)
12268 {
12269 op0 = XEXP (op0, 0);
12270 op1 = tem;
12271 continue;
12272 }
12273
12274 /* The remaining cases only apply to comparisons with zero. */
12275 if (const_op != 0)
12276 break;
12277
12278 /* When X is ABS or is known positive,
12279 (neg X) is < 0 if and only if X != 0. */
12280
12281 if (sign_bit_comparison_p
12282 && (GET_CODE (XEXP (op0, 0)) == ABS
12283 || (mode_width <= HOST_BITS_PER_WIDE_INT
12284 && (nonzero_bits (XEXP (op0, 0), mode)
12285 & (HOST_WIDE_INT_1U << (mode_width - 1)))
12286 == 0)))
12287 {
12288 op0 = XEXP (op0, 0);
12289 code = (code == LT ? NE : EQ);
12290 continue;
12291 }
12292
12293 /* If we have NEG of something whose two high-order bits are the
12294 same, we know that "(-a) < 0" is equivalent to "a > 0". */
12295 if (num_sign_bit_copies (op0, mode) >= 2)
12296 {
12297 op0 = XEXP (op0, 0);
12298 code = swap_condition (code);
12299 continue;
12300 }
12301 break;
12302
12303 case ROTATE:
12304 /* If we are testing equality and our count is a constant, we
12305 can perform the inverse operation on our RHS. */
12306 if (equality_comparison_p && CONST_INT_P (XEXP (op0, 1))
12307 && (tem = simplify_binary_operation (ROTATERT, mode,
12308 op1, XEXP (op0, 1))) != 0)
12309 {
12310 op0 = XEXP (op0, 0);
12311 op1 = tem;
12312 continue;
12313 }
12314
12315 /* If we are doing a < 0 or >= 0 comparison, it means we are testing
12316 a particular bit. Convert it to an AND of a constant of that
12317 bit. This will be converted into a ZERO_EXTRACT. */
12318 if (const_op == 0 && sign_bit_comparison_p
12319 && CONST_INT_P (XEXP (op0, 1))
12320 && mode_width <= HOST_BITS_PER_WIDE_INT
12321 && UINTVAL (XEXP (op0, 1)) < mode_width)
12322 {
12323 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
12324 (HOST_WIDE_INT_1U
12325 << (mode_width - 1
12326 - INTVAL (XEXP (op0, 1)))));
12327 code = (code == LT ? NE : EQ);
12328 continue;
12329 }
12330
12331 /* Fall through. */
12332
12333 case ABS:
12334 /* ABS is ignorable inside an equality comparison with zero. */
12335 if (const_op == 0 && equality_comparison_p)
12336 {
12337 op0 = XEXP (op0, 0);
12338 continue;
12339 }
12340 break;
12341
12342 case SIGN_EXTEND:
12343 /* Can simplify (compare (zero/sign_extend FOO) CONST) to
12344 (compare FOO CONST) if CONST fits in FOO's mode and we
12345 are either testing inequality or have an unsigned
12346 comparison with ZERO_EXTEND or a signed comparison with
12347 SIGN_EXTEND. But don't do it if we don't have a compare
12348 insn of the given mode, since we'd have to revert it
12349 later on, and then we wouldn't know whether to sign- or
12350 zero-extend. */
12351 if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
12352 && ! unsigned_comparison_p
12353 && HWI_COMPUTABLE_MODE_P (mode)
12354 && trunc_int_for_mode (const_op, mode) == const_op
12355 && have_insn_for (COMPARE, mode))
12356 {
12357 op0 = XEXP (op0, 0);
12358 continue;
12359 }
12360 break;
12361
12362 case SUBREG:
12363 /* Check for the case where we are comparing A - C1 with C2, that is
12364
12365 (subreg:MODE (plus (A) (-C1))) op (C2)
12366
12367 with C1 a constant, and try to lift the SUBREG, i.e. to do the
12368 comparison in the wider mode. One of the following two conditions
12369 must be true in order for this to be valid:
12370
12371 1. The mode extension results in the same bit pattern being added
12372 on both sides and the comparison is equality or unsigned. As
12373 C2 has been truncated to fit in MODE, the pattern can only be
12374 all 0s or all 1s.
12375
12376 2. The mode extension results in the sign bit being copied on
12377 each side.
12378
12379 The difficulty here is that we have predicates for A but not for
12380 (A - C1) so we need to check that C1 is within proper bounds so
12381 as to perturb A as little as possible. */
12382
12383 if (mode_width <= HOST_BITS_PER_WIDE_INT
12384 && subreg_lowpart_p (op0)
12385 && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
12386 &inner_mode)
12387 && GET_MODE_PRECISION (inner_mode) > mode_width
12388 && GET_CODE (SUBREG_REG (op0)) == PLUS
12389 && CONST_INT_P (XEXP (SUBREG_REG (op0), 1)))
12390 {
12391 rtx a = XEXP (SUBREG_REG (op0), 0);
12392 HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1));
12393
12394 if ((c1 > 0
12395 && (unsigned HOST_WIDE_INT) c1
12396 < HOST_WIDE_INT_1U << (mode_width - 1)
12397 && (equality_comparison_p || unsigned_comparison_p)
12398 /* (A - C1) zero-extends if it is positive and sign-extends
12399 if it is negative, C2 both zero- and sign-extends. */
12400 && (((nonzero_bits (a, inner_mode)
12401 & ~GET_MODE_MASK (mode)) == 0
12402 && const_op >= 0)
12403 /* (A - C1) sign-extends if it is positive and 1-extends
12404 if it is negative, C2 both sign- and 1-extends. */
12405 || (num_sign_bit_copies (a, inner_mode)
12406 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
12407 - mode_width)
12408 && const_op < 0)))
12409 || ((unsigned HOST_WIDE_INT) c1
12410 < HOST_WIDE_INT_1U << (mode_width - 2)
12411 /* (A - C1) always sign-extends, like C2. */
12412 && num_sign_bit_copies (a, inner_mode)
12413 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
12414 - (mode_width - 1))))
12415 {
12416 op0 = SUBREG_REG (op0);
12417 continue;
12418 }
12419 }
12420
12421 /* If the inner mode is narrower and we are extracting the low part,
12422 we can treat the SUBREG as if it were a ZERO_EXTEND. */
12423 if (paradoxical_subreg_p (op0))
12424 ;
12425 else if (subreg_lowpart_p (op0)
12426 && GET_MODE_CLASS (mode) == MODE_INT
12427 && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
12428 && (code == NE || code == EQ)
12429 && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
12430 && !paradoxical_subreg_p (op0)
12431 && (nonzero_bits (SUBREG_REG (op0), inner_mode)
12432 & ~GET_MODE_MASK (mode)) == 0)
12433 {
12434 /* Remove outer subregs that don't do anything. */
12435 tem = gen_lowpart (inner_mode, op1);
12436
12437 if ((nonzero_bits (tem, inner_mode)
12438 & ~GET_MODE_MASK (mode)) == 0)
12439 {
12440 op0 = SUBREG_REG (op0);
12441 op1 = tem;
12442 continue;
12443 }
12444 break;
12445 }
12446 else
12447 break;
12448
12449 /* FALLTHROUGH */
12450
12451 case ZERO_EXTEND:
12452 if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
12453 && (unsigned_comparison_p || equality_comparison_p)
12454 && HWI_COMPUTABLE_MODE_P (mode)
12455 && (unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (mode)
12456 && const_op >= 0
12457 && have_insn_for (COMPARE, mode))
12458 {
12459 op0 = XEXP (op0, 0);
12460 continue;
12461 }
12462 break;
12463
12464 case PLUS:
12465 /* (eq (plus X A) B) -> (eq X (minus B A)). We can only do
12466 this for equality comparisons due to pathological cases involving
12467 overflows. */
12468 if (equality_comparison_p
12469 && (tem = simplify_binary_operation (MINUS, mode,
12470 op1, XEXP (op0, 1))) != 0)
12471 {
12472 op0 = XEXP (op0, 0);
12473 op1 = tem;
12474 continue;
12475 }
12476
12477 /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0. */
12478 if (const_op == 0 && XEXP (op0, 1) == constm1_rtx
12479 && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p)
12480 {
12481 op0 = XEXP (XEXP (op0, 0), 0);
12482 code = (code == LT ? EQ : NE);
12483 continue;
12484 }
12485 break;
12486
12487 case MINUS:
12488 /* We used to optimize signed comparisons against zero, but that
12489 was incorrect. Unsigned comparisons against zero (GTU, LEU)
12490 arrive here as equality comparisons, or (GEU, LTU) are
12491 optimized away. No need to special-case them. */
12492
12493 /* (eq (minus A B) C) -> (eq A (plus B C)) or
12494 (eq B (minus A C)), whichever simplifies. We can only do
12495 this for equality comparisons due to pathological cases involving
12496 overflows. */
12497 if (equality_comparison_p
12498 && (tem = simplify_binary_operation (PLUS, mode,
12499 XEXP (op0, 1), op1)) != 0)
12500 {
12501 op0 = XEXP (op0, 0);
12502 op1 = tem;
12503 continue;
12504 }
12505
12506 if (equality_comparison_p
12507 && (tem = simplify_binary_operation (MINUS, mode,
12508 XEXP (op0, 0), op1)) != 0)
12509 {
12510 op0 = XEXP (op0, 1);
12511 op1 = tem;
12512 continue;
12513 }
12514
12515 /* The sign bit of (minus (ashiftrt X C) X), where C is the number
12516 of bits in X minus 1, is one iff X > 0. */
12517 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT
12518 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12519 && UINTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1
12520 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
12521 {
12522 op0 = XEXP (op0, 1);
12523 code = (code == GE ? LE : GT);
12524 continue;
12525 }
12526 break;
12527
12528 case XOR:
12529 /* (eq (xor A B) C) -> (eq A (xor B C)). This is a simplification
12530 if C is zero or B is a constant. */
12531 if (equality_comparison_p
12532 && (tem = simplify_binary_operation (XOR, mode,
12533 XEXP (op0, 1), op1)) != 0)
12534 {
12535 op0 = XEXP (op0, 0);
12536 op1 = tem;
12537 continue;
12538 }
12539 break;
12540
12541
12542 case IOR:
12543 /* The sign bit of (ior (plus X (const_int -1)) X) is nonzero
12544 iff X <= 0. */
12545 if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS
12546 && XEXP (XEXP (op0, 0), 1) == constm1_rtx
12547 && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
12548 {
12549 op0 = XEXP (op0, 1);
12550 code = (code == GE ? GT : LE);
12551 continue;
12552 }
12553 break;
12554
12555 case AND:
12556 /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1). This
12557 will be converted to a ZERO_EXTRACT later. */
12558 if (const_op == 0 && equality_comparison_p
12559 && GET_CODE (XEXP (op0, 0)) == ASHIFT
12560 && XEXP (XEXP (op0, 0), 0) == const1_rtx)
12561 {
12562 op0 = gen_rtx_LSHIFTRT (mode, XEXP (op0, 1),
12563 XEXP (XEXP (op0, 0), 1));
12564 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
12565 continue;
12566 }
12567
12568 /* If we are comparing (and (lshiftrt X C1) C2) for equality with
12569 zero and X is a comparison and C1 and C2 describe only bits set
12570 in STORE_FLAG_VALUE, we can compare with X. */
12571 if (const_op == 0 && equality_comparison_p
12572 && mode_width <= HOST_BITS_PER_WIDE_INT
12573 && CONST_INT_P (XEXP (op0, 1))
12574 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
12575 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12576 && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0
12577 && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT)
12578 {
12579 mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
12580 << INTVAL (XEXP (XEXP (op0, 0), 1)));
12581 if ((~STORE_FLAG_VALUE & mask) == 0
12582 && (COMPARISON_P (XEXP (XEXP (op0, 0), 0))
12583 || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0
12584 && COMPARISON_P (tem))))
12585 {
12586 op0 = XEXP (XEXP (op0, 0), 0);
12587 continue;
12588 }
12589 }
12590
12591 /* If we are doing an equality comparison of an AND of a bit equal
12592 to the sign bit, replace this with a LT or GE comparison of
12593 the underlying value. */
12594 if (equality_comparison_p
12595 && const_op == 0
12596 && CONST_INT_P (XEXP (op0, 1))
12597 && mode_width <= HOST_BITS_PER_WIDE_INT
12598 && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
12599 == HOST_WIDE_INT_1U << (mode_width - 1)))
12600 {
12601 op0 = XEXP (op0, 0);
12602 code = (code == EQ ? GE : LT);
12603 continue;
12604 }
12605
12606 /* If this AND operation is really a ZERO_EXTEND from a narrower
12607 mode, the constant fits within that mode, and this is either an
12608 equality or unsigned comparison, try to do this comparison in
12609 the narrower mode.
12610
12611 Note that in:
12612
12613 (ne:DI (and:DI (reg:DI 4) (const_int 0xffffffff)) (const_int 0))
12614 -> (ne:DI (reg:SI 4) (const_int 0))
12615
12616 unless TARGET_TRULY_NOOP_TRUNCATION allows it or the register is
12617 known to hold a value of the required mode, the
12618 transformation is invalid. */
12619 if ((equality_comparison_p || unsigned_comparison_p)
12620 && CONST_INT_P (XEXP (op0, 1))
12621 && (i = exact_log2 ((UINTVAL (XEXP (op0, 1))
12622 & GET_MODE_MASK (mode))
12623 + 1)) >= 0
12624 && const_op >> i == 0
12625 && int_mode_for_size (i, 1).exists (&tmode))
12626 {
12627 op0 = gen_lowpart_or_truncate (tmode, XEXP (op0, 0));
12628 continue;
12629 }
12630
12631 /* If this is (and:M1 (subreg:M1 X:M2 0) (const_int C1)) where C1
12632 fits in both M1 and M2 and the SUBREG is either paradoxical
12633 or represents the low part, permute the SUBREG and the AND
12634 and try again. */
12635 if (GET_CODE (XEXP (op0, 0)) == SUBREG
12636 && CONST_INT_P (XEXP (op0, 1)))
12637 {
12638 unsigned HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
12639 /* Require an integral mode, to avoid creating something like
12640 (AND:SF ...). */
12641 if ((is_a <scalar_int_mode>
12642 (GET_MODE (SUBREG_REG (XEXP (op0, 0))), &tmode))
12643 /* It is unsafe to commute the AND into the SUBREG if the
12644 SUBREG is paradoxical and WORD_REGISTER_OPERATIONS is
12645 not defined. As originally written the upper bits
12646 have a defined value due to the AND operation.
12647 However, if we commute the AND inside the SUBREG then
12648 they no longer have defined values and the meaning of
12649 the code has been changed.
12650 Also C1 should not change value in the smaller mode,
12651 see PR67028 (a positive C1 can become negative in the
12652 smaller mode, so that the AND does no longer mask the
12653 upper bits). */
12654 && ((WORD_REGISTER_OPERATIONS
12655 && mode_width > GET_MODE_PRECISION (tmode)
12656 && mode_width <= BITS_PER_WORD
12657 && trunc_int_for_mode (c1, tmode) == (HOST_WIDE_INT) c1)
12658 || (mode_width <= GET_MODE_PRECISION (tmode)
12659 && subreg_lowpart_p (XEXP (op0, 0))))
12660 && mode_width <= HOST_BITS_PER_WIDE_INT
12661 && HWI_COMPUTABLE_MODE_P (tmode)
12662 && (c1 & ~mask) == 0
12663 && (c1 & ~GET_MODE_MASK (tmode)) == 0
12664 && c1 != mask
12665 && c1 != GET_MODE_MASK (tmode))
12666 {
12667 op0 = simplify_gen_binary (AND, tmode,
12668 SUBREG_REG (XEXP (op0, 0)),
12669 gen_int_mode (c1, tmode));
12670 op0 = gen_lowpart (mode, op0);
12671 continue;
12672 }
12673 }
12674
12675 /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0). */
12676 if (const_op == 0 && equality_comparison_p
12677 && XEXP (op0, 1) == const1_rtx
12678 && GET_CODE (XEXP (op0, 0)) == NOT)
12679 {
12680 op0 = simplify_and_const_int (NULL_RTX, mode,
12681 XEXP (XEXP (op0, 0), 0), 1);
12682 code = (code == NE ? EQ : NE);
12683 continue;
12684 }
12685
12686 /* Convert (ne (and (lshiftrt (not X)) 1) 0) to
12687 (eq (and (lshiftrt X) 1) 0).
12688 Also handle the case where (not X) is expressed using xor. */
12689 if (const_op == 0 && equality_comparison_p
12690 && XEXP (op0, 1) == const1_rtx
12691 && GET_CODE (XEXP (op0, 0)) == LSHIFTRT)
12692 {
12693 rtx shift_op = XEXP (XEXP (op0, 0), 0);
12694 rtx shift_count = XEXP (XEXP (op0, 0), 1);
12695
12696 if (GET_CODE (shift_op) == NOT
12697 || (GET_CODE (shift_op) == XOR
12698 && CONST_INT_P (XEXP (shift_op, 1))
12699 && CONST_INT_P (shift_count)
12700 && HWI_COMPUTABLE_MODE_P (mode)
12701 && (UINTVAL (XEXP (shift_op, 1))
12702 == HOST_WIDE_INT_1U
12703 << INTVAL (shift_count))))
12704 {
12705 op0
12706 = gen_rtx_LSHIFTRT (mode, XEXP (shift_op, 0), shift_count);
12707 op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
12708 code = (code == NE ? EQ : NE);
12709 continue;
12710 }
12711 }
12712 break;
12713
12714 case ASHIFT:
12715 /* If we have (compare (ashift FOO N) (const_int C)) and
12716 the high order N bits of FOO (N+1 if an inequality comparison)
12717 are known to be zero, we can do this by comparing FOO with C
12718 shifted right N bits so long as the low-order N bits of C are
12719 zero. */
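/* E.g. (eq (ashift X (const_int 2)) (const_int 20)) can become
   (eq X (const_int 5)) when the top two bits of X are known to be zero
   (illustrative).  */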
12720 if (CONST_INT_P (XEXP (op0, 1))
12721 && INTVAL (XEXP (op0, 1)) >= 0
12722 && ((INTVAL (XEXP (op0, 1)) + ! equality_comparison_p)
12723 < HOST_BITS_PER_WIDE_INT)
12724 && (((unsigned HOST_WIDE_INT) const_op
12725 & ((HOST_WIDE_INT_1U << INTVAL (XEXP (op0, 1)))
12726 - 1)) == 0)
12727 && mode_width <= HOST_BITS_PER_WIDE_INT
12728 && (nonzero_bits (XEXP (op0, 0), mode)
12729 & ~(mask >> (INTVAL (XEXP (op0, 1))
12730 + ! equality_comparison_p))) == 0)
12731 {
12732 /* We must perform a logical shift, not an arithmetic one,
12733 as we want the top N bits of C to be zero. */
12734 unsigned HOST_WIDE_INT temp = const_op & GET_MODE_MASK (mode);
12735
12736 temp >>= INTVAL (XEXP (op0, 1));
12737 op1 = gen_int_mode (temp, mode);
12738 op0 = XEXP (op0, 0);
12739 continue;
12740 }
12741
12742 /* If we are doing a sign bit comparison, it means we are testing
12743 a particular bit. Convert it to the appropriate AND. */
12744 if (sign_bit_comparison_p && CONST_INT_P (XEXP (op0, 1))
12745 && mode_width <= HOST_BITS_PER_WIDE_INT)
12746 {
12747 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
12748 (HOST_WIDE_INT_1U
12749 << (mode_width - 1
12750 - INTVAL (XEXP (op0, 1)))));
12751 code = (code == LT ? NE : EQ);
12752 continue;
12753 }
12754
12755 /* If this is an equality comparison with zero and we are shifting
12756 the low bit to the sign bit, we can convert this to an AND of the
12757 low-order bit. */
12758 if (const_op == 0 && equality_comparison_p
12759 && CONST_INT_P (XEXP (op0, 1))
12760 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
12761 {
12762 op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), 1);
12763 continue;
12764 }
12765 break;
12766
12767 case ASHIFTRT:
12768 /* If this is an equality comparison with zero, we can do this
12769 as a logical shift, which might be much simpler. */
12770 if (equality_comparison_p && const_op == 0
12771 && CONST_INT_P (XEXP (op0, 1)))
12772 {
12773 op0 = simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
12774 XEXP (op0, 0),
12775 INTVAL (XEXP (op0, 1)));
12776 continue;
12777 }
12778
12779 /* If OP0 is a sign extension and CODE is not an unsigned comparison,
12780 do the comparison in a narrower mode. */
12781 if (! unsigned_comparison_p
12782 && CONST_INT_P (XEXP (op0, 1))
12783 && GET_CODE (XEXP (op0, 0)) == ASHIFT
12784 && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
12785 && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
12786 .exists (&tmode))
12787 && (((unsigned HOST_WIDE_INT) const_op
12788 + (GET_MODE_MASK (tmode) >> 1) + 1)
12789 <= GET_MODE_MASK (tmode)))
12790 {
12791 op0 = gen_lowpart (tmode, XEXP (XEXP (op0, 0), 0));
12792 continue;
12793 }
12794
12795 /* Likewise if OP0 is a PLUS of a sign extension with a
12796 constant, which is usually represented with the PLUS
12797 between the shifts. */
12798 if (! unsigned_comparison_p
12799 && CONST_INT_P (XEXP (op0, 1))
12800 && GET_CODE (XEXP (op0, 0)) == PLUS
12801 && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12802 && GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT
12803 && XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1)
12804 && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
12805 .exists (&tmode))
12806 && (((unsigned HOST_WIDE_INT) const_op
12807 + (GET_MODE_MASK (tmode) >> 1) + 1)
12808 <= GET_MODE_MASK (tmode)))
12809 {
12810 rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0);
12811 rtx add_const = XEXP (XEXP (op0, 0), 1);
12812 rtx new_const = simplify_gen_binary (ASHIFTRT, mode,
12813 add_const, XEXP (op0, 1));
12814
12815 op0 = simplify_gen_binary (PLUS, tmode,
12816 gen_lowpart (tmode, inner),
12817 new_const);
12818 continue;
12819 }
12820
12821 /* FALLTHROUGH */
12822 case LSHIFTRT:
12823 /* If we have (compare (xshiftrt FOO N) (const_int C)) and
12824 the low order N bits of FOO are known to be zero, we can do this
12825 by comparing FOO with C shifted left N bits so long as no
12826 overflow occurs. Even if the low order N bits of FOO aren't known
12827 to be zero, if the comparison is >= or < we can use the same
12828 optimization and for > or <= by setting all the low
12829 order N bits in the comparison constant. */
12830 if (CONST_INT_P (XEXP (op0, 1))
12831 && INTVAL (XEXP (op0, 1)) > 0
12832 && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
12833 && mode_width <= HOST_BITS_PER_WIDE_INT
12834 && (((unsigned HOST_WIDE_INT) const_op
12835 + (GET_CODE (op0) != LSHIFTRT
12836 ? ((GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)) >> 1)
12837 + 1)
12838 : 0))
12839 <= GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1))))
12840 {
12841 unsigned HOST_WIDE_INT low_bits
12842 = (nonzero_bits (XEXP (op0, 0), mode)
12843 & ((HOST_WIDE_INT_1U
12844 << INTVAL (XEXP (op0, 1))) - 1));
12845 if (low_bits == 0 || !equality_comparison_p)
12846 {
12847 /* If the shift was logical, then we must make the condition
12848 unsigned. */
12849 if (GET_CODE (op0) == LSHIFTRT)
12850 code = unsigned_condition (code);
12851
12852 const_op = (unsigned HOST_WIDE_INT) const_op
12853 << INTVAL (XEXP (op0, 1));
12854 if (low_bits != 0
12855 && (code == GT || code == GTU
12856 || code == LE || code == LEU))
12857 const_op
12858 |= ((HOST_WIDE_INT_1 << INTVAL (XEXP (op0, 1))) - 1);
12859 op1 = GEN_INT (const_op);
12860 op0 = XEXP (op0, 0);
12861 continue;
12862 }
12863 }
12864
12865 /* If we are using this shift to extract just the sign bit, we
12866 can replace this with an LT or GE comparison. */
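/* E.g. (ne (lshiftrt X (const_int 31)) (const_int 0)) in SImode becomes
   (lt X (const_int 0)) (illustrative).  */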
12867 if (const_op == 0
12868 && (equality_comparison_p || sign_bit_comparison_p)
12869 && CONST_INT_P (XEXP (op0, 1))
12870 && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
12871 {
12872 op0 = XEXP (op0, 0);
12873 code = (code == NE || code == GT ? LT : GE);
12874 continue;
12875 }
12876 break;
12877
12878 default:
12879 break;
12880 }
12881
12882 break;
12883 }
12884
12885 /* Now make any compound operations involved in this comparison. Then,
12886 check for an outermost SUBREG on OP0 that is not doing anything or is
12887 paradoxical. The latter transformation must only be performed when
12888 it is known that the "extra" bits will be the same in op0 and op1 or
12889 that they don't matter. There are three cases to consider:
12890
12891 1. SUBREG_REG (op0) is a register. In this case the bits are don't
12892 care bits and we can assume they have any convenient value. So
12893 making the transformation is safe.
12894
12895 2. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is UNKNOWN.
12896 In this case the upper bits of op0 are undefined. We should not make
12897 the simplification in that case as we do not know the contents of
12898 those bits.
12899
12900 3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is not UNKNOWN.
12901 In that case we know those bits are zeros or ones. We must also be
12902 sure that they are the same as the upper bits of op1.
12903
12904 We can never remove a SUBREG for a non-equality comparison because
12905 the sign bit is in a different place in the underlying object. */
12906
12907 rtx_code op0_mco_code = SET;
12908 if (op1 == const0_rtx)
12909 op0_mco_code = code == NE || code == EQ ? EQ : COMPARE;
12910
12911 op0 = make_compound_operation (op0, op0_mco_code);
12912 op1 = make_compound_operation (op1, SET);
12913
12914 if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
12915 && is_int_mode (GET_MODE (op0), &mode)
12916 && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
12917 && (code == NE || code == EQ))
12918 {
12919 if (paradoxical_subreg_p (op0))
12920 {
12921 /* For paradoxical subregs, allow case 1 as above. Case 3 isn't
12922 implemented. */
12923 if (REG_P (SUBREG_REG (op0)))
12924 {
12925 op0 = SUBREG_REG (op0);
12926 op1 = gen_lowpart (inner_mode, op1);
12927 }
12928 }
12929 else if (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
12930 && (nonzero_bits (SUBREG_REG (op0), inner_mode)
12931 & ~GET_MODE_MASK (mode)) == 0)
12932 {
12933 tem = gen_lowpart (inner_mode, op1);
12934
12935 if ((nonzero_bits (tem, inner_mode) & ~GET_MODE_MASK (mode)) == 0)
12936 op0 = SUBREG_REG (op0), op1 = tem;
12937 }
12938 }
12939
12940 /* We now do the opposite procedure: Some machines don't have compare
12941 insns in all modes. If OP0's mode is an integer mode smaller than a
12942 word and we can't do a compare in that mode, see if there is a larger
12943 mode for which we can do the compare. There are a number of cases in
12944 which we can use the wider mode. */
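/* E.g. on a target with an SImode compare but no QImode one, an unsigned
   QImode comparison can be carried out by zero-extending both operands
   to SImode first (an illustrative instance of the widening below).  */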

  if (is_int_mode (GET_MODE (op0), &mode)
      && GET_MODE_SIZE (mode) < UNITS_PER_WORD
      && ! have_insn_for (COMPARE, mode))
    FOR_EACH_WIDER_MODE (tmode_iter, mode)
      {
        tmode = tmode_iter.require ();
        if (!HWI_COMPUTABLE_MODE_P (tmode))
          break;
        if (have_insn_for (COMPARE, tmode))
          {
            int zero_extended;

            /* If this is a test for negative, we can make an explicit
               test of the sign bit.  Test this first so we can use
               a paradoxical subreg to extend OP0.  */

            if (op1 == const0_rtx && (code == LT || code == GE)
                && HWI_COMPUTABLE_MODE_P (mode))
              {
                unsigned HOST_WIDE_INT sign
                  = HOST_WIDE_INT_1U << (GET_MODE_BITSIZE (mode) - 1);
                op0 = simplify_gen_binary (AND, tmode,
                                           gen_lowpart (tmode, op0),
                                           gen_int_mode (sign, tmode));
                code = (code == LT) ? NE : EQ;
                break;
              }

            /* If the only nonzero bits in OP0 and OP1 are those in the
               narrower mode and this is an equality or unsigned comparison,
               we can use the wider mode.  Similarly for sign-extended
               values, in which case it is true for all comparisons.  */
            zero_extended = ((code == EQ || code == NE
                              || code == GEU || code == GTU
                              || code == LEU || code == LTU)
                             && (nonzero_bits (op0, tmode)
                                 & ~GET_MODE_MASK (mode)) == 0
                             && ((CONST_INT_P (op1)
                                  || (nonzero_bits (op1, tmode)
                                      & ~GET_MODE_MASK (mode)) == 0)));

            if (zero_extended
                || ((num_sign_bit_copies (op0, tmode)
                     > (unsigned int) (GET_MODE_PRECISION (tmode)
                                       - GET_MODE_PRECISION (mode)))
                    && (num_sign_bit_copies (op1, tmode)
                        > (unsigned int) (GET_MODE_PRECISION (tmode)
                                          - GET_MODE_PRECISION (mode)))))
              {
                /* If OP0 is an AND and we don't have an AND in MODE either,
                   make a new AND in the proper mode.  */
                if (GET_CODE (op0) == AND
                    && !have_insn_for (AND, mode))
                  op0 = simplify_gen_binary (AND, tmode,
                                             gen_lowpart (tmode,
                                                          XEXP (op0, 0)),
                                             gen_lowpart (tmode,
                                                          XEXP (op0, 1)));
                else
                  {
                    if (zero_extended)
                      {
                        op0 = simplify_gen_unary (ZERO_EXTEND, tmode,
                                                  op0, mode);
                        op1 = simplify_gen_unary (ZERO_EXTEND, tmode,
                                                  op1, mode);
                      }
                    else
                      {
                        op0 = simplify_gen_unary (SIGN_EXTEND, tmode,
                                                  op0, mode);
                        op1 = simplify_gen_unary (SIGN_EXTEND, tmode,
                                                  op1, mode);
                      }
                    break;
                  }
              }
          }
      }

  /* We may have changed the comparison operands.  Re-canonicalize.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  /* If this machine only supports a subset of valid comparisons, see if we
     can convert an unsupported one into a supported one.  */
  target_canonicalize_comparison (&code, &op0, &op1, 0);

  *pop0 = op0;
  *pop1 = op1;

  return code;
}

/* Utility function for record_value_for_reg.  Count number of
   rtxs in X.  */
static int
count_rtxs (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt;
  int i, j, ret = 1;

  if (GET_RTX_CLASS (code) == RTX_BIN_ARITH
      || GET_RTX_CLASS (code) == RTX_COMM_ARITH)
    {
      rtx x0 = XEXP (x, 0);
      rtx x1 = XEXP (x, 1);

      if (x0 == x1)
        return 1 + 2 * count_rtxs (x0);

      if ((GET_RTX_CLASS (GET_CODE (x1)) == RTX_BIN_ARITH
           || GET_RTX_CLASS (GET_CODE (x1)) == RTX_COMM_ARITH)
          && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
        return 2 + 2 * count_rtxs (x0)
               + count_rtxs (x0 == XEXP (x1, 0)
                             ? XEXP (x1, 1) : XEXP (x1, 0));

      if ((GET_RTX_CLASS (GET_CODE (x0)) == RTX_BIN_ARITH
           || GET_RTX_CLASS (GET_CODE (x0)) == RTX_COMM_ARITH)
          && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
        return 2 + 2 * count_rtxs (x1)
               + count_rtxs (x1 == XEXP (x0, 0)
                             ? XEXP (x0, 1) : XEXP (x0, 0));
    }

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      ret += count_rtxs (XEXP (x, i));
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
        ret += count_rtxs (XVECEXP (x, i, j));

  return ret;
}
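
/* For illustration (hypothetical rtx): given X = (plus A (plus A B)),
   where both occurrences of A are the same shared rtx, count_rtxs
   returns 2 + 2 * count_rtxs (A) + count_rtxs (B): the two PLUS codes,
   A's size counted once per occurrence, and B's size, without actually
   walking the shared subexpression a second time.  */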

/* Utility function for following routine.  Called when X is part of a value
   being stored into last_set_value.  Sets last_set_table_tick
   for each register mentioned.  Similar to mention_regs in cse.c.  */

static void
update_table_tick (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  const char *fmt = GET_RTX_FORMAT (code);
  int i, j;

  if (code == REG)
    {
      unsigned int regno = REGNO (x);
      unsigned int endregno = END_REGNO (x);
      unsigned int r;

      for (r = regno; r < endregno; r++)
        {
          reg_stat_type *rsp = &reg_stat[r];
          rsp->last_set_table_tick = label_tick;
        }

      return;
    }

  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    if (fmt[i] == 'e')
      {
        /* Check for identical subexpressions.  If x contains
           identical subexpressions we only have to traverse one of
           them.  */
        if (i == 0 && ARITHMETIC_P (x))
          {
            /* Note that at this point x1 has already been
               processed.  */
            rtx x0 = XEXP (x, 0);
            rtx x1 = XEXP (x, 1);

            /* If x0 and x1 are identical then there is no need to
               process x0.  */
            if (x0 == x1)
              break;

            /* If x0 is identical to a subexpression of x1 then while
               processing x1, x0 has already been processed.  Thus we
               are done with x.  */
            if (ARITHMETIC_P (x1)
                && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
              break;

            /* If x1 is identical to a subexpression of x0 then we
               still have to process the rest of x0.  */
            if (ARITHMETIC_P (x0)
                && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
              {
                update_table_tick (XEXP (x0, x1 == XEXP (x0, 0) ? 1 : 0));
                break;
              }
          }

        update_table_tick (XEXP (x, i));
      }
    else if (fmt[i] == 'E')
      for (j = 0; j < XVECLEN (x, i); j++)
        update_table_tick (XVECEXP (x, i, j));
}

/* Record that REG is set to VALUE in insn INSN.  If VALUE is zero, we
   are saying that the register is clobbered and we no longer know its
   value.  If INSN is zero, don't update reg_stat[].last_set; this is
   only permitted with VALUE also zero and is used to invalidate the
   register.  */

static void
record_value_for_reg (rtx reg, rtx_insn *insn, rtx value)
{
  unsigned int regno = REGNO (reg);
  unsigned int endregno = END_REGNO (reg);
  unsigned int i;
  reg_stat_type *rsp;

  /* If VALUE contains REG and we have a previous value for REG, substitute
     the previous value.  */
  if (value && insn && reg_overlap_mentioned_p (reg, value))
    {
      rtx tem;

      /* Set things up so get_last_value is allowed to see anything set up to
         our insn.  */
      subst_low_luid = DF_INSN_LUID (insn);
      tem = get_last_value (reg);

      /* If TEM is simply a binary operation with two CLOBBERs as operands,
         it isn't going to be useful and will take a lot of time to process,
         so just use the CLOBBER.  */

      if (tem)
        {
          if (ARITHMETIC_P (tem)
              && GET_CODE (XEXP (tem, 0)) == CLOBBER
              && GET_CODE (XEXP (tem, 1)) == CLOBBER)
            tem = XEXP (tem, 0);
          else if (count_occurrences (value, reg, 1) >= 2)
            {
              /* If there are two or more occurrences of REG in VALUE,
                 prevent the value from growing too much.  */
              if (count_rtxs (tem) > MAX_LAST_VALUE_RTL)
                tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
            }

          value = replace_rtx (copy_rtx (value), reg, tem);
        }
    }

  /* For each register modified, show we don't know its value, that
     we don't know about its bitwise content, that its value has been
     updated, and that we don't know the location of the death of the
     register.  */
  for (i = regno; i < endregno; i++)
    {
      rsp = &reg_stat[i];

      if (insn)
        rsp->last_set = insn;

      rsp->last_set_value = 0;
      rsp->last_set_mode = VOIDmode;
      rsp->last_set_nonzero_bits = 0;
      rsp->last_set_sign_bit_copies = 0;
      rsp->last_death = 0;
      rsp->truncated_to_mode = VOIDmode;
    }

  /* Mark registers that are being referenced in this value.  */
  if (value)
    update_table_tick (value);

  /* Now update the status of each register being set.
     If someone is using this register in this block, set this register
     to invalid since we will get confused between the two lives in this
     basic block.  This makes using this register always invalid.  In cse, we
     scan the table to invalidate all entries using this register, but this
     is too much work for us.  */

  for (i = regno; i < endregno; i++)
    {
      rsp = &reg_stat[i];
      rsp->last_set_label = label_tick;
      if (!insn
          || (value && rsp->last_set_table_tick >= label_tick_ebb_start))
        rsp->last_set_invalid = 1;
      else
        rsp->last_set_invalid = 0;
    }

  /* The value being assigned might refer to X (like in "x++;").  In that
     case, we must replace it with (clobber (const_int 0)) to prevent
     infinite loops.  */
  rsp = &reg_stat[regno];
  if (value && !get_last_value_validate (&value, insn, label_tick, 0))
    {
      value = copy_rtx (value);
      if (!get_last_value_validate (&value, insn, label_tick, 1))
        value = 0;
    }

  /* For the main register being modified, update the value, the mode, the
     nonzero bits, and the number of sign bit copies.  */

  rsp->last_set_value = value;

  if (value)
    {
      machine_mode mode = GET_MODE (reg);
      subst_low_luid = DF_INSN_LUID (insn);
      rsp->last_set_mode = mode;
      if (GET_MODE_CLASS (mode) == MODE_INT
          && HWI_COMPUTABLE_MODE_P (mode))
        mode = nonzero_bits_mode;
      rsp->last_set_nonzero_bits = nonzero_bits (value, mode);
      rsp->last_set_sign_bit_copies
        = num_sign_bit_copies (value, GET_MODE (reg));
    }
}
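
/* A minimal illustration (hypothetical insn): for "x++", i.e.
   (set (reg x) (plus (reg x) (const_int 1))), VALUE mentions REG itself.
   record_value_for_reg first substitutes the previous value of x (or a
   (clobber (const_int 0)) when none is known), so last_set_value never
   refers to the register it describes and later substitution cannot loop
   forever.  */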

/* Called via note_stores from record_dead_and_set_regs to handle one
   SET or CLOBBER in an insn.  DATA is the instruction in which the
   set is occurring.  */

static void
record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data)
{
  rtx_insn *record_dead_insn = (rtx_insn *) data;

  if (GET_CODE (dest) == SUBREG)
    dest = SUBREG_REG (dest);

  if (!record_dead_insn)
    {
      if (REG_P (dest))
        record_value_for_reg (dest, NULL, NULL_RTX);
      return;
    }

  if (REG_P (dest))
    {
      /* If we are setting the whole register, we know its value.  Otherwise
         show that we don't know the value.  We can handle a SUBREG if it's
         the low part, but we must be careful with paradoxical SUBREGs on
         RISC architectures because we cannot strip e.g. an extension around
         a load and record the naked load since the RTL middle-end considers
         that the upper bits are defined according to LOAD_EXTEND_OP.  */
      if (GET_CODE (setter) == SET && dest == SET_DEST (setter))
        record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
      else if (GET_CODE (setter) == SET
               && GET_CODE (SET_DEST (setter)) == SUBREG
               && SUBREG_REG (SET_DEST (setter)) == dest
               && known_le (GET_MODE_PRECISION (GET_MODE (dest)),
                            BITS_PER_WORD)
               && subreg_lowpart_p (SET_DEST (setter)))
        record_value_for_reg (dest, record_dead_insn,
                              WORD_REGISTER_OPERATIONS
                              && word_register_operation_p (SET_SRC (setter))
                              && paradoxical_subreg_p (SET_DEST (setter))
                              ? SET_SRC (setter)
                              : gen_lowpart (GET_MODE (dest),
                                             SET_SRC (setter)));
      else
        record_value_for_reg (dest, record_dead_insn, NULL_RTX);
    }
  else if (MEM_P (dest)
           /* Ignore pushes, they clobber nothing.  */
           && ! push_operand (dest, GET_MODE (dest)))
    mem_last_set = DF_INSN_LUID (record_dead_insn);
}
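
/* For example (hypothetical insn on a 64-bit target): given
   (set (subreg:DI (reg:SI x) 0) (reg:DI y)), DEST is (reg:SI x) and we
   record (subreg:SI (reg:DI y) 0) as its value; but when
   WORD_REGISTER_OPERATIONS holds and SET_SRC is a word-register
   operation, the paradoxical store defines the whole word, so SET_SRC
   itself can be recorded.  */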

/* Update the records of when each REG was most recently set or killed
   for the things done by INSN.  This is the last thing done in processing
   INSN in the combiner loop.

   We update reg_stat[], in particular fields last_set, last_set_value,
   last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies,
   last_death, and also the similar information mem_last_set (which insn
   most recently modified memory) and last_call_luid (which insn was the
   most recent subroutine call).  */

static void
record_dead_and_set_regs (rtx_insn *insn)
{
  rtx link;
  unsigned int i;

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    {
      if (REG_NOTE_KIND (link) == REG_DEAD
          && REG_P (XEXP (link, 0)))
        {
          unsigned int regno = REGNO (XEXP (link, 0));
          unsigned int endregno = END_REGNO (XEXP (link, 0));

          for (i = regno; i < endregno; i++)
            {
              reg_stat_type *rsp;

              rsp = &reg_stat[i];
              rsp->last_death = insn;
            }
        }
      else if (REG_NOTE_KIND (link) == REG_INC)
        record_value_for_reg (XEXP (link, 0), insn, NULL_RTX);
    }

  if (CALL_P (insn))
    {
      hard_reg_set_iterator hrsi;
      EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call, 0, i, hrsi)
        {
          reg_stat_type *rsp;

          rsp = &reg_stat[i];
          rsp->last_set_invalid = 1;
          rsp->last_set = insn;
          rsp->last_set_value = 0;
          rsp->last_set_mode = VOIDmode;
          rsp->last_set_nonzero_bits = 0;
          rsp->last_set_sign_bit_copies = 0;
          rsp->last_death = 0;
          rsp->truncated_to_mode = VOIDmode;
        }

      last_call_luid = mem_last_set = DF_INSN_LUID (insn);

      /* We can't combine into a call pattern.  Remember, though, that
         the return value register is set at this LUID.  We could
         still replace a register with the return value from the
         wrong subroutine call!  */
      note_stores (PATTERN (insn), record_dead_and_set_regs_1, NULL_RTX);
    }
  else
    note_stores (PATTERN (insn), record_dead_and_set_regs_1, insn);
}

/* If a SUBREG has the promoted bit set, it is in fact a property of the
   register present in the SUBREG, so for each such SUBREG go back and
   adjust nonzero and sign bit information of the registers that are
   known to have some zero/sign bits set.

   This is needed because when combine blows the SUBREGs away, the
   information on zero/sign bits is lost and further combines can be
   missed because of that.  */

static void
record_promoted_value (rtx_insn *insn, rtx subreg)
{
  struct insn_link *links;
  rtx set;
  unsigned int regno = REGNO (SUBREG_REG (subreg));
  machine_mode mode = GET_MODE (subreg);

  if (!HWI_COMPUTABLE_MODE_P (mode))
    return;

  for (links = LOG_LINKS (insn); links;)
    {
      reg_stat_type *rsp;

      insn = links->insn;
      set = single_set (insn);

      if (! set || !REG_P (SET_DEST (set))
          || REGNO (SET_DEST (set)) != regno
          || GET_MODE (SET_DEST (set)) != GET_MODE (SUBREG_REG (subreg)))
        {
          links = links->next;
          continue;
        }

      rsp = &reg_stat[regno];
      if (rsp->last_set == insn)
        {
          if (SUBREG_PROMOTED_UNSIGNED_P (subreg))
            rsp->last_set_nonzero_bits &= GET_MODE_MASK (mode);
        }

      if (REG_P (SET_SRC (set)))
        {
          regno = REGNO (SET_SRC (set));
          links = LOG_LINKS (insn);
        }
      else
        break;
    }
}
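
/* Sketch (hypothetical): if (reg:SI r) holds a value promoted from QImode
   and the SUBREG is marked SUBREG_PROMOTED_UNSIGNED_P, the insn that last
   set r can only have produced QImode-worth of nonzero bits, so masking
   last_set_nonzero_bits with GET_MODE_MASK (QImode) recovers the
   zero-extension knowledge the SUBREG carried before it was blown away.  */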

/* Check if X, a register, is known to contain a value already
   truncated to MODE.  In this case we can use a subreg to refer to
   the truncated value even though in the generic case we would need
   an explicit truncation.  */

static bool
reg_truncated_to_mode (machine_mode mode, const_rtx x)
{
  reg_stat_type *rsp = &reg_stat[REGNO (x)];
  machine_mode truncated = rsp->truncated_to_mode;

  if (truncated == 0
      || rsp->truncation_label < label_tick_ebb_start)
    return false;
  if (!partial_subreg_p (mode, truncated))
    return true;
  if (TRULY_NOOP_TRUNCATION_MODES_P (mode, truncated))
    return true;
  return false;
}
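
/* Example (hypothetical target where truncation is not a no-op): if r was
   recorded as truncated to SImode in this EBB, a request for SImode, for
   any mode that is not narrower than SImode, or for a mode reachable from
   it by a truly no-op truncation can be satisfied by a plain
   (subreg (reg:DI r) ...) with no explicit TRUNCATE.  */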

/* If X is a hard reg or a subreg record the mode that the register is
   accessed in.  For non-TARGET_TRULY_NOOP_TRUNCATION targets we might be
   able to turn a truncate into a subreg using this information.  Return true
   if traversing X is complete.  */

static bool
record_truncated_value (rtx x)
{
  machine_mode truncated_mode;
  reg_stat_type *rsp;

  if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
    {
      machine_mode original_mode = GET_MODE (SUBREG_REG (x));
      truncated_mode = GET_MODE (x);

      if (!partial_subreg_p (truncated_mode, original_mode))
        return true;

      if (TRULY_NOOP_TRUNCATION_MODES_P (truncated_mode, original_mode))
        return true;

      x = SUBREG_REG (x);
    }
  /* ??? For hard-regs we now record everything.  We might be able to
     optimize this using last_set_mode.  */
  else if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
    truncated_mode = GET_MODE (x);
  else
    return false;

  rsp = &reg_stat[REGNO (x)];
  if (rsp->truncated_to_mode == 0
      || rsp->truncation_label < label_tick_ebb_start
      || partial_subreg_p (truncated_mode, rsp->truncated_to_mode))
    {
      rsp->truncated_to_mode = truncated_mode;
      rsp->truncation_label = label_tick;
    }

  return true;
}
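
/* Illustration (hypothetical): on such a target, seeing
   (subreg:SI (reg:DI r) 0) records that r is accessed in SImode, which
   lets reg_truncated_to_mode turn a later (truncate:SI (reg:DI r)) into a
   bare lowpart subreg.  Narrower access modes replace wider recorded
   ones, so the narrowest mode observed this tick is kept.  */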

/* Callback for note_uses.  Find hardregs and subregs of pseudos and
   the modes they are used in.  This can help turn TRUNCATEs into
   SUBREGs.  */

static void
record_truncated_values (rtx *loc, void *data ATTRIBUTE_UNUSED)
{
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, *loc, NONCONST)
    if (record_truncated_value (*iter))
      iter.skip_subrtxes ();
}

/* Scan X for promoted SUBREGs.  For each one found,
   note what it implies to the registers used in it.  */

static void
check_promoted_subreg (rtx_insn *insn, rtx x)
{
  if (GET_CODE (x) == SUBREG
      && SUBREG_PROMOTED_VAR_P (x)
      && REG_P (SUBREG_REG (x)))
    record_promoted_value (insn, x);
  else
    {
      const char *format = GET_RTX_FORMAT (GET_CODE (x));
      int i, j;

      for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
        switch (format[i])
          {
          case 'e':
            check_promoted_subreg (insn, XEXP (x, i));
            break;
          case 'V':
          case 'E':
            if (XVEC (x, i) != 0)
              for (j = 0; j < XVECLEN (x, i); j++)
                check_promoted_subreg (insn, XVECEXP (x, i, j));
            break;
          }
    }
}

/* Verify that all the registers and memory references mentioned in *LOC are
   still valid.  *LOC was part of a value set in INSN when label_tick was
   equal to TICK.  Return 0 if some are not.  If REPLACE is nonzero, replace
   the invalid references with (clobber (const_int 0)) and return 1.  This
   replacement is useful because we often can get useful information about
   the form of a value (e.g., if it was produced by a shift that always
   produces -1 or 0) even though we don't know exactly what registers it
   was produced from.  */

static int
get_last_value_validate (rtx *loc, rtx_insn *insn, int tick, int replace)
{
  rtx x = *loc;
  const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
  int len = GET_RTX_LENGTH (GET_CODE (x));
  int i, j;

  if (REG_P (x))
    {
      unsigned int regno = REGNO (x);
      unsigned int endregno = END_REGNO (x);
      unsigned int j;

      for (j = regno; j < endregno; j++)
        {
          reg_stat_type *rsp = &reg_stat[j];
          if (rsp->last_set_invalid
              /* If this is a pseudo-register that was only set once and not
                 live at the beginning of the function, it is always valid.  */
              || (! (regno >= FIRST_PSEUDO_REGISTER
                     && regno < reg_n_sets_max
                     && REG_N_SETS (regno) == 1
                     && (!REGNO_REG_SET_P
                          (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
                           regno)))
                  && rsp->last_set_label > tick))
            {
              if (replace)
                *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
              return replace;
            }
        }

      return 1;
    }
  /* If this is a memory reference, make sure that there were no stores after
     it that might have clobbered the value.  We don't have alias info, so we
     assume any store invalidates it.  Moreover, we only have local UIDs, so
     we also assume that there were stores in the intervening basic blocks.  */
  else if (MEM_P (x) && !MEM_READONLY_P (x)
           && (tick != label_tick || DF_INSN_LUID (insn) <= mem_last_set))
    {
      if (replace)
        *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
      return replace;
    }

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'e')
        {
          /* Check for identical subexpressions.  If x contains
             identical subexpressions we only have to traverse one of
             them.  */
          if (i == 1 && ARITHMETIC_P (x))
            {
              /* Note that at this point x0 has already been checked
                 and found valid.  */
              rtx x0 = XEXP (x, 0);
              rtx x1 = XEXP (x, 1);

              /* If x0 and x1 are identical then x is also valid.  */
              if (x0 == x1)
                return 1;

              /* If x1 is identical to a subexpression of x0 then
                 while checking x0, x1 has already been checked.  Thus
                 it is valid, and so is x.  */
              if (ARITHMETIC_P (x0)
                  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
                return 1;

              /* If x0 is identical to a subexpression of x1 then x is
                 valid iff the rest of x1 is valid.  */
              if (ARITHMETIC_P (x1)
                  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
                return
                  get_last_value_validate (&XEXP (x1,
                                                  x0 == XEXP (x1, 0) ? 1 : 0),
                                           insn, tick, replace);
            }

          if (get_last_value_validate (&XEXP (x, i), insn, tick,
                                       replace) == 0)
            return 0;
        }
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          if (get_last_value_validate (&XVECEXP (x, i, j),
                                       insn, tick, replace) == 0)
            return 0;
    }

  /* If we haven't found a reason for it to be invalid, it is valid.  */
  return 1;
}
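
/* Sketch of the REPLACE behavior (hypothetical value): if r5's recorded
   value is (ashiftrt:SI (reg:SI r7) (const_int 31)) and r7 has since been
   set again, the r7 reference is invalid; with REPLACE nonzero we
   substitute (clobber (const_int 0)) for it but keep the shift,
   preserving the still-useful fact that the value is either 0 or -1.  */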

/* Get the last value assigned to X, if known.  Some registers
   in the value may be replaced with (clobber (const_int 0)) if their value
   is no longer known reliably.  */

static rtx
get_last_value (const_rtx x)
{
  unsigned int regno;
  rtx value;
  reg_stat_type *rsp;

  /* If this is a non-paradoxical SUBREG, get the value of its operand and
     then convert it to the desired mode.  If this is a paradoxical SUBREG,
     we cannot predict what values the "extra" bits might have.  */
  if (GET_CODE (x) == SUBREG
      && subreg_lowpart_p (x)
      && !paradoxical_subreg_p (x)
      && (value = get_last_value (SUBREG_REG (x))) != 0)
    return gen_lowpart (GET_MODE (x), value);

  if (!REG_P (x))
    return 0;

  regno = REGNO (x);
  rsp = &reg_stat[regno];
  value = rsp->last_set_value;

  /* If we don't have a value, or if it isn't for this basic block and
     it's either a hard register, set more than once, or live at the
     beginning of the function, return 0.

     Because if it's not live at the beginning of the function then the reg
     is always set before being used (is never used without being set).
     And, if it's set only once, and it's always set before use, then all
     uses must have the same last value, even if it's not from this basic
     block.  */

  if (value == 0
      || (rsp->last_set_label < label_tick_ebb_start
          && (regno < FIRST_PSEUDO_REGISTER
              || regno >= reg_n_sets_max
              || REG_N_SETS (regno) != 1
              || REGNO_REG_SET_P
                 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), regno))))
    return 0;

  /* If the value was set in a later insn than the ones we are processing,
     we can't use it even if the register was only set once.  */
  if (rsp->last_set_label == label_tick
      && DF_INSN_LUID (rsp->last_set) >= subst_low_luid)
    return 0;

  /* If fewer bits were set than what we are asked for now, we cannot use
     the value.  */
  if (maybe_lt (GET_MODE_PRECISION (rsp->last_set_mode),
                GET_MODE_PRECISION (GET_MODE (x))))
    return 0;

  /* If the value has all its registers valid, return it.  */
  if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 0))
    return value;

  /* Otherwise, make a copy and replace any invalid register with
     (clobber (const_int 0)).  If that fails for some reason, return 0.  */

  value = copy_rtx (value);
  if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 1))
    return value;

  return 0;
}
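
/* Usage sketch (hypothetical insns within one basic block):
     insn A: (set (reg:SI r9) (const_int 42))
     insn B: (set (reg:SI r10) (reg:SI r9))
   While substituting into B, get_last_value (r9) returns (const_int 42),
   provided A's LUID is below subst_low_luid and r9's recorded mode is at
   least as wide as the mode requested.  */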

/* Define three variables used for communication between the following
   routines.  */

static unsigned int reg_dead_regno, reg_dead_endregno;
static int reg_dead_flag;

/* Function called via note_stores from reg_dead_at_p.

   If DEST is within [reg_dead_regno, reg_dead_endregno), set
   reg_dead_flag to 1 if X is a CLOBBER and to -1 if it is a SET.  */

static void
reg_dead_at_p_1 (rtx dest, const_rtx x, void *data ATTRIBUTE_UNUSED)
{
  unsigned int regno, endregno;

  if (!REG_P (dest))
    return;

  regno = REGNO (dest);
  endregno = END_REGNO (dest);
  if (reg_dead_endregno > regno && reg_dead_regno < endregno)
    reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1;
}

/* Return nonzero if REG is known to be dead at INSN.

   We scan backwards from INSN.  If we hit a REG_DEAD note or a CLOBBER
   referencing REG, it is dead.  If we hit a SET referencing REG, it is
   live.  Otherwise, see if it is live or dead at the start of the basic
   block we are in.  Hard regs marked as being live in NEWPAT_USED_REGS
   must be assumed to be always live.  */

static int
reg_dead_at_p (rtx reg, rtx_insn *insn)
{
  basic_block block;
  unsigned int i;

  /* Set variables for reg_dead_at_p_1.  */
  reg_dead_regno = REGNO (reg);
  reg_dead_endregno = END_REGNO (reg);

  reg_dead_flag = 0;

  /* Check that reg isn't mentioned in NEWPAT_USED_REGS.  For fixed registers
     we allow the machine description to decide whether use-and-clobber
     patterns are OK.  */
  if (reg_dead_regno < FIRST_PSEUDO_REGISTER)
    {
      for (i = reg_dead_regno; i < reg_dead_endregno; i++)
        if (!fixed_regs[i] && TEST_HARD_REG_BIT (newpat_used_regs, i))
          return 0;
    }

  /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, or
     beginning of basic block.  */
  block = BLOCK_FOR_INSN (insn);
  for (;;)
    {
      if (INSN_P (insn))
        {
          if (find_regno_note (insn, REG_UNUSED, reg_dead_regno))
            return 1;

          note_stores (PATTERN (insn), reg_dead_at_p_1, NULL);
          if (reg_dead_flag)
            return reg_dead_flag == 1 ? 1 : 0;

          if (find_regno_note (insn, REG_DEAD, reg_dead_regno))
            return 1;
        }

      if (insn == BB_HEAD (block))
        break;

      insn = PREV_INSN (insn);
    }

  /* Look at live-in sets for the basic block that we were in.  */
  for (i = reg_dead_regno; i < reg_dead_endregno; i++)
    if (REGNO_REG_SET_P (df_get_live_in (block), i))
      return 0;

  return 1;
}
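
/* Walkthrough (hypothetical block):
     insn 1: (set (reg:SI r3) ...)
     insn 2: ... (use (reg:SI r3)) carrying a REG_DEAD note for r3
     insn 3: <query here>
   Scanning backwards from insn 3, the REG_DEAD note on insn 2 proves r3
   dead; had we met the SET on insn 1 first, r3 would be live.  If the
   block head is reached without an answer, the live-in set decides.  */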

/* Note hard registers in X that are used.  */

static void
mark_used_regs_combine (rtx x)
{
  RTX_CODE code = GET_CODE (x);
  unsigned int regno;
  int i;

  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST:
    CASE_CONST_ANY:
    case PC:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case ASM_INPUT:
    /* CC0 must die in the insn after it is set, so we don't need to take
       special note of it here.  */
    case CC0:
      return;

    case CLOBBER:
      /* If we are clobbering a MEM, mark any hard registers inside the
         address as used.  */
      if (MEM_P (XEXP (x, 0)))
        mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
      return;

    case REG:
      regno = REGNO (x);
      /* A hard reg in a wide mode may really be multiple registers.
         If so, mark all of them just like the first.  */
      if (regno < FIRST_PSEUDO_REGISTER)
        {
          /* None of this applies to the stack, frame or arg pointers.  */
          if (regno == STACK_POINTER_REGNUM
              || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
                  && regno == HARD_FRAME_POINTER_REGNUM)
              || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
                  && regno == ARG_POINTER_REGNUM && fixed_regs[regno])
              || regno == FRAME_POINTER_REGNUM)
            return;

          add_to_hard_reg_set (&newpat_used_regs, GET_MODE (x), regno);
        }
      return;

    case SET:
      {
        /* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
           the address.  */
        rtx testreg = SET_DEST (x);

        while (GET_CODE (testreg) == SUBREG
               || GET_CODE (testreg) == ZERO_EXTRACT
               || GET_CODE (testreg) == STRICT_LOW_PART)
          testreg = XEXP (testreg, 0);

        if (MEM_P (testreg))
          mark_used_regs_combine (XEXP (testreg, 0));

        mark_used_regs_combine (SET_SRC (x));
      }
      return;

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */

  {
    const char *fmt = GET_RTX_FORMAT (code);

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
        if (fmt[i] == 'e')
          mark_used_regs_combine (XEXP (x, i));
        else if (fmt[i] == 'E')
          {
            int j;

            for (j = 0; j < XVECLEN (x, i); j++)
              mark_used_regs_combine (XVECEXP (x, i, j));
          }
      }
  }
}

/* Remove register number REGNO from the dead registers list of INSN.

   Return the note used to record the death, if there was one.  */

rtx
remove_death (unsigned int regno, rtx_insn *insn)
{
  rtx note = find_regno_note (insn, REG_DEAD, regno);

  if (note)
    remove_note (insn, note);

  return note;
}

/* For each register (hardware or pseudo) used within expression X, if its
   death is in an instruction with luid between FROM_LUID (inclusive) and
   TO_INSN (exclusive), put a REG_DEAD note for that register in the
   list headed by PNOTES.

   That said, don't move registers killed by maybe_kill_insn.

   This is done when X is being merged by combination into TO_INSN.  These
   notes will then be distributed as needed.  */

static void
move_deaths (rtx x, rtx maybe_kill_insn, int from_luid, rtx_insn *to_insn,
             rtx *pnotes)
{
  const char *fmt;
  int len, i;
  enum rtx_code code = GET_CODE (x);

  if (code == REG)
    {
      unsigned int regno = REGNO (x);
      rtx_insn *where_dead = reg_stat[regno].last_death;

      /* If we do not know where the register died, it may still die between
         FROM_LUID and TO_INSN.  If so, find it.  This is PR83304.  */
      if (!where_dead || DF_INSN_LUID (where_dead) >= DF_INSN_LUID (to_insn))
        {
          rtx_insn *insn = prev_real_nondebug_insn (to_insn);
          while (insn
                 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (to_insn)
                 && DF_INSN_LUID (insn) >= from_luid)
            {
              if (dead_or_set_regno_p (insn, regno))
                {
                  if (find_regno_note (insn, REG_DEAD, regno))
                    where_dead = insn;
                  break;
                }

              insn = prev_real_nondebug_insn (insn);
            }
        }

      /* Don't move the register if it gets killed in between from and to.  */
      if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
          && ! reg_referenced_p (x, maybe_kill_insn))
        return;

      if (where_dead
          && BLOCK_FOR_INSN (where_dead) == BLOCK_FOR_INSN (to_insn)
          && DF_INSN_LUID (where_dead) >= from_luid
          && DF_INSN_LUID (where_dead) < DF_INSN_LUID (to_insn))
        {
          rtx note = remove_death (regno, where_dead);

          /* It is possible for the call above to return 0.  This can occur
             when last_death points to I2 or I1 that we combined with.
             In that case make a new note.

             We must also check for the case where X is a hard register
             and NOTE is a death note for a range of hard registers
             including X.  In that case, we must put REG_DEAD notes for
             the remaining registers in place of NOTE.  */

          if (note != 0 && regno < FIRST_PSEUDO_REGISTER
              && partial_subreg_p (GET_MODE (x), GET_MODE (XEXP (note, 0))))
            {
              unsigned int deadregno = REGNO (XEXP (note, 0));
              unsigned int deadend = END_REGNO (XEXP (note, 0));
              unsigned int ourend = END_REGNO (x);
              unsigned int i;

              for (i = deadregno; i < deadend; i++)
                if (i < regno || i >= ourend)
                  add_reg_note (where_dead, REG_DEAD, regno_reg_rtx[i]);
            }

          /* If we didn't find any note, or if we found a REG_DEAD note that
             covers only part of the given reg, and we have a multi-reg hard
             register, then to be safe we must check for REG_DEAD notes
             for each register other than the first.  They could have
             their own REG_DEAD notes lying around.  */
          else if ((note == 0
                    || (note != 0
                        && partial_subreg_p (GET_MODE (XEXP (note, 0)),
                                             GET_MODE (x))))
                   && regno < FIRST_PSEUDO_REGISTER
                   && REG_NREGS (x) > 1)
            {
              unsigned int ourend = END_REGNO (x);
              unsigned int i, offset;
              rtx oldnotes = 0;

              if (note)
                offset = hard_regno_nregs (regno, GET_MODE (XEXP (note, 0)));
              else
                offset = 1;

              for (i = regno + offset; i < ourend; i++)
                move_deaths (regno_reg_rtx[i],
                             maybe_kill_insn, from_luid, to_insn, &oldnotes);
            }

          if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x))
            {
              XEXP (note, 1) = *pnotes;
              *pnotes = note;
            }
          else
            *pnotes = alloc_reg_note (REG_DEAD, x, *pnotes);
        }

      return;
    }

  else if (GET_CODE (x) == SET)
    {
      rtx dest = SET_DEST (x);

      move_deaths (SET_SRC (x), maybe_kill_insn, from_luid, to_insn, pnotes);

      /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
         that accesses one word of a multi-word item, some
         piece of every register in the expression is used by
         this insn, so remove any old death.  */
      /* ??? So why do we test for equality of the sizes?  */

      if (GET_CODE (dest) == ZERO_EXTRACT
          || GET_CODE (dest) == STRICT_LOW_PART
          || (GET_CODE (dest) == SUBREG
              && !read_modify_subreg_p (dest)))
        {
          move_deaths (dest, maybe_kill_insn, from_luid, to_insn, pnotes);
          return;
        }

      /* If this is some other SUBREG, we know it replaces the entire
         value, so use that as the destination.  */
      if (GET_CODE (dest) == SUBREG)
        dest = SUBREG_REG (dest);

      /* If this is a MEM, adjust deaths of anything used in the address.
         For a REG (the only other possibility), the entire value is
         being replaced so the old value is not used in this insn.  */

      if (MEM_P (dest))
        move_deaths (XEXP (dest, 0), maybe_kill_insn, from_luid,
                     to_insn, pnotes);
      return;
    }

  else if (GET_CODE (x) == CLOBBER)
    return;

  len = GET_RTX_LENGTH (code);
  fmt = GET_RTX_FORMAT (code);

  for (i = 0; i < len; i++)
    {
      if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_luid,
                         to_insn, pnotes);
        }
      else if (fmt[i] == 'e')
        move_deaths (XEXP (x, i), maybe_kill_insn, from_luid, to_insn, pnotes);
    }
}
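
/* Example (hypothetical combination): if the old I2 carried a REG_DEAD
   note for r6 and r6's use is merged into I3, move_deaths sees that r6
   died between FROM_LUID and TO_INSN, removes the death note from its old
   insn, and queues a REG_DEAD note for r6 on *PNOTES so that
   distribute_notes can later place it on the combined insn.  */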

/* Return 1 if X is the target of a bit-field assignment in BODY, the
   pattern of an insn.  X must be a REG.  */

static int
reg_bitfield_target_p (rtx x, rtx body)
{
  int i;

  if (GET_CODE (body) == SET)
    {
      rtx dest = SET_DEST (body);
      rtx target;
      unsigned int regno, tregno, endregno, endtregno;

      if (GET_CODE (dest) == ZERO_EXTRACT)
        target = XEXP (dest, 0);
      else if (GET_CODE (dest) == STRICT_LOW_PART)
        target = SUBREG_REG (XEXP (dest, 0));
      else
        return 0;

      if (GET_CODE (target) == SUBREG)
        target = SUBREG_REG (target);

      if (!REG_P (target))
        return 0;

      tregno = REGNO (target), regno = REGNO (x);
      if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
        return target == x;

      endtregno = end_hard_regno (GET_MODE (target), tregno);
      endregno = end_hard_regno (GET_MODE (x), regno);

      return endregno > tregno && regno < endtregno;
    }

  else if (GET_CODE (body) == PARALLEL)
    for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
      if (reg_bitfield_target_p (x, XVECEXP (body, 0, i)))
        return 1;

  return 0;
}
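
/* For instance (hypothetical insn):
     (set (zero_extract:SI (reg:SI r4) (const_int 8) (const_int 0)) ...)
   makes reg_bitfield_target_p (r4, body) return 1: r4 is only partially
   written, which callers treat as a (partial) set of r4 when deciding
   where a REG_DEAD note may be placed, even though dead_or_set_p would
   not consider such a store to kill the register.  */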

/* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
   as appropriate.  I3 and I2 are the insns resulting from the combination
   insns including FROM (I2 may be zero).

   ELIM_I2 and ELIM_I1 are either zero or registers that we know will
   not need REG_DEAD notes because they are being substituted for.  This
   saves searching in the most common cases.

   Each note in the list is either ignored or placed on some insns, depending
   on the type of note.  */

static void
distribute_notes (rtx notes, rtx_insn *from_insn, rtx_insn *i3, rtx_insn *i2,
                  rtx elim_i2, rtx elim_i1, rtx elim_i0)
{
  rtx note, next_note;
  rtx tem_note;
  rtx_insn *tem_insn;

  for (note = notes; note; note = next_note)
    {
      rtx_insn *place = 0, *place2 = 0;

      next_note = XEXP (note, 1);
      switch (REG_NOTE_KIND (note))
        {
        case REG_BR_PROB:
        case REG_BR_PRED:
          /* Doesn't matter much where we put this, as long as it's somewhere.
             It is preferable to keep these notes on branches, which is most
             likely to be i3.  */
          place = i3;
          break;

        case REG_NON_LOCAL_GOTO:
          if (JUMP_P (i3))
            place = i3;
          else
            {
              gcc_assert (i2 && JUMP_P (i2));
              place = i2;
            }
          break;

        case REG_EH_REGION:
          /* These notes must remain with the call or trapping instruction.  */
          if (CALL_P (i3))
            place = i3;
          else if (i2 && CALL_P (i2))
            place = i2;
          else
            {
              gcc_assert (cfun->can_throw_non_call_exceptions);
              if (may_trap_p (i3))
                place = i3;
              else if (i2 && may_trap_p (i2))
                place = i2;
              /* ??? Otherwise assume we've combined things such that we
                 can now prove that the instructions can't trap.  Drop the
                 note in this case.  */
            }
          break;

        case REG_ARGS_SIZE:
          /* ??? How to distribute between i3-i1.  Assume i3 contains the
             entire adjustment.  Assert i3 contains at least some adjust.  */
          if (!noop_move_p (i3))
            {
              poly_int64 old_size, args_size = get_args_size (note);
              /* fixup_args_size_notes looks at REG_NORETURN note,
                 so ensure the note is placed there first.  */
              if (CALL_P (i3))
                {
                  rtx *np;
                  for (np = &next_note; *np; np = &XEXP (*np, 1))
                    if (REG_NOTE_KIND (*np) == REG_NORETURN)
                      {
                        rtx n = *np;
                        *np = XEXP (n, 1);
                        XEXP (n, 1) = REG_NOTES (i3);
                        REG_NOTES (i3) = n;
                        break;
                      }
                }
              old_size = fixup_args_size_notes (PREV_INSN (i3), i3, args_size);
              /* emit_call_1 adds for !ACCUMULATE_OUTGOING_ARGS
                 REG_ARGS_SIZE note to all noreturn calls, allow that here.  */
              gcc_assert (maybe_ne (old_size, args_size)
                          || (CALL_P (i3)
                              && !ACCUMULATE_OUTGOING_ARGS
                              && find_reg_note (i3, REG_NORETURN, NULL_RTX)));
            }
          break;

        case REG_NORETURN:
        case REG_SETJMP:
        case REG_TM:
        case REG_CALL_DECL:
        case REG_CALL_NOCF_CHECK:
          /* These notes must remain with the call.  It should not be
             possible for both I2 and I3 to be a call.  */
          if (CALL_P (i3))
            place = i3;
          else
            {
              gcc_assert (i2 && CALL_P (i2));
              place = i2;
            }
          break;

        case REG_UNUSED:
          /* Any clobbers for i3 may still exist, and so we must process
             REG_UNUSED notes from that insn.

             Any clobbers from i2 or i1 can only exist if they were added by
             recog_for_combine.  In that case, recog_for_combine created the
             necessary REG_UNUSED notes.  Trying to keep any original
             REG_UNUSED notes from these insns can cause incorrect output
             if it is for the same register as the original i3 dest.
             In that case, we will notice that the register is set in i3,
             and then add a REG_UNUSED note for the destination of i3, which
             is wrong.  However, it is possible to have REG_UNUSED notes from
             i2 or i1 for registers which were both used and clobbered, so
             we keep notes from i2 or i1 if they will turn into REG_DEAD
             notes.  */

          /* If this register is set or clobbered in I3, put the note there
             unless there is one already.  */
          if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
            {
              if (from_insn != i3)
                break;

              if (! (REG_P (XEXP (note, 0))
                     ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0)))
                     : find_reg_note (i3, REG_UNUSED, XEXP (note, 0))))
                place = i3;
            }
          /* Otherwise, if this register is used by I3, then this register
             now dies here, so we must put a REG_DEAD note here unless there
             is one already.  */
          else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
                   && ! (REG_P (XEXP (note, 0))
                         ? find_regno_note (i3, REG_DEAD,
                                            REGNO (XEXP (note, 0)))
                         : find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
            {
              PUT_REG_NOTE_KIND (note, REG_DEAD);
              place = i3;
            }

          /* A SET or CLOBBER of the REG_UNUSED reg has been removed,
             but we can't tell which at this point.  We must reset any
             expectations we had about the value that was previously
             stored in the reg.  ??? Ideally, we'd adjust REG_N_SETS
             and, if appropriate, restore its previous value, but we
             don't have enough information for that at this point.  */
          else
            {
              record_value_for_reg (XEXP (note, 0), NULL, NULL_RTX);

              /* Otherwise, if this register is now referenced in i2
                 then the register used to be modified in one of the
                 original insns.  If it was i3 (say, in an unused
                 parallel), it's now completely gone, so the note can
                 be discarded.  But if it was modified in i2, i1 or i0
                 and we still reference it in i2, then we're
                 referencing the previous value, and since the
                 register was modified and REG_UNUSED, we know that
                 the previous value is now dead.  So, if we only
                 reference the register in i2, we change the note to
                 REG_DEAD, to reflect the previous value.  However, if
                 we're also setting or clobbering the register as
                 scratch, we know (because the register was not
                 referenced in i3) that it's unused, just as it was
                 unused before, and we place the note in i2.  */
              if (from_insn != i3 && i2 && INSN_P (i2)
                  && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
                {
                  if (!reg_set_p (XEXP (note, 0), PATTERN (i2)))
                    PUT_REG_NOTE_KIND (note, REG_DEAD);
                  if (! (REG_P (XEXP (note, 0))
                         ? find_regno_note (i2, REG_NOTE_KIND (note),
                                            REGNO (XEXP (note, 0)))
                         : find_reg_note (i2, REG_NOTE_KIND (note),
                                          XEXP (note, 0))))
                    place = i2;
                }
            }

          break;

        case REG_EQUAL:
        case REG_EQUIV:
        case REG_NOALIAS:
          /* These notes say something about results of an insn.  We can
             only support them if they used to be on I3 in which case they
             remain on I3.  Otherwise they are ignored.

             If the note refers to an expression that is not a constant, we
             must also ignore the note since we cannot tell whether the
             equivalence is still true.  It might be possible to do
             slightly better than this (we only have a problem if I2DEST
             or I1DEST is present in the expression), but it doesn't
             seem worth the trouble.  */

          if (from_insn == i3
              && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0))))
            place = i3;
          break;

        case REG_INC:
          /* These notes say something about how a register is used.  They must
             be present on any use of the register in I2 or I3.  */
          if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
            place = i3;

          if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
            {
              if (place)
                place2 = i2;
              else
                place = i2;
            }
          break;

        case REG_LABEL_TARGET:
        case REG_LABEL_OPERAND:
          /* This can show up in several ways -- either directly in the
             pattern, or hidden off in the constant pool with (or without?)
             a REG_EQUAL note.  */
          /* ??? Ignore the without-reg_equal-note problem for now.  */
          if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3))
              || ((tem_note = find_reg_note (i3, REG_EQUAL, NULL_RTX))
                  && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
                  && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0)))
            place = i3;

          if (i2
              && (reg_mentioned_p (XEXP (note, 0), PATTERN (i2))
                  || ((tem_note = find_reg_note (i2, REG_EQUAL, NULL_RTX))
                      && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
                      && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0))))
            {
              if (place)
                place2 = i2;
              else
                place = i2;
            }

          /* For REG_LABEL_TARGET on a JUMP_P, we prefer to put the note
             as a JUMP_LABEL or decrement LABEL_NUSES if it's already
             there.  */
          if (place && JUMP_P (place)
              && REG_NOTE_KIND (note) == REG_LABEL_TARGET
              && (JUMP_LABEL (place) == NULL
                  || JUMP_LABEL (place) == XEXP (note, 0)))
            {
              rtx label = JUMP_LABEL (place);

              if (!label)
                JUMP_LABEL (place) = XEXP (note, 0);
              else if (LABEL_P (label))
                LABEL_NUSES (label)--;
            }

          if (place2 && JUMP_P (place2)
              && REG_NOTE_KIND (note) == REG_LABEL_TARGET
              && (JUMP_LABEL (place2) == NULL
                  || JUMP_LABEL (place2) == XEXP (note, 0)))
            {
              rtx label = JUMP_LABEL (place2);

              if (!label)
                JUMP_LABEL (place2) = XEXP (note, 0);
              else if (LABEL_P (label))
                LABEL_NUSES (label)--;
              place2 = 0;
            }
          break;

        case REG_NONNEG:
          /* This note says something about the value of a register prior
             to the execution of an insn.  It is too much trouble to see
             if the note is still correct in all situations.  It is better
             to simply delete it.  */
          break;

        case REG_DEAD:
          /* If we replaced the right hand side of FROM_INSN with a
             REG_EQUAL note, the original use of the dying register
             will not have been combined into I3 and I2.  In such cases,
             FROM_INSN is guaranteed to be the first of the combined
             instructions, so we simply need to search back before
             FROM_INSN for the previous use or set of this register,
             then alter the notes there appropriately.

             If the register is used as an input in I3, it dies there.
             Similarly for I2, if it is nonzero and adjacent to I3.

             If the register is not used as an input in either I3 or I2
             and it is not one of the registers we were supposed to eliminate,
             there are two possibilities.  We might have a non-adjacent I2
             or we might have somehow eliminated an additional register
             from a computation.  For example, we might have had A & B where
             we discover that B will always be zero.  In this case we will
             eliminate the reference to A.

             In both cases, we must search to see if we can find a previous
             use of A and put the death note there.  */

          if (from_insn
              && from_insn == i2mod
              && !reg_overlap_mentioned_p (XEXP (note, 0), i2mod_new_rhs))
            tem_insn = from_insn;
          else
            {
              if (from_insn
                  && CALL_P (from_insn)
                  && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
                place = from_insn;
              else if (i2 && reg_set_p (XEXP (note, 0), PATTERN (i2)))
                {
                  /* If the new I2 sets the same register that is marked
                     dead in the note, we do not in general know where to
                     put the note.  One important case we _can_ handle is
                     when the note comes from I3.  */
                  if (from_insn == i3)
                    place = i3;
                  else
                    break;
                }
              else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
                place = i3;
              else if (i2 != 0 && next_nonnote_nondebug_insn (i2) == i3
                       && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
                place = i2;
              else if ((rtx_equal_p (XEXP (note, 0), elim_i2)
                        && !(i2mod
                             && reg_overlap_mentioned_p (XEXP (note, 0),
                                                         i2mod_old_rhs)))
                       || rtx_equal_p (XEXP (note, 0), elim_i1)
                       || rtx_equal_p (XEXP (note, 0), elim_i0))
                break;
              tem_insn = i3;
            }

          if (place == 0)
            {
              basic_block bb = this_basic_block;

              for (tem_insn = PREV_INSN (tem_insn); place == 0;
                   tem_insn = PREV_INSN (tem_insn))
                {
                  if (!NONDEBUG_INSN_P (tem_insn))
                    {
                      if (tem_insn == BB_HEAD (bb))
                        break;
                      continue;
                    }

                  /* If the register is being set at TEM_INSN, see if that is
                     all TEM_INSN is doing.  If so, delete TEM_INSN.
                     Otherwise, make this into a REG_UNUSED note instead.
                     Don't delete sets to global register vars.  */
                  if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER
                       || !global_regs[REGNO (XEXP (note, 0))])
                      && reg_set_p (XEXP (note, 0), PATTERN (tem_insn)))
                    {
                      rtx set = single_set (tem_insn);
                      rtx inner_dest = 0;
                      rtx_insn *cc0_setter = NULL;

                      if (set != 0)
                        for (inner_dest = SET_DEST (set);
                             (GET_CODE (inner_dest) == STRICT_LOW_PART
                              || GET_CODE (inner_dest) == SUBREG
                              || GET_CODE (inner_dest) == ZERO_EXTRACT);
                             inner_dest = XEXP (inner_dest, 0))
                          ;

                      /* Verify that it was the set, and not a clobber that
                         modified the register.

                         CC0 targets must be careful to maintain setter/user
                         pairs.  If we cannot delete the setter due to side
                         effects, mark the user with an UNUSED note instead
                         of deleting it.  */

                      if (set != 0 && ! side_effects_p (SET_SRC (set))
                          && rtx_equal_p (XEXP (note, 0), inner_dest)
                          && (!HAVE_cc0
                              || (! reg_mentioned_p (cc0_rtx, SET_SRC (set))
                                  || ((cc0_setter
                                       = prev_cc0_setter (tem_insn)) != NULL
                                      && sets_cc0_p (PATTERN (cc0_setter)) > 0))))
                        {
                          /* Move the notes and links of TEM_INSN elsewhere.
                             This might delete other dead insns recursively.
                             First set the pattern to something that won't use
                             any register.  */
                          rtx old_notes = REG_NOTES (tem_insn);

                          PATTERN (tem_insn) = pc_rtx;
                          REG_NOTES (tem_insn) = NULL;

                          distribute_notes (old_notes, tem_insn, tem_insn, NULL,
                                            NULL_RTX, NULL_RTX, NULL_RTX);
                          distribute_links (LOG_LINKS (tem_insn));

                          unsigned int regno = REGNO (XEXP (note, 0));
                          reg_stat_type *rsp = &reg_stat[regno];
                          if (rsp->last_set == tem_insn)
                            record_value_for_reg (XEXP (note, 0), NULL,
                                                  NULL_RTX);

                          SET_INSN_DELETED (tem_insn);
                          if (tem_insn == i2)
                            i2 = NULL;

                          /* Delete the setter too.  */
                          if (cc0_setter)
                            {
                              PATTERN (cc0_setter) = pc_rtx;
                              old_notes = REG_NOTES (cc0_setter);
                              REG_NOTES (cc0_setter) = NULL;

                              distribute_notes (old_notes, cc0_setter,
                                                cc0_setter, NULL,
                                                NULL_RTX, NULL_RTX, NULL_RTX);
                              distribute_links (LOG_LINKS (cc0_setter));

                              SET_INSN_DELETED (cc0_setter);
                              if (cc0_setter == i2)
                                i2 = NULL;
                            }
                        }
                      else
                        {
                          PUT_REG_NOTE_KIND (note, REG_UNUSED);

                          /* If there isn't already a REG_UNUSED note, put one
                             here.  Do not place a REG_DEAD note, even if
                             the register is also used here; that would not
                             match the algorithm used in lifetime analysis
                             and can cause the consistency check in the
                             scheduler to fail.  */
                          if (! find_regno_note (tem_insn, REG_UNUSED,
                                                 REGNO (XEXP (note, 0))))
                            place = tem_insn;
                          break;
                        }
                    }
                  else if (reg_referenced_p (XEXP (note, 0),
                                             PATTERN (tem_insn))
                           || (CALL_P (tem_insn)
                               && find_reg_fusage (tem_insn, USE,
                                                   XEXP (note, 0))))
                    {
                      place = tem_insn;

                      /* If we are doing a 3->2 combination, and we have a
                         register which formerly died in i3 and was not used
                         by i2, which now no longer dies in i3 and is used in
                         i2 but does not die in i2, and place is between i2
                         and i3, then we may need to move a link from place to
                         i2.  */
                      if (i2 && DF_INSN_LUID (place) > DF_INSN_LUID (i2)
                          && from_insn
                          && DF_INSN_LUID (from_insn) > DF_INSN_LUID (i2)
                          && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
                        {
                          struct insn_link *links = LOG_LINKS (place);
                          LOG_LINKS (place) = NULL;
                          distribute_links (links);
                        }
                      break;
                    }

                  if (tem_insn == BB_HEAD (bb))
                    break;
                }
            }

          /* If the register is set or already dead at PLACE, we needn't do
             anything with this note if it is still a REG_DEAD note.
             We check here if it is set at all, not if it is totally replaced,
             which is what `dead_or_set_p' checks, so also check for it being
             set partially.  */

          if (place && REG_NOTE_KIND (note) == REG_DEAD)
            {
              unsigned int regno = REGNO (XEXP (note, 0));
              reg_stat_type *rsp = &reg_stat[regno];

              if (dead_or_set_p (place, XEXP (note, 0))
                  || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
                {
                  /* Unless the register previously died in PLACE, clear
                     last_death.  [I no longer understand why this is
                     being done.]  */
                  if (rsp->last_death != place)
                    rsp->last_death = 0;
                  place = 0;
                }
              else
                rsp->last_death = place;

              /* If this is a death note for a hard reg that is occupying
                 multiple registers, ensure that we are still using all
                 parts of the object.  If we find a piece of the object
                 that is unused, we must arrange for an appropriate REG_DEAD
                 note to be added for it.  However, we can't just emit a USE
                 and tag the note to it, since the register might actually
                 be dead; so we recurse, and the recursive call then finds
                 the previous insn that used this register.  */

              if (place && REG_NREGS (XEXP (note, 0)) > 1)
                {
                  unsigned int endregno = END_REGNO (XEXP (note, 0));
                  bool all_used = true;
                  unsigned int i;

                  for (i = regno; i < endregno; i++)
                    if ((! refers_to_regno_p (i, PATTERN (place))
                         && ! find_regno_fusage (place, USE, i))
                        || dead_or_set_regno_p (place, i))
                      {
                        all_used = false;
                        break;
                      }

                  if (! all_used)
                    {
                      /* Put only REG_DEAD notes for pieces that are
                         not already dead or set.  */

                      for (i = regno; i < endregno;
                           i += hard_regno_nregs (i, reg_raw_mode[i]))
                        {
                          rtx piece = regno_reg_rtx[i];
                          basic_block bb = this_basic_block;

                          if (! dead_or_set_p (place, piece)
                              && ! reg_bitfield_target_p (piece,
                                                          PATTERN (place)))
                            {
                              rtx new_note = alloc_reg_note (REG_DEAD, piece,
                                                             NULL_RTX);

                              distribute_notes (new_note, place, place,
                                                NULL, NULL_RTX, NULL_RTX,
                                                NULL_RTX);
                            }
                          else if (! refers_to_regno_p (i, PATTERN (place))
                                   && ! find_regno_fusage (place, USE, i))
                            for (tem_insn = PREV_INSN (place); ;
                                 tem_insn = PREV_INSN (tem_insn))
                              {
                                if (!NONDEBUG_INSN_P (tem_insn))
                                  {
                                    if (tem_insn == BB_HEAD (bb))
                                      break;
                                    continue;
                                  }
                                if (dead_or_set_p (tem_insn, piece)
                                    || reg_bitfield_target_p (piece,
                                                              PATTERN (tem_insn)))
                                  {
                                    add_reg_note (tem_insn, REG_UNUSED, piece);
                                    break;
                                  }
                              }
                        }

                      place = 0;
                    }
                }
            }
          break;

        default:
          /* Any other notes should not be present at this point in the
             compilation.  */
          gcc_unreachable ();
        }

      if (place)
        {
          XEXP (note, 1) = REG_NOTES (place);
          REG_NOTES (place) = note;

          /* Set added_notes_insn to the earliest insn we added a note to.  */
          if (added_notes_insn == 0
              || DF_INSN_LUID (added_notes_insn) > DF_INSN_LUID (place))
            added_notes_insn = place;
        }

      if (place2)
        {
          add_shallow_copy_of_reg_note (place2, note);

          /* Set added_notes_insn to the earliest insn we added a note to.  */
          if (added_notes_insn == 0
              || DF_INSN_LUID (added_notes_insn) > DF_INSN_LUID (place2))
            added_notes_insn = place2;
        }
    }
}

/* Similarly to the above, distribute the LOG_LINKS that used to be present
   on I3, I2, and I1 to new locations.  This is also called to add a link
   pointing at I3 when I3's destination is changed.  */
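/* A hypothetical example: if insn A set (reg 100) and I2, which used that
   register, carried a LOG_LINK back to A, then once I2 is deleted the link
   must be re-attached to the first insn after A that still uses (reg 100),
   often I3 itself.  */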

static void
distribute_links (struct insn_link *links)
{
  struct insn_link *link, *next_link;

  for (link = links; link; link = next_link)
    {
      rtx_insn *place = 0;
      rtx_insn *insn;
      rtx set, reg;

      next_link = link->next;

      /* If the insn that this link points to is a NOTE, ignore it.  */
      if (NOTE_P (link->insn))
        continue;

      set = 0;
      rtx pat = PATTERN (link->insn);
      if (GET_CODE (pat) == SET)
        set = pat;
      else if (GET_CODE (pat) == PARALLEL)
        {
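          /* For a PARALLEL, find the SET whose destination (once any
             ZERO_EXTRACT, STRICT_LOW_PART or SUBREG wrappers are stripped)
             is the register this link records.  */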
          int i;
          for (i = 0; i < XVECLEN (pat, 0); i++)
            {
              set = XVECEXP (pat, 0, i);
              if (GET_CODE (set) != SET)
                continue;

              reg = SET_DEST (set);
              while (GET_CODE (reg) == ZERO_EXTRACT
                     || GET_CODE (reg) == STRICT_LOW_PART
                     || GET_CODE (reg) == SUBREG)
                reg = XEXP (reg, 0);

              if (!REG_P (reg))
                continue;

              if (REGNO (reg) == link->regno)
                break;
            }
          if (i == XVECLEN (pat, 0))
            continue;
        }
      else
        continue;

      reg = SET_DEST (set);

      while (GET_CODE (reg) == ZERO_EXTRACT
             || GET_CODE (reg) == STRICT_LOW_PART
             || GET_CODE (reg) == SUBREG)
        reg = XEXP (reg, 0);

      if (reg == pc_rtx)
        continue;

      /* A LOG_LINK is defined as being placed on the first insn that uses
         a register and points to the insn that sets the register.  Start
         searching at the next insn after the target of the link and stop
         when we reach a set of the register or the end of the basic block.

         Note that this correctly handles the link that used to point from
         I3 to I2.  Also note that not much searching is typically done here
         since most links don't point very far away.  */

      for (insn = NEXT_INSN (link->insn);
           (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
                     || BB_HEAD (this_basic_block->next_bb) != insn));
           insn = NEXT_INSN (insn))
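        /* Skip debug insns: letting them anchor a LOG_LINK would make
           code generation depend on whether debug info is enabled.  */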
        if (DEBUG_INSN_P (insn))
          continue;
        else if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
          {
            if (reg_referenced_p (reg, PATTERN (insn)))
              place = insn;
            break;
          }
        else if (CALL_P (insn)
                 && find_reg_fusage (insn, USE, reg))
          {
            place = insn;
            break;
          }
        else if (INSN_P (insn) && reg_set_p (reg, insn))
          break;

      /* If we found a place to put the link, place it there unless there
         is already a link to the same insn as LINK at that point.  */

      if (place)
        {
          struct insn_link *link2;

          FOR_EACH_LOG_LINK (link2, place)
            if (link2->insn == link->insn && link2->regno == link->regno)
              break;

          if (link2 == NULL)
            {
              link->next = LOG_LINKS (place);
              LOG_LINKS (place) = link;

              /* Set added_links_insn to the earliest insn we added a
                 link to.  */
              if (added_links_insn == 0
                  || DF_INSN_LUID (added_links_insn) > DF_INSN_LUID (place))
                added_links_insn = place;
            }
        }
    }
}

/* Check for any register or memory mentioned in EQUIV that is not
   mentioned in EXPR.  This is used to restrict EQUIV to "specializations"
   of EXPR where some registers may have been replaced by constants.  */
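/* A hypothetical example: with EXPR (plus (reg 100) (reg 101)) and EQUIV
   (plus (reg 100) (const_int 4)), everything in EQUIV appears in EXPR, so
   we return false and EQUIV qualifies; with EQUIV (plus (reg 102)
   (const_int 4)), (reg 102) is unmentioned in EXPR and we return true.  */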

static bool
unmentioned_reg_p (rtx equiv, rtx expr)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, equiv, NONCONST)
    {
      const_rtx x = *iter;
      if ((REG_P (x) || MEM_P (x))
          && !reg_mentioned_p (x, expr))
        return true;
    }
  return false;
}

DEBUG_FUNCTION void
dump_combine_stats (FILE *file)
{
  fprintf
    (file,
     ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
     combine_attempts, combine_merges, combine_extras, combine_successes);
}

void
dump_combine_total_stats (FILE *file)
{
  fprintf
    (file,
     "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
     total_attempts, total_merges, total_extras, total_successes);
}

/* Try combining insns through substitution.  */
static unsigned int
rest_of_handle_combine (void)
{
  int rebuild_jump_labels_after_combine;

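  /* Let dataflow run dead code elimination while computing register
     lifetimes, defer insn rescans until we ask for them, and request the
     note problem so that up-to-date REG_DEAD/REG_UNUSED notes are
     available to the combiner.  */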
  df_set_flags (DF_LR_RUN_DCE + DF_DEFER_INSN_RESCAN);
  df_note_add_problem ();
  df_analyze ();

  regstat_init_n_sets_and_refs ();
  reg_n_sets_max = max_reg_num ();
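  /* Remember how many registers the set/ref counts cover: pseudos created
     during combination get numbers at or above this, so their counts must
     not be trusted.  */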

  rebuild_jump_labels_after_combine
    = combine_instructions (get_insns (), max_reg_num ());

  /* Combining insns may have turned an indirect jump into a
     direct jump.  Rebuild the JUMP_LABEL fields of jumping
     instructions.  */
  if (rebuild_jump_labels_after_combine)
    {
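      /* cleanup_cfg below may reshape the CFG, which would leave any
         cached dominator information stale; drop it first.  */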
      if (dom_info_available_p (CDI_DOMINATORS))
        free_dominance_info (CDI_DOMINATORS);
      timevar_push (TV_JUMP);
      rebuild_jump_labels (get_insns ());
      cleanup_cfg (0);
      timevar_pop (TV_JUMP);
    }

  regstat_free_n_sets_and_refs ();
  return 0;
}

namespace {

const pass_data pass_data_combine =
{
  RTL_PASS, /* type */
  "combine", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_COMBINE, /* tv_id */
  PROP_cfglayout, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_combine : public rtl_opt_pass
{
public:
  pass_combine (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_combine, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return (optimize > 0); }
  virtual unsigned int execute (function *)
    {
      return rest_of_handle_combine ();
    }

}; // class pass_combine

} // anon namespace

rtl_opt_pass *
make_pass_combine (gcc::context *ctxt)
{
  return new pass_combine (ctxt);
}