1 /* Optimize by combining instructions for GNU compiler.
2    Copyright (C) 1987-2019 Free Software Foundation, Inc.
3 
4 This file is part of GCC.
5 
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10 
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
14 for more details.
15 
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3.  If not see
18 <http://www.gnu.org/licenses/>.  */
19 
20 /* This module is essentially the "combiner" phase of the U. of Arizona
21    Portable Optimizer, but redone to work on our list-structured
22    representation for RTL instead of their string representation.
23 
24    The LOG_LINKS of each insn identify the most recent assignment
25    to each REG used in the insn.  It is a list of previous insns,
26    each of which contains a SET for a REG that is used in this insn
27    and not used or set in between.  LOG_LINKs never cross basic blocks.
28    They were set up by the preceding pass (lifetime analysis).
29 
30    We try to combine each pair of insns joined by a logical link.
31    We also try to combine triplets of insns A, B and C when C has
32    a link back to B and B has a link back to A.  Likewise for a
33    small number of quadruplets of insns A, B, C and D for which
34    there's high likelihood of success.
35 
36    LOG_LINKS does not have links for use of the CC0.  It doesn't
37    need to, because the insn that sets the CC0 is always immediately
38    before the insn that tests it.  So we always regard a branch
39    insn as having a logical link to the preceding insn.  The same is true
40    for an insn explicitly using CC0.
41 
42    We check (with modified_between_p) to avoid combining in such a way
43    as to move a computation to a place where its value would be different.
44 
45    Combination is done by mathematically substituting the previous
46    insn(s) values for the regs they set into the expressions in
47    the later insns that refer to these regs.  If the result is a valid insn
48    for our target machine, according to the machine description,
49    we install it, delete the earlier insns, and update the data flow
50    information (LOG_LINKS and REG_NOTES) for what we did.
51 
52    There are a few exceptions where the dataflow information isn't
53    completely updated (however this is only a local issue since it is
54    regenerated before the next pass that uses it):
55 
56    - reg_live_length is not updated
57    - reg_n_refs is not adjusted in the rare case when a register is
58      no longer required in a computation
59    - there are extremely rare cases (see distribute_notes) when a
60      REG_DEAD note is lost
61    - a LOG_LINKS entry that refers to an insn with multiple SETs may be
62      removed because there is no way to know which register it was
63      linking
64 
65    To simplify substitution, we combine only when the earlier insn(s)
66    consist of only a single assignment.  To simplify updating afterward,
67    we never combine when a subroutine call appears in the middle.
68 
69    Since we do not represent assignments to CC0 explicitly except when that
70    is all an insn does, there is no LOG_LINKS entry in an insn that uses
71    the condition code for the insn that set the condition code.
72    Fortunately, these two insns must be consecutive.
73    Therefore, every JUMP_INSN is taken to have an implicit logical link
74    to the preceding insn.  This is not quite right, since non-jumps can
75    also use the condition code; but in practice such insns would not
76    combine anyway.  */
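
/* As an illustrative sketch (register numbers and modes are invented rather
   than taken from any particular target), a typical two-insn combination
   substitutes the SET of I2 into I3:

       I2: (set (reg:SI 116) (ashift:SI (reg:SI 115) (const_int 2)))
       I3: (set (reg:SI 117) (plus:SI (reg:SI 116) (reg:SI 114)))

   If reg 116 dies in I3 and the target recognizes the combined pattern,
   the result is

       I3: (set (reg:SI 117)
		(plus:SI (ashift:SI (reg:SI 115) (const_int 2))
			 (reg:SI 114)))

   and I2 is deleted.  */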
77 
78 #include "config.h"
79 #include "system.h"
80 #include "coretypes.h"
81 #include "backend.h"
82 #include "target.h"
83 #include "rtl.h"
84 #include "tree.h"
85 #include "cfghooks.h"
86 #include "predict.h"
87 #include "df.h"
88 #include "memmodel.h"
89 #include "tm_p.h"
90 #include "optabs.h"
91 #include "regs.h"
92 #include "emit-rtl.h"
93 #include "recog.h"
94 #include "cgraph.h"
95 #include "stor-layout.h"
96 #include "cfgrtl.h"
97 #include "cfgcleanup.h"
98 /* Include expr.h after insn-config.h so we get HAVE_conditional_move.  */
99 #include "explow.h"
100 #include "insn-attr.h"
101 #include "rtlhooks-def.h"
102 #include "expr.h"
103 #include "params.h"
104 #include "tree-pass.h"
105 #include "valtrack.h"
106 #include "rtl-iter.h"
107 #include "print-rtl.h"
108 
109 /* Number of attempts to combine instructions in this function.  */
110 
111 static int combine_attempts;
112 
113 /* Number of attempts that got as far as substitution in this function.  */
114 
115 static int combine_merges;
116 
117 /* Number of instructions combined with added SETs in this function.  */
118 
119 static int combine_extras;
120 
121 /* Number of instructions combined in this function.  */
122 
123 static int combine_successes;
124 
125 /* Totals over entire compilation.  */
126 
127 static int total_attempts, total_merges, total_extras, total_successes;
128 
129 /* combine_instructions may try to replace the right hand side of the
130    second instruction with the value of an associated REG_EQUAL note
131    before throwing it at try_combine.  That is problematic when there
132    is a REG_DEAD note for a register used in the old right hand side
133    and can cause distribute_notes to do wrong things.  This is the
134    second instruction if it has been so modified, null otherwise.  */
135 
136 static rtx_insn *i2mod;
137 
138 /* When I2MOD is nonnull, this is a copy of the old right hand side.  */
139 
140 static rtx i2mod_old_rhs;
141 
142 /* When I2MOD is nonnull, this is a copy of the new right hand side.  */
143 
144 static rtx i2mod_new_rhs;
145 
146 struct reg_stat_type {
147   /* Record last point of death of (hard or pseudo) register n.  */
148   rtx_insn			*last_death;
149 
150   /* Record last point of modification of (hard or pseudo) register n.  */
151   rtx_insn			*last_set;
152 
153   /* The next group of fields allows the recording of the last value assigned
154      to (hard or pseudo) register n.  We use this information to see if an
155      operation being processed is redundant given a prior operation performed
156      on the register.  For example, an `and' with a constant is redundant if
157      all the zero bits are already known to be turned off.
158 
159      We use an approach similar to that used by cse, but change it in the
160      following ways:
161 
162      (1) We do not want to reinitialize at each label.
163      (2) It is useful, but not critical, to know the actual value assigned
164 	 to a register.  Often just its form is helpful.
165 
166      Therefore, we maintain the following fields:
167 
168      last_set_value		the last value assigned
169      last_set_label		records the value of label_tick when the
170 				register was assigned
171      last_set_table_tick	records the value of label_tick when a
172 				value using the register is assigned
173      last_set_invalid		set to nonzero when it is not valid
174 				to use the value of this register in some
175 				register's value
176 
177      To understand the usage of these tables, it is important to understand
178      the distinction between the value in last_set_value being valid and
179      the register being validly contained in some other expression in the
180      table.
181 
182      (The next two parameters are out of date).
183 
184      reg_stat[i].last_set_value is valid if it is nonzero, and either
185      reg_n_sets[i] is 1 or reg_stat[i].last_set_label == label_tick.
186 
187      Register I may validly appear in any expression returned for the value
188      of another register if reg_n_sets[i] is 1.  It may also appear in the
189      value for register J if reg_stat[j].last_set_invalid is zero, or
190      reg_stat[i].last_set_label < reg_stat[j].last_set_label.
191 
192      If an expression is found in the table containing a register which may
193      not validly appear in an expression, the register is replaced by
194      something that won't match, (clobber (const_int 0)).  */
195 
196   /* Record last value assigned to (hard or pseudo) register n.  */
197 
198   rtx				last_set_value;
199 
200   /* Record the value of label_tick when an expression involving register n
201      is placed in last_set_value.  */
202 
203   int				last_set_table_tick;
204 
205   /* Record the value of label_tick when the value for register n is placed in
206      last_set_value.  */
207 
208   int				last_set_label;
209 
210   /* These fields are maintained in parallel with last_set_value and are
211      used to store the mode in which the register was last set, the bits
212      that were known to be zero when it was last set, and the number of
213      sign bit copies it was known to have when it was last set.  */
214 
215   unsigned HOST_WIDE_INT	last_set_nonzero_bits;
216   char				last_set_sign_bit_copies;
217   ENUM_BITFIELD(machine_mode)	last_set_mode : 8;
218 
219   /* Set nonzero if references to register n in expressions should not be
220      used.  last_set_invalid is set nonzero when this register is being
221      assigned to and last_set_table_tick == label_tick.  */
222 
223   char				last_set_invalid;
224 
225   /* Some registers that are set more than once and used in more than one
226      basic block are nevertheless always set in similar ways.  For example,
227      a QImode register may be loaded from memory in two places on a machine
228      where byte loads zero extend.
229 
230      We record in the following fields if a register has some leading bits
231      that are always equal to the sign bit, and what we know about the
232      nonzero bits of a register, specifically which bits are known to be
233      zero.
234 
235      If an entry is zero, it means that we don't know anything special.  */
236 
237   unsigned char			sign_bit_copies;
238 
239   unsigned HOST_WIDE_INT	nonzero_bits;
240 
241   /* Record the value of the label_tick when the last truncation
242      happened.  The field truncated_to_mode is only valid if
243      truncation_label == label_tick.  */
244 
245   int				truncation_label;
246 
247   /* Record the last truncation seen for this register.  If truncation
248      is not a nop to this mode we might be able to save an explicit
249      truncation if we know that value already contains a truncated
250      value.  */
251 
252   ENUM_BITFIELD(machine_mode)	truncated_to_mode : 8;
253 };
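
/* As a concrete (invented) example of how sign_bit_copies and nonzero_bits
   are used: if a 32-bit SImode pseudo is only ever assigned by
   zero-extending byte loads, its nonzero_bits is 0xff and its
   sign_bit_copies is 24, so a later (and:SI (reg) (const_int 255)) applied
   to it is known to be redundant and can be simplified away.  */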
254 
255 
256 static vec<reg_stat_type> reg_stat;
257 
258 /* One plus the highest pseudo for which we track REG_N_SETS.
259    regstat_init_n_sets_and_refs allocates the array for REG_N_SETS just once,
260    but during combine_split_insns new pseudos can be created.  As we don't have
261    updated DF information in that case, it is hard to initialize the array
262    after growing.  The combiner only cares about REG_N_SETS (regno) == 1,
263    so instead of growing the arrays, just assume all newly created pseudos
264    during combine might be set multiple times.  */
265 
266 static unsigned int reg_n_sets_max;
267 
268 /* Record the luid of the last insn that invalidated memory
269    (anything that writes memory, and subroutine calls, but not pushes).  */
270 
271 static int mem_last_set;
272 
273 /* Record the luid of the last CALL_INSN
274    so we can tell whether a potential combination crosses any calls.  */
275 
276 static int last_call_luid;
277 
278 /* When `subst' is called, this is the insn that is being modified
279    (by combining in a previous insn).  The PATTERN of this insn
280    is still the old pattern partially modified and it should not be
281    looked at, but this may be used to examine the successors of the insn
282    to judge whether a simplification is valid.  */
283 
284 static rtx_insn *subst_insn;
285 
286 /* This is the lowest LUID that `subst' is currently dealing with.
287    get_last_value will not return a value if the register was set at or
288    after this LUID.  If not for this mechanism, we could get confused if
289    I2 or I1 in try_combine were an insn that used the old value of a register
290    to obtain a new value.  In that case, we might erroneously get the
291    new value of the register when we wanted the old one.  */
292 
293 static int subst_low_luid;
294 
295 /* This contains any hard registers that are used in newpat; reg_dead_at_p
296    must consider all these registers to be always live.  */
297 
298 static HARD_REG_SET newpat_used_regs;
299 
300 /* This is an insn to which a LOG_LINKS entry has been added.  If this
301    insn is earlier than I2 or I3, combine should rescan starting at
302    that location.  */
303 
304 static rtx_insn *added_links_insn;
305 
306 /* And similarly, for notes.  */
307 
308 static rtx_insn *added_notes_insn;
309 
310 /* Basic block in which we are performing combines.  */
311 static basic_block this_basic_block;
312 static bool optimize_this_for_speed_p;
313 
314 
315 /* Length of the currently allocated uid_insn_cost array.  */
316 
317 static int max_uid_known;
318 
319 /* The following array records the insn_cost for every insn
320    in the instruction stream.  */
321 
322 static int *uid_insn_cost;
323 
324 /* The following array records the LOG_LINKS for every insn in the
325    instruction stream as struct insn_link pointers.  */
326 
327 struct insn_link {
328   rtx_insn *insn;
329   unsigned int regno;
330   struct insn_link *next;
331 };
332 
333 static struct insn_link **uid_log_links;
334 
335 static inline int
336 insn_uid_check (const_rtx insn)
337 {
338   int uid = INSN_UID (insn);
339   gcc_checking_assert (uid <= max_uid_known);
340   return uid;
341 }
342 
343 #define INSN_COST(INSN)		(uid_insn_cost[insn_uid_check (INSN)])
344 #define LOG_LINKS(INSN)		(uid_log_links[insn_uid_check (INSN)])
345 
346 #define FOR_EACH_LOG_LINK(L, INSN)				\
347   for ((L) = LOG_LINKS (INSN); (L); (L) = (L)->next)
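
/* A usage sketch (the variable names here are made up): the links of an
   insn are typically walked like

     struct insn_link *link;
     FOR_EACH_LOG_LINK (link, use_insn)
       if (link->insn == def_insn && link->regno == REGNO (dest))
	 break;

   after which LINK is either the matching link or NULL; this is how
   find_single_use and create_log_links below test for an existing link.  */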
348 
349 /* Links for LOG_LINKS are allocated from this obstack.  */
350 
351 static struct obstack insn_link_obstack;
352 
353 /* Allocate a link.  */
354 
355 static inline struct insn_link *
356 alloc_insn_link (rtx_insn *insn, unsigned int regno, struct insn_link *next)
357 {
358   struct insn_link *l
359     = (struct insn_link *) obstack_alloc (&insn_link_obstack,
360 					  sizeof (struct insn_link));
361   l->insn = insn;
362   l->regno = regno;
363   l->next = next;
364   return l;
365 }
366 
367 /* Incremented for each basic block.  */
368 
369 static int label_tick;
370 
371 /* Reset to label_tick for each extended basic block in scanning order.  */
372 
373 static int label_tick_ebb_start;
374 
375 /* Mode used to compute significance in reg_stat[].nonzero_bits.  It is the
376    largest integer mode that can fit in HOST_BITS_PER_WIDE_INT.  */
377 
378 static scalar_int_mode nonzero_bits_mode;
379 
380 /* Nonzero when reg_stat[].nonzero_bits and reg_stat[].sign_bit_copies can
381    be safely used.  It is zero while computing them and after combine has
382    completed.  This former test prevents propagating values based on
383    previously set values, which can be incorrect if a variable is modified
384    in a loop.  */
385 
386 static int nonzero_sign_valid;
387 
388 
389 /* Record one modification to rtl structure
390    to be undone by storing old_contents into *where.  */
391 
392 enum undo_kind { UNDO_RTX, UNDO_INT, UNDO_MODE, UNDO_LINKS };
393 
394 struct undo
395 {
396   struct undo *next;
397   enum undo_kind kind;
398   union { rtx r; int i; machine_mode m; struct insn_link *l; } old_contents;
399   union { rtx *r; int *i; struct insn_link **l; } where;
400 };
401 
402 /* Record a bunch of changes to be undone, up to MAX_UNDO of them.
403    num_undo says how many are currently recorded.
404 
405    other_insn is nonzero if we have modified some other insn in the process
406    of working on subst_insn.  It must be verified too.  */
407 
408 struct undobuf
409 {
410   struct undo *undos;
411   struct undo *frees;
412   rtx_insn *other_insn;
413 };
414 
415 static struct undobuf undobuf;
416 
417 /* Number of times the pseudo being substituted for
418    was found and replaced.  */
419 
420 static int n_occurrences;
421 
422 static rtx reg_nonzero_bits_for_combine (const_rtx, scalar_int_mode,
423 					 scalar_int_mode,
424 					 unsigned HOST_WIDE_INT *);
425 static rtx reg_num_sign_bit_copies_for_combine (const_rtx, scalar_int_mode,
426 						scalar_int_mode,
427 						unsigned int *);
428 static void do_SUBST (rtx *, rtx);
429 static void do_SUBST_INT (int *, int);
430 static void init_reg_last (void);
431 static void setup_incoming_promotions (rtx_insn *);
432 static void set_nonzero_bits_and_sign_copies (rtx, const_rtx, void *);
433 static int cant_combine_insn_p (rtx_insn *);
434 static int can_combine_p (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
435 			  rtx_insn *, rtx_insn *, rtx *, rtx *);
436 static int combinable_i3pat (rtx_insn *, rtx *, rtx, rtx, rtx, int, int, rtx *);
437 static int contains_muldiv (rtx);
438 static rtx_insn *try_combine (rtx_insn *, rtx_insn *, rtx_insn *, rtx_insn *,
439 			      int *, rtx_insn *);
440 static void undo_all (void);
441 static void undo_commit (void);
442 static rtx *find_split_point (rtx *, rtx_insn *, bool);
443 static rtx subst (rtx, rtx, rtx, int, int, int);
444 static rtx combine_simplify_rtx (rtx, machine_mode, int, int);
445 static rtx simplify_if_then_else (rtx);
446 static rtx simplify_set (rtx);
447 static rtx simplify_logical (rtx);
448 static rtx expand_compound_operation (rtx);
449 static const_rtx expand_field_assignment (const_rtx);
450 static rtx make_extraction (machine_mode, rtx, HOST_WIDE_INT,
451 			    rtx, unsigned HOST_WIDE_INT, int, int, int);
452 static int get_pos_from_mask (unsigned HOST_WIDE_INT,
453 			      unsigned HOST_WIDE_INT *);
454 static rtx canon_reg_for_combine (rtx, rtx);
455 static rtx force_int_to_mode (rtx, scalar_int_mode, scalar_int_mode,
456 			      scalar_int_mode, unsigned HOST_WIDE_INT, int);
457 static rtx force_to_mode (rtx, machine_mode,
458 			  unsigned HOST_WIDE_INT, int);
459 static rtx if_then_else_cond (rtx, rtx *, rtx *);
460 static rtx known_cond (rtx, enum rtx_code, rtx, rtx);
461 static int rtx_equal_for_field_assignment_p (rtx, rtx, bool = false);
462 static rtx make_field_assignment (rtx);
463 static rtx apply_distributive_law (rtx);
464 static rtx distribute_and_simplify_rtx (rtx, int);
465 static rtx simplify_and_const_int_1 (scalar_int_mode, rtx,
466 				     unsigned HOST_WIDE_INT);
467 static rtx simplify_and_const_int (rtx, scalar_int_mode, rtx,
468 				   unsigned HOST_WIDE_INT);
469 static int merge_outer_ops (enum rtx_code *, HOST_WIDE_INT *, enum rtx_code,
470 			    HOST_WIDE_INT, machine_mode, int *);
471 static rtx simplify_shift_const_1 (enum rtx_code, machine_mode, rtx, int);
472 static rtx simplify_shift_const (rtx, enum rtx_code, machine_mode, rtx,
473 				 int);
474 static int recog_for_combine (rtx *, rtx_insn *, rtx *);
475 static rtx gen_lowpart_for_combine (machine_mode, rtx);
476 static enum rtx_code simplify_compare_const (enum rtx_code, machine_mode,
477 					     rtx, rtx *);
478 static enum rtx_code simplify_comparison (enum rtx_code, rtx *, rtx *);
479 static void update_table_tick (rtx);
480 static void record_value_for_reg (rtx, rtx_insn *, rtx);
481 static void check_promoted_subreg (rtx_insn *, rtx);
482 static void record_dead_and_set_regs_1 (rtx, const_rtx, void *);
483 static void record_dead_and_set_regs (rtx_insn *);
484 static int get_last_value_validate (rtx *, rtx_insn *, int, int);
485 static rtx get_last_value (const_rtx);
486 static void reg_dead_at_p_1 (rtx, const_rtx, void *);
487 static int reg_dead_at_p (rtx, rtx_insn *);
488 static void move_deaths (rtx, rtx, int, rtx_insn *, rtx *);
489 static int reg_bitfield_target_p (rtx, rtx);
490 static void distribute_notes (rtx, rtx_insn *, rtx_insn *, rtx_insn *, rtx, rtx, rtx);
491 static void distribute_links (struct insn_link *);
492 static void mark_used_regs_combine (rtx);
493 static void record_promoted_value (rtx_insn *, rtx);
494 static bool unmentioned_reg_p (rtx, rtx);
495 static void record_truncated_values (rtx *, void *);
496 static bool reg_truncated_to_mode (machine_mode, const_rtx);
497 static rtx gen_lowpart_or_truncate (machine_mode, rtx);
498 
499 
500 /* It is not safe to use ordinary gen_lowpart in combine.
501    See comments in gen_lowpart_for_combine.  */
502 #undef RTL_HOOKS_GEN_LOWPART
503 #define RTL_HOOKS_GEN_LOWPART              gen_lowpart_for_combine
504 
505 /* Our implementation of gen_lowpart never emits a new pseudo.  */
506 #undef RTL_HOOKS_GEN_LOWPART_NO_EMIT
507 #define RTL_HOOKS_GEN_LOWPART_NO_EMIT      gen_lowpart_for_combine
508 
509 #undef RTL_HOOKS_REG_NONZERO_REG_BITS
510 #define RTL_HOOKS_REG_NONZERO_REG_BITS     reg_nonzero_bits_for_combine
511 
512 #undef RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES
513 #define RTL_HOOKS_REG_NUM_SIGN_BIT_COPIES  reg_num_sign_bit_copies_for_combine
514 
515 #undef RTL_HOOKS_REG_TRUNCATED_TO_MODE
516 #define RTL_HOOKS_REG_TRUNCATED_TO_MODE    reg_truncated_to_mode
517 
518 static const struct rtl_hooks combine_rtl_hooks = RTL_HOOKS_INITIALIZER;
519 
520 
521 /* Convenience wrapper for the canonicalize_comparison target hook.
522    Target hooks cannot use enum rtx_code.  */
523 static inline void
524 target_canonicalize_comparison (enum rtx_code *code, rtx *op0, rtx *op1,
525 				bool op0_preserve_value)
526 {
527   int code_int = (int)*code;
528   targetm.canonicalize_comparison (&code_int, op0, op1, op0_preserve_value);
529   *code = (enum rtx_code)code_int;
530 }
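
/* A minimal usage sketch (the operands are placeholders):

     enum rtx_code code = GTU;
     target_canonicalize_comparison (&code, &op0, &op1, true);

   after which CODE, *OP0 and *OP1 hold whatever equivalent form the target
   prefers; targets that do not implement the hook leave them unchanged.  */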
531 
532 /* Try to split PATTERN found in INSN.  This returns NULL_RTX if
533    PATTERN cannot be split.  Otherwise, it returns an insn sequence.
534    This is a wrapper around split_insns which ensures that the
535    reg_stat vector is made larger if the splitter creates a new
536    register.  */
537 
538 static rtx_insn *
539 combine_split_insns (rtx pattern, rtx_insn *insn)
540 {
541   rtx_insn *ret;
542   unsigned int nregs;
543 
544   ret = split_insns (pattern, insn);
545   nregs = max_reg_num ();
546   if (nregs > reg_stat.length ())
547     reg_stat.safe_grow_cleared (nregs);
548   return ret;
549 }
550 
551 /* This is used by find_single_use to locate an rtx in LOC that
552    contains exactly one use of DEST, which is typically either a REG
553    or CC0.  It returns a pointer to the innermost rtx expression
554    containing DEST.  Appearances of DEST that are being used to
555    totally replace it are not counted.  */
556 
557 static rtx *
558 find_single_use_1 (rtx dest, rtx *loc)
559 {
560   rtx x = *loc;
561   enum rtx_code code = GET_CODE (x);
562   rtx *result = NULL;
563   rtx *this_result;
564   int i;
565   const char *fmt;
566 
567   switch (code)
568     {
569     case CONST:
570     case LABEL_REF:
571     case SYMBOL_REF:
572     CASE_CONST_ANY:
573     case CLOBBER:
574     case CLOBBER_HIGH:
575       return 0;
576 
577     case SET:
578       /* If the destination is anything other than CC0, PC, a REG or a SUBREG
579 	 of a REG that occupies all of the REG, the insn uses DEST if
580 	 it is mentioned in the destination or the source.  Otherwise, we
581 	 need only check the source.  */
582       if (GET_CODE (SET_DEST (x)) != CC0
583 	  && GET_CODE (SET_DEST (x)) != PC
584 	  && !REG_P (SET_DEST (x))
585 	  && ! (GET_CODE (SET_DEST (x)) == SUBREG
586 		&& REG_P (SUBREG_REG (SET_DEST (x)))
587 		&& !read_modify_subreg_p (SET_DEST (x))))
588 	break;
589 
590       return find_single_use_1 (dest, &SET_SRC (x));
591 
592     case MEM:
593     case SUBREG:
594       return find_single_use_1 (dest, &XEXP (x, 0));
595 
596     default:
597       break;
598     }
599 
600   /* If it wasn't one of the common cases above, check each expression and
601      vector of this code.  Look for a unique usage of DEST.  */
602 
603   fmt = GET_RTX_FORMAT (code);
604   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
605     {
606       if (fmt[i] == 'e')
607 	{
608 	  if (dest == XEXP (x, i)
609 	      || (REG_P (dest) && REG_P (XEXP (x, i))
610 		  && REGNO (dest) == REGNO (XEXP (x, i))))
611 	    this_result = loc;
612 	  else
613 	    this_result = find_single_use_1 (dest, &XEXP (x, i));
614 
615 	  if (result == NULL)
616 	    result = this_result;
617 	  else if (this_result)
618 	    /* Duplicate usage.  */
619 	    return NULL;
620 	}
621       else if (fmt[i] == 'E')
622 	{
623 	  int j;
624 
625 	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
626 	    {
627 	      if (XVECEXP (x, i, j) == dest
628 		  || (REG_P (dest)
629 		      && REG_P (XVECEXP (x, i, j))
630 		      && REGNO (XVECEXP (x, i, j)) == REGNO (dest)))
631 		this_result = loc;
632 	      else
633 		this_result = find_single_use_1 (dest, &XVECEXP (x, i, j));
634 
635 	      if (result == NULL)
636 		result = this_result;
637 	      else if (this_result)
638 		return NULL;
639 	    }
640 	}
641     }
642 
643   return result;
644 }
645 
646 
647 /* See if DEST, produced in INSN, is used only a single time in the
648    sequel.  If so, return a pointer to the innermost rtx expression in which
649    it is used.
650 
651    If PLOC is nonzero, *PLOC is set to the insn containing the single use.
652 
653    If DEST is cc0_rtx, we look only at the next insn.  In that case, we don't
654    care about REG_DEAD notes or LOG_LINKS.
655 
656    Otherwise, we find the single use by finding an insn that has a
657    LOG_LINKS pointing at INSN and has a REG_DEAD note for DEST.  If DEST is
658    only referenced once in that insn, we know that it must be the first
659    and last insn referencing DEST.  */
660 
661 static rtx *
662 find_single_use (rtx dest, rtx_insn *insn, rtx_insn **ploc)
663 {
664   basic_block bb;
665   rtx_insn *next;
666   rtx *result;
667   struct insn_link *link;
668 
669   if (dest == cc0_rtx)
670     {
671       next = NEXT_INSN (insn);
672       if (next == 0
673 	  || (!NONJUMP_INSN_P (next) && !JUMP_P (next)))
674 	return 0;
675 
676       result = find_single_use_1 (dest, &PATTERN (next));
677       if (result && ploc)
678 	*ploc = next;
679       return result;
680     }
681 
682   if (!REG_P (dest))
683     return 0;
684 
685   bb = BLOCK_FOR_INSN (insn);
686   for (next = NEXT_INSN (insn);
687        next && BLOCK_FOR_INSN (next) == bb;
688        next = NEXT_INSN (next))
689     if (NONDEBUG_INSN_P (next) && dead_or_set_p (next, dest))
690       {
691 	FOR_EACH_LOG_LINK (link, next)
692 	  if (link->insn == insn && link->regno == REGNO (dest))
693 	    break;
694 
695 	if (link)
696 	  {
697 	    result = find_single_use_1 (dest, &PATTERN (next));
698 	    if (ploc)
699 	      *ploc = next;
700 	    return result;
701 	  }
702       }
703 
704   return 0;
705 }
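
/* An illustrative example (register numbers invented): if INSN sets reg 200
   and the only later use of reg 200 in the block is in

     I2: (set (reg:SI 201) (neg:SI (reg:SI 200)))

   where reg 200 also dies, the return value points at the SET_SRC location
   of I2 (the innermost expression containing the use), and *PLOC, if PLOC
   is nonzero, is set to I2.  */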
706 
707 /* Substitute NEWVAL, an rtx expression, into INTO, a place in some
708    insn.  The substitution can be undone by undo_all.  If INTO is already
709    set to NEWVAL, do not record this change.  Because computing NEWVAL might
710    also call SUBST, we have to compute it before we put anything into
711    the undo table.  */
712 
713 static void
714 do_SUBST (rtx *into, rtx newval)
715 {
716   struct undo *buf;
717   rtx oldval = *into;
718 
719   if (oldval == newval)
720     return;
721 
722   /* We'd like to catch as many invalid transformations here as
723      possible.  Unfortunately, there are way too many mode changes
724      that are perfectly valid, so we'd waste too much effort for
725      little gain doing the checks here.  Focus on catching invalid
726      transformations involving integer constants.  */
727   if (GET_MODE_CLASS (GET_MODE (oldval)) == MODE_INT
728       && CONST_INT_P (newval))
729     {
730       /* Sanity check that we're replacing oldval with a CONST_INT
731 	 that is a valid sign-extension for the original mode.  */
732       gcc_assert (INTVAL (newval)
733 		  == trunc_int_for_mode (INTVAL (newval), GET_MODE (oldval)));
734 
735       /* Replacing the operand of a SUBREG or a ZERO_EXTEND with a
736 	 CONST_INT is not valid, because after the replacement, the
737 	 original mode would be gone.  Unfortunately, we can't tell
738 	 when do_SUBST is called to replace the operand thereof, so we
739 	 perform this test on oldval instead, checking whether an
740 	 invalid replacement took place before we got here.  */
741       gcc_assert (!(GET_CODE (oldval) == SUBREG
742 		    && CONST_INT_P (SUBREG_REG (oldval))));
743       gcc_assert (!(GET_CODE (oldval) == ZERO_EXTEND
744 		    && CONST_INT_P (XEXP (oldval, 0))));
745     }
746 
747   if (undobuf.frees)
748     buf = undobuf.frees, undobuf.frees = buf->next;
749   else
750     buf = XNEW (struct undo);
751 
752   buf->kind = UNDO_RTX;
753   buf->where.r = into;
754   buf->old_contents.r = oldval;
755   *into = newval;
756 
757   buf->next = undobuf.undos, undobuf.undos = buf;
758 }
759 
760 #define SUBST(INTO, NEWVAL)	do_SUBST (&(INTO), (NEWVAL))
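
/* A typical (illustrative) pattern in the code below pairs SUBST with
   undo_all: tentatively rewrite part of a pattern, try to recognize the
   result, and roll everything back if that fails.  The variable names here
   are placeholders:

     SUBST (SET_SRC (set), new_src);
     if (recog_for_combine (&newpat, i3, &new_notes) < 0)
       undo_all ();

   Every change recorded through SUBST (and the other do_SUBST_* routines)
   since the last undo_all or undo_commit lives in undobuf.undos and is
   reverted as a unit.  */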
761 
762 /* Similar to SUBST, but NEWVAL is an int expression.  Note that substitution
763    for a HOST_WIDE_INT value (including the value of a CONST_INT) is
764    not safe, since only an int is recorded for the undo.  */
765 
766 static void
767 do_SUBST_INT (int *into, int newval)
768 {
769   struct undo *buf;
770   int oldval = *into;
771 
772   if (oldval == newval)
773     return;
774 
775   if (undobuf.frees)
776     buf = undobuf.frees, undobuf.frees = buf->next;
777   else
778     buf = XNEW (struct undo);
779 
780   buf->kind = UNDO_INT;
781   buf->where.i = into;
782   buf->old_contents.i = oldval;
783   *into = newval;
784 
785   buf->next = undobuf.undos, undobuf.undos = buf;
786 }
787 
788 #define SUBST_INT(INTO, NEWVAL)  do_SUBST_INT (&(INTO), (NEWVAL))
789 
790 /* Similar to SUBST, but just substitute the mode.  This is used when
791    changing the mode of a pseudo-register, so that any other
792    references to the entry in the regno_reg_rtx array will change as
793    well.  */
794 
795 static void
796 do_SUBST_MODE (rtx *into, machine_mode newval)
797 {
798   struct undo *buf;
799   machine_mode oldval = GET_MODE (*into);
800 
801   if (oldval == newval)
802     return;
803 
804   if (undobuf.frees)
805     buf = undobuf.frees, undobuf.frees = buf->next;
806   else
807     buf = XNEW (struct undo);
808 
809   buf->kind = UNDO_MODE;
810   buf->where.r = into;
811   buf->old_contents.m = oldval;
812   adjust_reg_mode (*into, newval);
813 
814   buf->next = undobuf.undos, undobuf.undos = buf;
815 }
816 
817 #define SUBST_MODE(INTO, NEWVAL)  do_SUBST_MODE (&(INTO), (NEWVAL))
818 
819 /* Similar to SUBST, but NEWVAL is a LOG_LINKS expression.  */
820 
821 static void
822 do_SUBST_LINK (struct insn_link **into, struct insn_link *newval)
823 {
824   struct undo *buf;
825   struct insn_link * oldval = *into;
826 
827   if (oldval == newval)
828     return;
829 
830   if (undobuf.frees)
831     buf = undobuf.frees, undobuf.frees = buf->next;
832   else
833     buf = XNEW (struct undo);
834 
835   buf->kind = UNDO_LINKS;
836   buf->where.l = into;
837   buf->old_contents.l = oldval;
838   *into = newval;
839 
840   buf->next = undobuf.undos, undobuf.undos = buf;
841 }
842 
843 #define SUBST_LINK(oldval, newval) do_SUBST_LINK (&oldval, newval)
844 
845 /* Subroutine of try_combine.  Determine whether the replacement patterns
846    NEWPAT, NEWI2PAT and NEWOTHERPAT are cheaper according to insn_cost
847    than the original sequence I0, I1, I2, I3 and undobuf.other_insn.  Note
848    that I0, I1 and/or NEWI2PAT may be NULL_RTX.  Similarly, NEWOTHERPAT and
849    undobuf.other_insn may also both be NULL_RTX.  Return false if the cost
850    of all the instructions can be estimated and the replacements are more
851    expensive than the original sequence.  */
852 
853 static bool
854 combine_validate_cost (rtx_insn *i0, rtx_insn *i1, rtx_insn *i2, rtx_insn *i3,
855 		       rtx newpat, rtx newi2pat, rtx newotherpat)
856 {
857   int i0_cost, i1_cost, i2_cost, i3_cost;
858   int new_i2_cost, new_i3_cost;
859   int old_cost, new_cost;
860 
861   /* Lookup the original insn_costs.  */
862   i2_cost = INSN_COST (i2);
863   i3_cost = INSN_COST (i3);
864 
865   if (i1)
866     {
867       i1_cost = INSN_COST (i1);
868       if (i0)
869 	{
870 	  i0_cost = INSN_COST (i0);
871 	  old_cost = (i0_cost > 0 && i1_cost > 0 && i2_cost > 0 && i3_cost > 0
872 		      ? i0_cost + i1_cost + i2_cost + i3_cost : 0);
873 	}
874       else
875 	{
876 	  old_cost = (i1_cost > 0 && i2_cost > 0 && i3_cost > 0
877 		      ? i1_cost + i2_cost + i3_cost : 0);
878 	  i0_cost = 0;
879 	}
880     }
881   else
882     {
883       old_cost = (i2_cost > 0 && i3_cost > 0) ? i2_cost + i3_cost : 0;
884       i1_cost = i0_cost = 0;
885     }
886 
887   /* If we have split a PARALLEL I2 to I1,I2, we have counted its cost twice;
888      correct that.  */
889   if (old_cost && i1 && INSN_UID (i1) == INSN_UID (i2))
890     old_cost -= i1_cost;
891 
892 
893   /* Calculate the replacement insn_costs.  */
894   rtx tmp = PATTERN (i3);
895   PATTERN (i3) = newpat;
896   int tmpi = INSN_CODE (i3);
897   INSN_CODE (i3) = -1;
898   new_i3_cost = insn_cost (i3, optimize_this_for_speed_p);
899   PATTERN (i3) = tmp;
900   INSN_CODE (i3) = tmpi;
901   if (newi2pat)
902     {
903       tmp = PATTERN (i2);
904       PATTERN (i2) = newi2pat;
905       tmpi = INSN_CODE (i2);
906       INSN_CODE (i2) = -1;
907       new_i2_cost = insn_cost (i2, optimize_this_for_speed_p);
908       PATTERN (i2) = tmp;
909       INSN_CODE (i2) = tmpi;
910       new_cost = (new_i2_cost > 0 && new_i3_cost > 0)
911 		 ? new_i2_cost + new_i3_cost : 0;
912     }
913   else
914     {
915       new_cost = new_i3_cost;
916       new_i2_cost = 0;
917     }
918 
919   if (undobuf.other_insn)
920     {
921       int old_other_cost, new_other_cost;
922 
923       old_other_cost = INSN_COST (undobuf.other_insn);
924       tmp = PATTERN (undobuf.other_insn);
925       PATTERN (undobuf.other_insn) = newotherpat;
926       tmpi = INSN_CODE (undobuf.other_insn);
927       INSN_CODE (undobuf.other_insn) = -1;
928       new_other_cost = insn_cost (undobuf.other_insn,
929 				  optimize_this_for_speed_p);
930       PATTERN (undobuf.other_insn) = tmp;
931       INSN_CODE (undobuf.other_insn) = tmpi;
932       if (old_other_cost > 0 && new_other_cost > 0)
933 	{
934 	  old_cost += old_other_cost;
935 	  new_cost += new_other_cost;
936 	}
937       else
938 	old_cost = 0;
939     }
940 
941   /* Disallow this combination if both new_cost and old_cost are greater than
942      zero, and new_cost is greater than old_cost.  */
943   int reject = old_cost > 0 && new_cost > old_cost;
944 
945   if (dump_file)
946     {
947       fprintf (dump_file, "%s combination of insns ",
948 	       reject ? "rejecting" : "allowing");
949       if (i0)
950 	fprintf (dump_file, "%d, ", INSN_UID (i0));
951       if (i1 && INSN_UID (i1) != INSN_UID (i2))
952 	fprintf (dump_file, "%d, ", INSN_UID (i1));
953       fprintf (dump_file, "%d and %d\n", INSN_UID (i2), INSN_UID (i3));
954 
955       fprintf (dump_file, "original costs ");
956       if (i0)
957 	fprintf (dump_file, "%d + ", i0_cost);
958       if (i1 && INSN_UID (i1) != INSN_UID (i2))
959 	fprintf (dump_file, "%d + ", i1_cost);
960       fprintf (dump_file, "%d + %d = %d\n", i2_cost, i3_cost, old_cost);
961 
962       if (newi2pat)
963 	fprintf (dump_file, "replacement costs %d + %d = %d\n",
964 		 new_i2_cost, new_i3_cost, new_cost);
965       else
966 	fprintf (dump_file, "replacement cost %d\n", new_cost);
967     }
968 
969   if (reject)
970     return false;
971 
972   /* Update the uid_insn_cost array with the replacement costs.  */
973   INSN_COST (i2) = new_i2_cost;
974   INSN_COST (i3) = new_i3_cost;
975   if (i1)
976     {
977       INSN_COST (i1) = 0;
978       if (i0)
979 	INSN_COST (i0) = 0;
980     }
981 
982   return true;
983 }
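
/* A worked example with invented numbers: if INSN_COST (i2) == 4 and
   INSN_COST (i3) == 4, old_cost is 8; a replacement I3 pattern of cost 12
   gives new_cost == 12 > 8 and the combination is rejected, while a
   replacement of cost 8 or less is allowed (ties do not block the
   combination).  */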
984 
985 
986 /* Delete any insns that copy a register to itself.
987    Return true if the CFG was changed.  */
988 
989 static bool
990 delete_noop_moves (void)
991 {
992   rtx_insn *insn, *next;
993   basic_block bb;
994 
995   bool edges_deleted = false;
996 
997   FOR_EACH_BB_FN (bb, cfun)
998     {
999       for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
1000 	{
1001 	  next = NEXT_INSN (insn);
1002 	  if (INSN_P (insn) && noop_move_p (insn))
1003 	    {
1004 	      if (dump_file)
1005 		fprintf (dump_file, "deleting noop move %d\n", INSN_UID (insn));
1006 
1007 	      edges_deleted |= delete_insn_and_edges (insn);
1008 	    }
1009 	}
1010     }
1011 
1012   return edges_deleted;
1013 }
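
/* An insn deleted here is, for example (register number invented),
   (set (reg:SI 140) (reg:SI 140)); combination can rewrite a source into
   exactly its destination, and noop_move_p recognizes the result.  */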
1014 
1015 
1016 /* Return false if we do not want to (or cannot) combine DEF.  */
1017 static bool
1018 can_combine_def_p (df_ref def)
1019 {
1020   /* Do not consider the def if it is a pre/post modification in a MEM.  */
1021   if (DF_REF_FLAGS (def) & DF_REF_PRE_POST_MODIFY)
1022     return false;
1023 
1024   unsigned int regno = DF_REF_REGNO (def);
1025 
1026   /* Do not combine frame pointer adjustments.  */
1027   if ((regno == FRAME_POINTER_REGNUM
1028        && (!reload_completed || frame_pointer_needed))
1029       || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
1030 	  && regno == HARD_FRAME_POINTER_REGNUM
1031 	  && (!reload_completed || frame_pointer_needed))
1032       || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1033 	  && regno == ARG_POINTER_REGNUM && fixed_regs[regno]))
1034     return false;
1035 
1036   return true;
1037 }
1038 
1039 /* Return false if we do not want to (or cannot) combine USE.  */
1040 static bool
1041 can_combine_use_p (df_ref use)
1042 {
1043   /* Do not consider the use of the stack pointer by a function call.  */
1044   if (DF_REF_FLAGS (use) & DF_REF_CALL_STACK_USAGE)
1045     return false;
1046 
1047   return true;
1048 }
1049 
1050 /* Fill in log links field for all insns.  */
1051 
1052 static void
1053 create_log_links (void)
1054 {
1055   basic_block bb;
1056   rtx_insn **next_use;
1057   rtx_insn *insn;
1058   df_ref def, use;
1059 
1060   next_use = XCNEWVEC (rtx_insn *, max_reg_num ());
1061 
1062   /* Pass through each block from the end, recording the uses of each
1063      register and establishing log links when def is encountered.
1064      Note that we do not clear next_use array in order to save time,
1065      so we have to test whether the use is in the same basic block as def.
1066 
1067      There are a few cases below when we do not consider the definition or
1068      usage -- these follow what the original flow.c did.  Don't ask me why it is
1069      done this way; I don't know and if it works, I don't want to know.  */
1070 
1071   FOR_EACH_BB_FN (bb, cfun)
1072     {
1073       FOR_BB_INSNS_REVERSE (bb, insn)
1074         {
1075           if (!NONDEBUG_INSN_P (insn))
1076             continue;
1077 
1078 	  /* Log links are created only once.  */
1079 	  gcc_assert (!LOG_LINKS (insn));
1080 
1081 	  FOR_EACH_INSN_DEF (def, insn)
1082             {
1083               unsigned int regno = DF_REF_REGNO (def);
1084               rtx_insn *use_insn;
1085 
1086               if (!next_use[regno])
1087                 continue;
1088 
1089 	      if (!can_combine_def_p (def))
1090 		continue;
1091 
1092 	      use_insn = next_use[regno];
1093 	      next_use[regno] = NULL;
1094 
1095 	      if (BLOCK_FOR_INSN (use_insn) != bb)
1096 		continue;
1097 
1098 	      /* flow.c claimed:
1099 
1100 		 We don't build a LOG_LINK for hard registers contained
1101 		 in ASM_OPERANDs.  If these registers get replaced,
1102 		 we might wind up changing the semantics of the insn,
1103 		 even if reload can make what appear to be valid
1104 		 assignments later.  */
1105 	      if (regno < FIRST_PSEUDO_REGISTER
1106 		  && asm_noperands (PATTERN (use_insn)) >= 0)
1107 		continue;
1108 
1109 	      /* Don't add duplicate links between instructions.  */
1110 	      struct insn_link *links;
1111 	      FOR_EACH_LOG_LINK (links, use_insn)
1112 	        if (insn == links->insn && regno == links->regno)
1113 		  break;
1114 
1115 	      if (!links)
1116 		LOG_LINKS (use_insn)
1117 		  = alloc_insn_link (insn, regno, LOG_LINKS (use_insn));
1118             }
1119 
1120 	  FOR_EACH_INSN_USE (use, insn)
1121 	    if (can_combine_use_p (use))
1122 	      next_use[DF_REF_REGNO (use)] = insn;
1123         }
1124     }
1125 
1126   free (next_use);
1127 }
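
/* A small illustration (register numbers invented) of the links built above
   for one basic block, which is scanned bottom-up:

     I1: (set (reg 120) (mem ...))
     I2: (set (reg 121) (plus (reg 120) (const_int 1)))
     I3: (set (mem ...) (reg 121))

   yields LOG_LINKS (I2) = { (I1, 120) } and LOG_LINKS (I3) = { (I2, 121) }.
   There is no link from I3 back to I1 because I1 does not set a register
   used in I3; longer I0/I1/I2/I3 chains are instead found in
   combine_instructions by following these per-insn links transitively.  */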
1128 
1129 /* Walk the LOG_LINKS of insn B to see if we find a reference to A.  Return
1130    true if we found a LOG_LINK that proves that A feeds B.  This only works
1131    if there are no instructions between A and B which could have a link
1132    depending on A, since in that case we would not record a link for B.
1133    We also check the implicit dependency created by a cc0 setter/user
1134    pair.  */
1135 
1136 static bool
1137 insn_a_feeds_b (rtx_insn *a, rtx_insn *b)
1138 {
1139   struct insn_link *links;
1140   FOR_EACH_LOG_LINK (links, b)
1141     if (links->insn == a)
1142       return true;
1143   if (HAVE_cc0 && sets_cc0_p (a))
1144     return true;
1145   return false;
1146 }
1147 
1148 /* Main entry point for combiner.  F is the first insn of the function.
1149    NREGS is the first unused pseudo-reg number.
1150 
1151    Return nonzero if the CFG was changed (e.g. if the combiner has
1152    turned an indirect jump instruction into a direct jump).  */
1153 static int
1154 combine_instructions (rtx_insn *f, unsigned int nregs)
1155 {
1156   rtx_insn *insn, *next;
1157   rtx_insn *prev;
1158   struct insn_link *links, *nextlinks;
1159   rtx_insn *first;
1160   basic_block last_bb;
1161 
1162   int new_direct_jump_p = 0;
1163 
1164   for (first = f; first && !NONDEBUG_INSN_P (first); )
1165     first = NEXT_INSN (first);
1166   if (!first)
1167     return 0;
1168 
1169   combine_attempts = 0;
1170   combine_merges = 0;
1171   combine_extras = 0;
1172   combine_successes = 0;
1173 
1174   rtl_hooks = combine_rtl_hooks;
1175 
1176   reg_stat.safe_grow_cleared (nregs);
1177 
1178   init_recog_no_volatile ();
1179 
1180   /* Allocate array for insn info.  */
1181   max_uid_known = get_max_uid ();
1182   uid_log_links = XCNEWVEC (struct insn_link *, max_uid_known + 1);
1183   uid_insn_cost = XCNEWVEC (int, max_uid_known + 1);
1184   gcc_obstack_init (&insn_link_obstack);
1185 
1186   nonzero_bits_mode = int_mode_for_size (HOST_BITS_PER_WIDE_INT, 0).require ();
1187 
1188   /* Don't use reg_stat[].nonzero_bits when computing it.  This can cause
1189      problems when, for example, we have j <<= 1 in a loop.  */
1190 
1191   nonzero_sign_valid = 0;
1192   label_tick = label_tick_ebb_start = 1;
1193 
1194   /* Scan all SETs and see if we can deduce anything about what
1195      bits are known to be zero for some registers and how many copies
1196      of the sign bit are known to exist for those registers.
1197 
1198      Also set any known values so that we can use it while searching
1199      for what bits are known to be set.  */
1200 
1201   setup_incoming_promotions (first);
1202   /* Allow the entry block and the first block to fall into the same EBB.
1203      Conceptually the incoming promotions are assigned to the entry block.  */
1204   last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
1205 
1206   create_log_links ();
1207   FOR_EACH_BB_FN (this_basic_block, cfun)
1208     {
1209       optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1210       last_call_luid = 0;
1211       mem_last_set = -1;
1212 
1213       label_tick++;
1214       if (!single_pred_p (this_basic_block)
1215 	  || single_pred (this_basic_block) != last_bb)
1216 	label_tick_ebb_start = label_tick;
1217       last_bb = this_basic_block;
1218 
1219       FOR_BB_INSNS (this_basic_block, insn)
1220         if (INSN_P (insn) && BLOCK_FOR_INSN (insn))
1221 	  {
1222             rtx links;
1223 
1224             subst_low_luid = DF_INSN_LUID (insn);
1225             subst_insn = insn;
1226 
1227 	    note_stores (PATTERN (insn), set_nonzero_bits_and_sign_copies,
1228 		         insn);
1229 	    record_dead_and_set_regs (insn);
1230 
1231 	    if (AUTO_INC_DEC)
1232 	      for (links = REG_NOTES (insn); links; links = XEXP (links, 1))
1233 		if (REG_NOTE_KIND (links) == REG_INC)
1234 		  set_nonzero_bits_and_sign_copies (XEXP (links, 0), NULL_RTX,
1235 						    insn);
1236 
1237 	    /* Record the current insn_cost of this instruction.  */
1238 	    if (NONJUMP_INSN_P (insn))
1239 	      INSN_COST (insn) = insn_cost (insn, optimize_this_for_speed_p);
1240 	    if (dump_file)
1241 	      {
1242 		fprintf (dump_file, "insn_cost %d for ", INSN_COST (insn));
1243 		dump_insn_slim (dump_file, insn);
1244 	      }
1245 	  }
1246     }
1247 
1248   nonzero_sign_valid = 1;
1249 
1250   /* Now scan all the insns in forward order.  */
1251   label_tick = label_tick_ebb_start = 1;
1252   init_reg_last ();
1253   setup_incoming_promotions (first);
1254   last_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
1255   int max_combine = PARAM_VALUE (PARAM_MAX_COMBINE_INSNS);
1256 
1257   FOR_EACH_BB_FN (this_basic_block, cfun)
1258     {
1259       rtx_insn *last_combined_insn = NULL;
1260 
1261       /* Ignore instruction combination in basic blocks that are going to
1262 	 be removed as unreachable anyway.  See PR82386.  */
1263       if (EDGE_COUNT (this_basic_block->preds) == 0)
1264 	continue;
1265 
1266       optimize_this_for_speed_p = optimize_bb_for_speed_p (this_basic_block);
1267       last_call_luid = 0;
1268       mem_last_set = -1;
1269 
1270       label_tick++;
1271       if (!single_pred_p (this_basic_block)
1272 	  || single_pred (this_basic_block) != last_bb)
1273 	label_tick_ebb_start = label_tick;
1274       last_bb = this_basic_block;
1275 
1276       rtl_profile_for_bb (this_basic_block);
1277       for (insn = BB_HEAD (this_basic_block);
1278 	   insn != NEXT_INSN (BB_END (this_basic_block));
1279 	   insn = next ? next : NEXT_INSN (insn))
1280 	{
1281 	  next = 0;
1282 	  if (!NONDEBUG_INSN_P (insn))
1283 	    continue;
1284 
1285 	  while (last_combined_insn
1286 		 && (!NONDEBUG_INSN_P (last_combined_insn)
1287 		     || last_combined_insn->deleted ()))
1288 	    last_combined_insn = PREV_INSN (last_combined_insn);
1289 	  if (last_combined_insn == NULL_RTX
1290 	      || BLOCK_FOR_INSN (last_combined_insn) != this_basic_block
1291 	      || DF_INSN_LUID (last_combined_insn) <= DF_INSN_LUID (insn))
1292 	    last_combined_insn = insn;
1293 
1294 	  /* See if we know about function return values before this
1295 	     insn based upon SUBREG flags.  */
1296 	  check_promoted_subreg (insn, PATTERN (insn));
1297 
1298 	  /* See if we can find hardregs and subregs of pseudos in
1299 	     narrower modes.  This could help turn TRUNCATEs
1300 	     into SUBREGs.  */
1301 	  note_uses (&PATTERN (insn), record_truncated_values, NULL);
1302 
1303 	  /* Try this insn with each insn it links back to.  */
1304 
1305 	  FOR_EACH_LOG_LINK (links, insn)
1306 	    if ((next = try_combine (insn, links->insn, NULL,
1307 				     NULL, &new_direct_jump_p,
1308 				     last_combined_insn)) != 0)
1309 	      {
1310 		statistics_counter_event (cfun, "two-insn combine", 1);
1311 		goto retry;
1312 	      }
1313 
1314 	  /* Try each sequence of three linked insns ending with this one.  */
1315 
1316 	  if (max_combine >= 3)
1317 	    FOR_EACH_LOG_LINK (links, insn)
1318 	      {
1319 		rtx_insn *link = links->insn;
1320 
1321 		/* If the linked insn has been replaced by a note, then there
1322 		   is no point in pursuing this chain any further.  */
1323 		if (NOTE_P (link))
1324 		  continue;
1325 
1326 		FOR_EACH_LOG_LINK (nextlinks, link)
1327 		  if ((next = try_combine (insn, link, nextlinks->insn,
1328 					   NULL, &new_direct_jump_p,
1329 					   last_combined_insn)) != 0)
1330 		    {
1331 		      statistics_counter_event (cfun, "three-insn combine", 1);
1332 		      goto retry;
1333 		    }
1334 	      }
1335 
1336 	  /* Try to combine a jump insn that uses CC0
1337 	     with a preceding insn that sets CC0, and maybe with its
1338 	     logical predecessor as well.
1339 	     This is how we make decrement-and-branch insns.
1340 	     We need this special code because data flow connections
1341 	     via CC0 do not get entered in LOG_LINKS.  */
1342 
1343 	  if (HAVE_cc0
1344 	      && JUMP_P (insn)
1345 	      && (prev = prev_nonnote_insn (insn)) != 0
1346 	      && NONJUMP_INSN_P (prev)
1347 	      && sets_cc0_p (PATTERN (prev)))
1348 	    {
1349 	      if ((next = try_combine (insn, prev, NULL, NULL,
1350 				       &new_direct_jump_p,
1351 				       last_combined_insn)) != 0)
1352 		goto retry;
1353 
1354 	      FOR_EACH_LOG_LINK (nextlinks, prev)
1355 		  if ((next = try_combine (insn, prev, nextlinks->insn,
1356 					   NULL, &new_direct_jump_p,
1357 					   last_combined_insn)) != 0)
1358 		    goto retry;
1359 	    }
1360 
1361 	  /* Do the same for an insn that explicitly references CC0.  */
1362 	  if (HAVE_cc0 && NONJUMP_INSN_P (insn)
1363 	      && (prev = prev_nonnote_insn (insn)) != 0
1364 	      && NONJUMP_INSN_P (prev)
1365 	      && sets_cc0_p (PATTERN (prev))
1366 	      && GET_CODE (PATTERN (insn)) == SET
1367 	      && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (insn))))
1368 	    {
1369 	      if ((next = try_combine (insn, prev, NULL, NULL,
1370 				       &new_direct_jump_p,
1371 				       last_combined_insn)) != 0)
1372 		goto retry;
1373 
1374 	      FOR_EACH_LOG_LINK (nextlinks, prev)
1375 		  if ((next = try_combine (insn, prev, nextlinks->insn,
1376 					   NULL, &new_direct_jump_p,
1377 					   last_combined_insn)) != 0)
1378 		    goto retry;
1379 	    }
1380 
1381 	  /* Finally, see if any of the insns that this insn links to
1382 	     explicitly references CC0.  If so, try this insn, that insn,
1383 	     and its predecessor if it sets CC0.  */
1384 	  if (HAVE_cc0)
1385 	    {
1386 	      FOR_EACH_LOG_LINK (links, insn)
1387 		if (NONJUMP_INSN_P (links->insn)
1388 		    && GET_CODE (PATTERN (links->insn)) == SET
1389 		    && reg_mentioned_p (cc0_rtx, SET_SRC (PATTERN (links->insn)))
1390 		    && (prev = prev_nonnote_insn (links->insn)) != 0
1391 		    && NONJUMP_INSN_P (prev)
1392 		    && sets_cc0_p (PATTERN (prev))
1393 		    && (next = try_combine (insn, links->insn,
1394 					    prev, NULL, &new_direct_jump_p,
1395 					    last_combined_insn)) != 0)
1396 		  goto retry;
1397 	    }
1398 
1399 	  /* Try combining an insn with two different insns whose results it
1400 	     uses.  */
1401 	  if (max_combine >= 3)
1402 	    FOR_EACH_LOG_LINK (links, insn)
1403 	      for (nextlinks = links->next; nextlinks;
1404 		   nextlinks = nextlinks->next)
1405 		if ((next = try_combine (insn, links->insn,
1406 					 nextlinks->insn, NULL,
1407 					 &new_direct_jump_p,
1408 					 last_combined_insn)) != 0)
1409 
1410 		  {
1411 		    statistics_counter_event (cfun, "three-insn combine", 1);
1412 		    goto retry;
1413 		  }
1414 
1415 	  /* Try four-instruction combinations.  */
1416 	  if (max_combine >= 4)
1417 	    FOR_EACH_LOG_LINK (links, insn)
1418 	      {
1419 		struct insn_link *next1;
1420 		rtx_insn *link = links->insn;
1421 
1422 		/* If the linked insn has been replaced by a note, then there
1423 		   is no point in pursuing this chain any further.  */
1424 		if (NOTE_P (link))
1425 		  continue;
1426 
1427 		FOR_EACH_LOG_LINK (next1, link)
1428 		  {
1429 		    rtx_insn *link1 = next1->insn;
1430 		    if (NOTE_P (link1))
1431 		      continue;
1432 		    /* I0 -> I1 -> I2 -> I3.  */
1433 		    FOR_EACH_LOG_LINK (nextlinks, link1)
1434 		      if ((next = try_combine (insn, link, link1,
1435 					       nextlinks->insn,
1436 					       &new_direct_jump_p,
1437 					       last_combined_insn)) != 0)
1438 			{
1439 			  statistics_counter_event (cfun, "four-insn combine", 1);
1440 			  goto retry;
1441 			}
1442 		    /* I0, I1 -> I2, I2 -> I3.  */
1443 		    for (nextlinks = next1->next; nextlinks;
1444 			 nextlinks = nextlinks->next)
1445 		      if ((next = try_combine (insn, link, link1,
1446 					       nextlinks->insn,
1447 					       &new_direct_jump_p,
1448 					       last_combined_insn)) != 0)
1449 			{
1450 			  statistics_counter_event (cfun, "four-insn combine", 1);
1451 			  goto retry;
1452 			}
1453 		  }
1454 
1455 		for (next1 = links->next; next1; next1 = next1->next)
1456 		  {
1457 		    rtx_insn *link1 = next1->insn;
1458 		    if (NOTE_P (link1))
1459 		      continue;
1460 		    /* I0 -> I2; I1, I2 -> I3.  */
1461 		    FOR_EACH_LOG_LINK (nextlinks, link)
1462 		      if ((next = try_combine (insn, link, link1,
1463 					       nextlinks->insn,
1464 					       &new_direct_jump_p,
1465 					       last_combined_insn)) != 0)
1466 			{
1467 			  statistics_counter_event (cfun, "four-insn combine", 1);
1468 			  goto retry;
1469 			}
1470 		    /* I0 -> I1; I1, I2 -> I3.  */
1471 		    FOR_EACH_LOG_LINK (nextlinks, link1)
1472 		      if ((next = try_combine (insn, link, link1,
1473 					       nextlinks->insn,
1474 					       &new_direct_jump_p,
1475 					       last_combined_insn)) != 0)
1476 			{
1477 			  statistics_counter_event (cfun, "four-insn combine", 1);
1478 			  goto retry;
1479 			}
1480 		  }
1481 	      }
1482 
1483 	  /* Try this insn with each REG_EQUAL note it links back to.  */
1484 	  FOR_EACH_LOG_LINK (links, insn)
1485 	    {
1486 	      rtx set, note;
1487 	      rtx_insn *temp = links->insn;
1488 	      if ((set = single_set (temp)) != 0
1489 		  && (note = find_reg_equal_equiv_note (temp)) != 0
1490 		  && (note = XEXP (note, 0), GET_CODE (note)) != EXPR_LIST
1491 		  /* Avoid using a register that may already have been marked
1492 		     dead by an earlier instruction.  */
1493 		  && ! unmentioned_reg_p (note, SET_SRC (set))
1494 		  && (GET_MODE (note) == VOIDmode
1495 		      ? SCALAR_INT_MODE_P (GET_MODE (SET_DEST (set)))
1496 		      : (GET_MODE (SET_DEST (set)) == GET_MODE (note)
1497 			 && (GET_CODE (SET_DEST (set)) != ZERO_EXTRACT
1498 			     || (GET_MODE (XEXP (SET_DEST (set), 0))
1499 				 == GET_MODE (note))))))
1500 		{
1501 		  /* Temporarily replace the set's source with the
1502 		     contents of the REG_EQUAL note.  The insn will
1503 		     be deleted or recognized by try_combine.  */
1504 		  rtx orig_src = SET_SRC (set);
1505 		  rtx orig_dest = SET_DEST (set);
1506 		  if (GET_CODE (SET_DEST (set)) == ZERO_EXTRACT)
1507 		    SET_DEST (set) = XEXP (SET_DEST (set), 0);
1508 		  SET_SRC (set) = note;
1509 		  i2mod = temp;
1510 		  i2mod_old_rhs = copy_rtx (orig_src);
1511 		  i2mod_new_rhs = copy_rtx (note);
1512 		  next = try_combine (insn, i2mod, NULL, NULL,
1513 				      &new_direct_jump_p,
1514 				      last_combined_insn);
1515 		  i2mod = NULL;
1516 		  if (next)
1517 		    {
1518 		      statistics_counter_event (cfun, "insn-with-note combine", 1);
1519 		      goto retry;
1520 		    }
1521 		  SET_SRC (set) = orig_src;
1522 		  SET_DEST (set) = orig_dest;
1523 		}
1524 	    }
1525 
1526 	  if (!NOTE_P (insn))
1527 	    record_dead_and_set_regs (insn);
1528 
1529 retry:
1530 	  ;
1531 	}
1532     }
1533 
1534   default_rtl_profile ();
1535   clear_bb_flags ();
1536   new_direct_jump_p |= purge_all_dead_edges ();
1537   new_direct_jump_p |= delete_noop_moves ();
1538 
1539   /* Clean up.  */
1540   obstack_free (&insn_link_obstack, NULL);
1541   free (uid_log_links);
1542   free (uid_insn_cost);
1543   reg_stat.release ();
1544 
1545   {
1546     struct undo *undo, *next;
1547     for (undo = undobuf.frees; undo; undo = next)
1548       {
1549 	next = undo->next;
1550 	free (undo);
1551       }
1552     undobuf.frees = 0;
1553   }
1554 
1555   total_attempts += combine_attempts;
1556   total_merges += combine_merges;
1557   total_extras += combine_extras;
1558   total_successes += combine_successes;
1559 
1560   nonzero_sign_valid = 0;
1561   rtl_hooks = general_rtl_hooks;
1562 
1563   /* Make recognizer allow volatile MEMs again.  */
1564   init_recog ();
1565 
1566   return new_direct_jump_p;
1567 }
1568 
1569 /* Wipe the last_xxx fields of reg_stat in preparation for another pass.  */
1570 
1571 static void
1572 init_reg_last (void)
1573 {
1574   unsigned int i;
1575   reg_stat_type *p;
1576 
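  /* The memset below clears only the fields laid out before sign_bit_copies,
     i.e. the last_* fields; the accumulated sign_bit_copies and nonzero_bits
     data that follow in the structure are kept.  */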
1577   FOR_EACH_VEC_ELT (reg_stat, i, p)
1578     memset (p, 0, offsetof (reg_stat_type, sign_bit_copies));
1579 }
1580 
1581 /* Set up any promoted values for incoming argument registers.  */
1582 
1583 static void
1584 setup_incoming_promotions (rtx_insn *first)
1585 {
1586   tree arg;
1587   bool strictly_local = false;
1588 
1589   for (arg = DECL_ARGUMENTS (current_function_decl); arg;
1590        arg = DECL_CHAIN (arg))
1591     {
1592       rtx x, reg = DECL_INCOMING_RTL (arg);
1593       int uns1, uns3;
1594       machine_mode mode1, mode2, mode3, mode4;
1595 
1596       /* Only continue if the incoming argument is in a register.  */
1597       if (!REG_P (reg))
1598 	continue;
1599 
1600       /* Determine, if possible, whether all call sites of the current
1601          function lie within the current compilation unit.  (This does
1602 	 take into account the exporting of a function via taking its
1603 	 address, and so forth.)  */
1604       strictly_local = cgraph_node::local_info (current_function_decl)->local;
1605 
1606       /* The mode and signedness of the argument before any promotions happen
1607          (equal to the mode of the pseudo holding it at that stage).  */
1608       mode1 = TYPE_MODE (TREE_TYPE (arg));
1609       uns1 = TYPE_UNSIGNED (TREE_TYPE (arg));
1610 
1611       /* The mode and signedness of the argument after any source language and
1612          TARGET_PROMOTE_PROTOTYPES-driven promotions.  */
1613       mode2 = TYPE_MODE (DECL_ARG_TYPE (arg));
1614       uns3 = TYPE_UNSIGNED (DECL_ARG_TYPE (arg));
1615 
1616       /* The mode and signedness of the argument as it is actually passed,
1617          see assign_parm_setup_reg in function.c.  */
1618       mode3 = promote_function_mode (TREE_TYPE (arg), mode1, &uns3,
1619 				     TREE_TYPE (cfun->decl), 0);
1620 
1621       /* The mode of the register in which the argument is being passed.  */
1622       mode4 = GET_MODE (reg);
1623 
1624       /* Eliminate sign extensions in the callee when:
1625 	 (a) A mode promotion has occurred;  */
1626       if (mode1 == mode3)
1627 	continue;
1628       /* (b) The mode of the register is the same as the mode of
1629 	     the argument as it is passed; */
1630       if (mode3 != mode4)
1631 	continue;
1632       /* (c) There's no language level extension;  */
1633       if (mode1 == mode2)
1634 	;
1635       /* (c.1) All callers are from the current compilation unit.  If that's
1636 	 the case we don't have to rely on an ABI, we only have to know
1637 	 what we're generating right now, and we know that we will do the
1638 	 mode1 to mode2 promotion with the given sign.  */
1639       else if (!strictly_local)
1640 	continue;
1641       /* (c.2) The combination of the two promotions is useful.  This is
1642 	 true when the signs match, or if the first promotion is unsigned.
1643 		 In the latter case, (sign_extend (zero_extend x)) is the same as
1644 	 (zero_extend (zero_extend x)), so make sure to force UNS3 true.  */
1645       else if (uns1)
1646 	uns3 = true;
1647       else if (uns3)
1648 	continue;
1649 
1650       /* Record that the value was promoted from mode1 to mode3,
1651 	 so that any sign extension at the head of the current
1652 	 function may be eliminated.  */
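      /* The CLOBBER below stands for the unknown incoming value in MODE1;
	 the recorded rtx only says that extending that value to MODE3
	 reproduces the register's contents.  */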
1653       x = gen_rtx_CLOBBER (mode1, const0_rtx);
1654       x = gen_rtx_fmt_e ((uns3 ? ZERO_EXTEND : SIGN_EXTEND), mode3, x);
1655       record_value_for_reg (reg, first, x);
1656     }
1657 }
1658 
1659 /* If MODE has a precision lower than PREC and SRC is a non-negative constant
1660    that would appear negative in MODE, sign-extend SRC for use in nonzero_bits
1661    because some machines (maybe most) will actually do the sign-extension and
1662    this is the conservative approach.
1663 
1664    ??? For 2.5, try to tighten up the MD files in this regard instead of this
1665    kludge.  */
1666 
1667 static rtx
1668 sign_extend_short_imm (rtx src, machine_mode mode, unsigned int prec)
1669 {
1670   scalar_int_mode int_mode;
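  /* For example, with PREC of 32 a QImode constant 0x80 is rewritten as
     (const_int -128): the QImode value is unchanged, but its image in the
     wider word now matches what a sign-extending load would produce.  */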
1671   if (CONST_INT_P (src)
1672       && is_a <scalar_int_mode> (mode, &int_mode)
1673       && GET_MODE_PRECISION (int_mode) < prec
1674       && INTVAL (src) > 0
1675       && val_signbit_known_set_p (int_mode, INTVAL (src)))
1676     src = GEN_INT (INTVAL (src) | ~GET_MODE_MASK (int_mode));
1677 
1678   return src;
1679 }
1680 
1681 /* Update RSP for pseudo-register X from INSN's REG_EQUAL note (if one exists)
1682    and SET.  */
1683 
1684 static void
1685 update_rsp_from_reg_equal (reg_stat_type *rsp, rtx_insn *insn, const_rtx set,
1686 			   rtx x)
1687 {
1688   rtx reg_equal_note = insn ? find_reg_equal_equiv_note (insn) : NULL_RTX;
1689   unsigned HOST_WIDE_INT bits = 0;
1690   rtx reg_equal = NULL, src = SET_SRC (set);
1691   unsigned int num = 0;
1692 
1693   if (reg_equal_note)
1694     reg_equal = XEXP (reg_equal_note, 0);
1695 
1696   if (SHORT_IMMEDIATES_SIGN_EXTEND)
1697     {
1698       src = sign_extend_short_imm (src, GET_MODE (x), BITS_PER_WORD);
1699       if (reg_equal)
1700 	reg_equal = sign_extend_short_imm (reg_equal, GET_MODE (x), BITS_PER_WORD);
1701     }
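  /* Within this single set, only bits that can be nonzero in both SRC and
     the REG_EQUAL value (if any) can be nonzero; the bits recorded for X
     are the union of that over all sets of X.  */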
1702 
1703   /* Don't call nonzero_bits if it cannot change anything.  */
1704   if (rsp->nonzero_bits != HOST_WIDE_INT_M1U)
1705     {
1706       machine_mode mode = GET_MODE (x);
1707       if (GET_MODE_CLASS (mode) == MODE_INT
1708 	  && HWI_COMPUTABLE_MODE_P (mode))
1709 	mode = nonzero_bits_mode;
1710       bits = nonzero_bits (src, mode);
1711       if (reg_equal && bits)
1712 	bits &= nonzero_bits (reg_equal, mode);
1713       rsp->nonzero_bits |= bits;
1714     }
1715 
1716   /* Don't call num_sign_bit_copies if it cannot change anything.  */
1717   if (rsp->sign_bit_copies != 1)
1718     {
1719       num = num_sign_bit_copies (SET_SRC (set), GET_MODE (x));
1720       if (reg_equal && maybe_ne (num, GET_MODE_PRECISION (GET_MODE (x))))
1721 	{
1722 	  unsigned int numeq = num_sign_bit_copies (reg_equal, GET_MODE (x));
1723 	  if (num == 0 || numeq > num)
1724 	    num = numeq;
1725 	}
1726       if (rsp->sign_bit_copies == 0 || num < rsp->sign_bit_copies)
1727 	rsp->sign_bit_copies = num;
1728     }
1729 }
1730 
1731 /* Called via note_stores.  If X is a pseudo that is narrower than
1732    HOST_BITS_PER_WIDE_INT and is being set, record what bits are known zero.
1733 
1734    If we are setting only a portion of X and we can't figure out what
1735    portion, assume all bits will be used since we don't know what will
1736    be happening.
1737 
1738    Similarly, set how many bits of X are known to be copies of the sign bit
1739    at all locations in the function.  This is the smallest number implied
1740    by any set of X.  */
1741 
1742 static void
1743 set_nonzero_bits_and_sign_copies (rtx x, const_rtx set, void *data)
1744 {
1745   rtx_insn *insn = (rtx_insn *) data;
1746   scalar_int_mode mode;
1747 
1748   if (REG_P (x)
1749       && REGNO (x) >= FIRST_PSEUDO_REGISTER
1750       /* If this register is undefined at the start of the file, we can't
1751 	 say what its contents were.  */
1752       && ! REGNO_REG_SET_P
1753 	   (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), REGNO (x))
1754       && is_a <scalar_int_mode> (GET_MODE (x), &mode)
1755       && HWI_COMPUTABLE_MODE_P (mode))
1756     {
1757       reg_stat_type *rsp = &reg_stat[REGNO (x)];
1758 
1759       if (set == 0 || GET_CODE (set) == CLOBBER)
1760 	{
1761 	  rsp->nonzero_bits = GET_MODE_MASK (mode);
1762 	  rsp->sign_bit_copies = 1;
1763 	  return;
1764 	}
1765 
1766       /* Should not happen, as we are only using pseudo registers.  */
1767       gcc_assert (GET_CODE (set) != CLOBBER_HIGH);
1768 
1769       /* If this register is being initialized using itself, and the
1770 	 register is uninitialized in this basic block, and there are
1771 	 no LOG_LINKS which set the register, then part of the
1772 	 register is uninitialized.  In that case we can't assume
1773 	 anything about the number of nonzero bits.
1774 
1775 	 ??? We could do better if we checked this in
1776 	 reg_{nonzero_bits,num_sign_bit_copies}_for_combine.  Then we
1777 	 could avoid making assumptions about the insn which initially
1778 	 sets the register, while still using the information in other
1779 	 insns.  We would have to be careful to check every insn
1780 	 involved in the combination.  */
1781 
1782       if (insn
1783 	  && reg_referenced_p (x, PATTERN (insn))
1784 	  && !REGNO_REG_SET_P (DF_LR_IN (BLOCK_FOR_INSN (insn)),
1785 			       REGNO (x)))
1786 	{
1787 	  struct insn_link *link;
1788 
1789 	  FOR_EACH_LOG_LINK (link, insn)
1790 	    if (dead_or_set_p (link->insn, x))
1791 	      break;
1792 	  if (!link)
1793 	    {
1794 	      rsp->nonzero_bits = GET_MODE_MASK (mode);
1795 	      rsp->sign_bit_copies = 1;
1796 	      return;
1797 	    }
1798 	}
1799 
1800       /* If this is a complex assignment, see if we can convert it into a
1801 	 simple assignment.  */
1802       set = expand_field_assignment (set);
1803 
1804       /* If this is a simple assignment, or we have a paradoxical SUBREG,
1805 	 set what we know about X.  */
1806 
1807       if (SET_DEST (set) == x
1808 	  || (paradoxical_subreg_p (SET_DEST (set))
1809 	      && SUBREG_REG (SET_DEST (set)) == x))
1810 	update_rsp_from_reg_equal (rsp, insn, set, x);
1811       else
1812 	{
1813 	  rsp->nonzero_bits = GET_MODE_MASK (mode);
1814 	  rsp->sign_bit_copies = 1;
1815 	}
1816     }
1817 }
1818 
1819 /* See if INSN can be combined into I3.  PRED, PRED2, SUCC and SUCC2 are,
1820    optionally, insns that were previously combined into I3 or that will be
1821    combined into the merger of INSN and I3.  The order is PRED, PRED2,
1822    INSN, SUCC, SUCC2, I3.
1823 
1824    Return 0 if the combination is not allowed for any reason.
1825 
1826    If the combination is allowed, *PDEST will be set to the single
1827    destination of INSN and *PSRC to the single source, and this function
1828    will return 1.  */
1829 
1830 static int
1831 can_combine_p (rtx_insn *insn, rtx_insn *i3, rtx_insn *pred ATTRIBUTE_UNUSED,
1832 	       rtx_insn *pred2 ATTRIBUTE_UNUSED, rtx_insn *succ, rtx_insn *succ2,
1833 	       rtx *pdest, rtx *psrc)
1834 {
1835   int i;
1836   const_rtx set = 0;
1837   rtx src, dest;
1838   rtx_insn *p;
1839   rtx link;
1840   bool all_adjacent = true;
1841   int (*is_volatile_p) (const_rtx);
1842 
1843   if (succ)
1844     {
1845       if (succ2)
1846 	{
1847 	  if (next_active_insn (succ2) != i3)
1848 	    all_adjacent = false;
1849 	  if (next_active_insn (succ) != succ2)
1850 	    all_adjacent = false;
1851 	}
1852       else if (next_active_insn (succ) != i3)
1853 	all_adjacent = false;
1854       if (next_active_insn (insn) != succ)
1855 	all_adjacent = false;
1856     }
1857   else if (next_active_insn (insn) != i3)
1858     all_adjacent = false;
1859 
1860   /* Can combine only if previous insn is a SET of a REG, a SUBREG or CC0,
1861      or a PARALLEL consisting of such a SET and CLOBBERs.
1862 
1863      If INSN has CLOBBER parallel parts, ignore them for our processing.
1864      By definition, these happen during the execution of the insn.  When it
1865      is merged with another insn, all bets are off.  If they are, in fact,
1866      needed and aren't also supplied in I3, they may be added by
1867      recog_for_combine.  Otherwise, it won't match.
1868 
1869      We can also ignore a SET whose SET_DEST is mentioned in a REG_UNUSED
1870      note.
1871 
1872      Get the source and destination of INSN.  If more than one, can't
1873      combine.  */
1874 
1875   if (GET_CODE (PATTERN (insn)) == SET)
1876     set = PATTERN (insn);
1877   else if (GET_CODE (PATTERN (insn)) == PARALLEL
1878 	   && GET_CODE (XVECEXP (PATTERN (insn), 0, 0)) == SET)
1879     {
1880       for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1881 	{
1882 	  rtx elt = XVECEXP (PATTERN (insn), 0, i);
1883 
1884 	  switch (GET_CODE (elt))
1885 	    {
1886 	    /* This is important to combine floating point insns
1887 	       for the SH4 port.  */
1888 	    case USE:
1889 	      /* Combining an isolated USE doesn't make sense.
1890 		 We depend here on combinable_i3pat to reject them.  */
1891 	      /* The code below this loop only verifies that the inputs of
1892 		 the SET in INSN do not change.  We call reg_set_between_p
1893 		 to verify that the REG in the USE does not change between
1894 		 I3 and INSN.
1895 		 If the USE in INSN was for a pseudo register, the matching
1896 		 insn pattern will likely match any register; combining this
1897 		 with any other USE would only be safe if we knew that the
1898 		 used registers have identical values, or if there was
1899 		 something to tell them apart, e.g. different modes.  For
1900 		 now, we forgo such complicated tests and simply disallow
1901 		 combining of USES of pseudo registers with any other USE.  */
1902 	      if (REG_P (XEXP (elt, 0))
1903 		  && GET_CODE (PATTERN (i3)) == PARALLEL)
1904 		{
1905 		  rtx i3pat = PATTERN (i3);
1906 		  int i = XVECLEN (i3pat, 0) - 1;
1907 		  unsigned int regno = REGNO (XEXP (elt, 0));
1908 
1909 		  do
1910 		    {
1911 		      rtx i3elt = XVECEXP (i3pat, 0, i);
1912 
1913 		      if (GET_CODE (i3elt) == USE
1914 			  && REG_P (XEXP (i3elt, 0))
1915 			  && (REGNO (XEXP (i3elt, 0)) == regno
1916 			      ? reg_set_between_p (XEXP (elt, 0),
1917 						   PREV_INSN (insn), i3)
1918 			      : regno >= FIRST_PSEUDO_REGISTER))
1919 			return 0;
1920 		    }
1921 		  while (--i >= 0);
1922 		}
1923 	      break;
1924 
1925 	      /* We can ignore CLOBBERs.  */
1926 	    case CLOBBER:
1927 	    case CLOBBER_HIGH:
1928 	      break;
1929 
1930 	    case SET:
1931 	      /* Ignore SETs whose result isn't used but not those that
1932 		 have side-effects.  */
1933 	      if (find_reg_note (insn, REG_UNUSED, SET_DEST (elt))
1934 		  && insn_nothrow_p (insn)
1935 		  && !side_effects_p (elt))
1936 		break;
1937 
1938 	      /* If we have already found a SET, this is a second one and
1939 		 so we cannot combine with this insn.  */
1940 	      if (set)
1941 		return 0;
1942 
1943 	      set = elt;
1944 	      break;
1945 
1946 	    default:
1947 	      /* Anything else means we can't combine.  */
1948 	      return 0;
1949 	    }
1950 	}
1951 
1952       if (set == 0
1953 	  /* If SET_SRC is an ASM_OPERANDS we can't throw away these CLOBBERs,
1954 	     so don't do anything with it.  */
1955 	  || GET_CODE (SET_SRC (set)) == ASM_OPERANDS)
1956 	return 0;
1957     }
1958   else
1959     return 0;
1960 
1961   if (set == 0)
1962     return 0;
1963 
1964   /* The simplification in expand_field_assignment may call back to
1965      get_last_value, so set a safeguard here.  */
1966   subst_low_luid = DF_INSN_LUID (insn);
1967 
1968   set = expand_field_assignment (set);
1969   src = SET_SRC (set), dest = SET_DEST (set);
1970 
1971   /* Do not eliminate a user-specified register if it is in an
1972      asm input, because we may break the register asm usage defined
1973      in the GCC manual if we allow that.
1974      Be aware that this may cover more cases than we expect, but this
1975      should be harmless.  */
1976   if (REG_P (dest) && REG_USERVAR_P (dest) && HARD_REGISTER_P (dest)
1977       && extract_asm_operands (PATTERN (i3)))
1978     return 0;
1979 
1980   /* Don't eliminate a store in the stack pointer.  */
1981   if (dest == stack_pointer_rtx
1982       /* Don't combine with an insn that sets a register to itself if it has
1983 	 a REG_EQUAL note.  This may be part of a LIBCALL sequence.  */
1984       || (rtx_equal_p (src, dest) && find_reg_note (insn, REG_EQUAL, NULL_RTX))
1985       /* Can't merge an ASM_OPERANDS.  */
1986       || GET_CODE (src) == ASM_OPERANDS
1987       /* Can't merge a function call.  */
1988       || GET_CODE (src) == CALL
1989       /* Don't eliminate a function call argument.  */
1990       || (CALL_P (i3)
1991 	  && (find_reg_fusage (i3, USE, dest)
1992 	      || (REG_P (dest)
1993 		  && REGNO (dest) < FIRST_PSEUDO_REGISTER
1994 		  && global_regs[REGNO (dest)])))
1995       /* Don't substitute into an incremented register.  */
1996       || FIND_REG_INC_NOTE (i3, dest)
1997       || (succ && FIND_REG_INC_NOTE (succ, dest))
1998       || (succ2 && FIND_REG_INC_NOTE (succ2, dest))
1999       /* Don't substitute into a non-local goto, this confuses CFG.  */
2000       || (JUMP_P (i3) && find_reg_note (i3, REG_NON_LOCAL_GOTO, NULL_RTX))
2001       /* Make sure that DEST is not used after INSN but before SUCC, or
2002 	 after SUCC and before SUCC2, or after SUCC2 but before I3.  */
2003       || (!all_adjacent
2004 	  && ((succ2
2005 	       && (reg_used_between_p (dest, succ2, i3)
2006 		   || reg_used_between_p (dest, succ, succ2)))
2007 	      || (!succ2 && succ && reg_used_between_p (dest, succ, i3))
2008 	      || (!succ2 && !succ && reg_used_between_p (dest, insn, i3))
2009 	      || (succ
2010 		  /* SUCC and SUCC2 can be split halves from a PARALLEL; in
2011 		     that case SUCC is not in the insn stream, so use SUCC2
2012 		     instead for this test.  */
2013 		  && reg_used_between_p (dest, insn,
2014 					 succ2
2015 					 && INSN_UID (succ) == INSN_UID (succ2)
2016 					 ? succ2 : succ))))
2017       /* Make sure that the value that is to be substituted for the register
2018 	 does not use any registers whose values alter in between.  However,
2019 	 if the insns are adjacent, a use can't cross a set even though we
2020 	 think it might (this can happen for a sequence of insns each setting
2021 	 the same destination; last_set of that register might point to
2022 	 a NOTE).  If INSN has a REG_EQUIV note, the register is always
2023 	 equivalent to the memory so the substitution is valid even if there
2024 	 are intervening stores.  Also, don't move a volatile asm or
2025 	 UNSPEC_VOLATILE across any other insns.  */
2026       || (! all_adjacent
2027 	  && (((!MEM_P (src)
2028 		|| ! find_reg_note (insn, REG_EQUIV, src))
2029 	       && modified_between_p (src, insn, i3))
2030 	      || (GET_CODE (src) == ASM_OPERANDS && MEM_VOLATILE_P (src))
2031 	      || GET_CODE (src) == UNSPEC_VOLATILE))
2032       /* Don't combine across a CALL_INSN, because that would possibly
2033 	 change whether the life span of some REGs crosses calls or not,
2034 	 and it is a pain to update that information.
2035 	 Exception: if source is a constant, moving it later can't hurt.
2036 	 Accept that as a special case.  */
2037       || (DF_INSN_LUID (insn) < last_call_luid && ! CONSTANT_P (src)))
2038     return 0;
2039 
2040   /* DEST must either be a REG or CC0.  */
2041   if (REG_P (dest))
2042     {
2043       /* If register alignment is being enforced for multi-word items in all
2044 	 cases except for parameters, it is possible to have a register copy
2045 	 insn referencing a hard register that is not allowed to contain the
2046 	 mode being copied and which would not be valid as an operand of most
2047 	 insns.  Eliminate this problem by not combining with such an insn.
2048 
2049 	 Also, on some machines we don't want to extend the life of a hard
2050 	 register.  */
2051 
2052       if (REG_P (src)
2053 	  && ((REGNO (dest) < FIRST_PSEUDO_REGISTER
2054 	       && !targetm.hard_regno_mode_ok (REGNO (dest), GET_MODE (dest)))
2055 	      /* Don't extend the life of a hard register unless it is
2056 		 a user variable (if we have few registers) or it can't
2057 		 fit into the desired register (meaning something special
2058 		 is going on).
2059 		 Also avoid substituting a return register into I3, because
2060 		 reload can't handle a conflict with constraints of other
2061 		 inputs.  */
2062 	      || (REGNO (src) < FIRST_PSEUDO_REGISTER
2063 		  && !targetm.hard_regno_mode_ok (REGNO (src),
2064 						  GET_MODE (src)))))
2065 	return 0;
2066     }
2067   else if (GET_CODE (dest) != CC0)
2068     return 0;
2069 
2070 
2071   if (GET_CODE (PATTERN (i3)) == PARALLEL)
2072     for (i = XVECLEN (PATTERN (i3), 0) - 1; i >= 0; i--)
2073       if (GET_CODE (XVECEXP (PATTERN (i3), 0, i)) == CLOBBER)
2074 	{
2075 	  rtx reg = XEXP (XVECEXP (PATTERN (i3), 0, i), 0);
2076 
2077 	  /* If the clobber represents an earlyclobber operand, we must not
2078 	     substitute an expression containing the clobbered register.
2079 	     As we do not analyze the constraint strings here, we have to
2080 	     make the conservative assumption.  However, if the register is
2081 	     a fixed hard reg, the clobber cannot represent any operand;
2082 	     we leave it up to the machine description to either accept or
2083 	     reject use-and-clobber patterns.  */
2084 	  if (!REG_P (reg)
2085 	      || REGNO (reg) >= FIRST_PSEUDO_REGISTER
2086 	      || !fixed_regs[REGNO (reg)])
2087 	    if (reg_overlap_mentioned_p (reg, src))
2088 	      return 0;
2089 	}
2090 
2091   /* If INSN contains anything volatile, or is an `asm' (whether volatile
2092      or not), reject, unless nothing volatile comes between it and I3.  */
2093 
2094   if (GET_CODE (src) == ASM_OPERANDS || volatile_refs_p (src))
2095     {
2096       /* Make sure neither succ nor succ2 contains a volatile reference.  */
2097       if (succ2 != 0 && volatile_refs_p (PATTERN (succ2)))
2098 	return 0;
2099       if (succ != 0 && volatile_refs_p (PATTERN (succ)))
2100 	return 0;
2101       /* We'll check insns between INSN and I3 below.  */
2102     }
2103 
2104   /* If INSN is an asm, and DEST is a hard register, reject, since it has
2105      to be an explicit register variable, and was chosen for a reason.  */
2106 
2107   if (GET_CODE (src) == ASM_OPERANDS
2108       && REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER)
2109     return 0;
2110 
2111   /* If INSN contains volatile references (specifically volatile MEMs),
2112      we cannot combine across any other volatile references.
2113      Even if INSN doesn't contain volatile references, any intervening
2114      volatile insn might affect machine state.  */
2115 
2116   is_volatile_p = volatile_refs_p (PATTERN (insn))
2117     ? volatile_refs_p
2118     : volatile_insn_p;
2119 
2120   for (p = NEXT_INSN (insn); p != i3; p = NEXT_INSN (p))
2121     if (INSN_P (p) && p != succ && p != succ2 && is_volatile_p (PATTERN (p)))
2122       return 0;
2123 
2124   /* If INSN contains an autoincrement or autodecrement, make sure that
2125      register is not used between there and I3, and not already used in
2126      I3 either.  Neither must it be used in PRED or SUCC, if they exist.
2127      Also insist that I3 not be a jump; if it were one
2128      and the incremented register were spilled, we would lose.  */
2129 
2130   if (AUTO_INC_DEC)
2131     for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2132       if (REG_NOTE_KIND (link) == REG_INC
2133 	  && (JUMP_P (i3)
2134 	      || reg_used_between_p (XEXP (link, 0), insn, i3)
2135 	      || (pred != NULL_RTX
2136 		  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred)))
2137 	      || (pred2 != NULL_RTX
2138 		  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (pred2)))
2139 	      || (succ != NULL_RTX
2140 		  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ)))
2141 	      || (succ2 != NULL_RTX
2142 		  && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (succ2)))
2143 	      || reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i3))))
2144 	return 0;
2145 
2146   /* Don't combine an insn that follows a CC0-setting insn.
2147      An insn that uses CC0 must not be separated from the one that sets it.
2148      We do, however, allow I2 to follow a CC0-setting insn if that insn
2149      is passed as I1; in that case it will be deleted also.
2150      We also allow combining in this case if all the insns are adjacent
2151      because that would leave the two CC0 insns adjacent as well.
2152      It would be more logical to test whether CC0 occurs inside I1 or I2,
2153      but that would be much slower, and this ought to be equivalent.  */
2154 
2155   if (HAVE_cc0)
2156     {
2157       p = prev_nonnote_insn (insn);
2158       if (p && p != pred && NONJUMP_INSN_P (p) && sets_cc0_p (PATTERN (p))
2159 	  && ! all_adjacent)
2160 	return 0;
2161     }
2162 
2163   /* If we get here, we have passed all the tests and the combination is
2164      to be allowed.  */
2165 
2166   *pdest = dest;
2167   *psrc = src;
2168 
2169   return 1;
2170 }
2171 
2172 /* LOC is the location within I3 that contains its pattern or the component
2173    of a PARALLEL of the pattern.  We validate that it is valid for combining.
2174 
2175    One problem is that if I3 modifies its output, as opposed to replacing it
2176    entirely, we can't allow the output to contain I2DEST, I1DEST or I0DEST, as
2177    doing so would produce an insn that is not equivalent to the original insns.
2178 
2179    Consider:
2180 
2181 	 (set (reg:DI 101) (reg:DI 100))
2182 	 (set (subreg:SI (reg:DI 101) 0) <foo>)
2183 
2184    This is NOT equivalent to:
2185 
2186 	 (parallel [(set (subreg:SI (reg:DI 100) 0) <foo>)
2187 		    (set (reg:DI 101) (reg:DI 100))])
2188 
2189    Not only does this modify 100 (in which case it might still be valid
2190    if 100 were dead in I2), it sets 101 to the ORIGINAL value of 100.
2191 
2192    We can also run into a problem if I2 sets a register that I1
2193    uses and I1 gets directly substituted into I3 (not via I2).  In that
2194    case, we would be getting the wrong value of I2DEST into I3, so we
2195    must reject the combination.  This case occurs when I2 and I1 both
2196    feed into I3, rather than when I1 feeds into I2, which feeds into I3.
2197    If I1_NOT_IN_SRC is nonzero, it means that finding I1 in the source
2198    of a SET must prevent combination from occurring.  The same situation
2199    can occur for I0, in which case I0_NOT_IN_SRC is set.
2200 
2201    Before doing the above check, we first try to expand a field assignment
2202    into a set of logical operations.
2203 
2204    If PI3_DEST_KILLED is nonzero, it is a pointer to a location in which
2205    we place a register that is both set and used within I3.  If more than one
2206    such register is detected, we fail.
2207 
2208    Return 1 if the combination is valid, zero otherwise.  */
2209 
2210 static int
2211 combinable_i3pat (rtx_insn *i3, rtx *loc, rtx i2dest, rtx i1dest, rtx i0dest,
2212 		  int i1_not_in_src, int i0_not_in_src, rtx *pi3dest_killed)
2213 {
2214   rtx x = *loc;
2215 
2216   if (GET_CODE (x) == SET)
2217     {
2218       rtx set = x;
2219       rtx dest = SET_DEST (set);
2220       rtx src = SET_SRC (set);
2221       rtx inner_dest = dest;
2222       rtx subdest;
2223 
2224       while (GET_CODE (inner_dest) == STRICT_LOW_PART
2225 	     || GET_CODE (inner_dest) == SUBREG
2226 	     || GET_CODE (inner_dest) == ZERO_EXTRACT)
2227 	inner_dest = XEXP (inner_dest, 0);
2228 
2229       /* Check for the case where I3 modifies its output, as discussed
2230 	 above.  We don't want to prevent pseudos from being combined
2231 	 into the address of a MEM, so only prevent the combination if
2232 	 i1 or i2 set the same MEM.  */
2233       if ((inner_dest != dest &&
2234 	   (!MEM_P (inner_dest)
2235 	    || rtx_equal_p (i2dest, inner_dest)
2236 	    || (i1dest && rtx_equal_p (i1dest, inner_dest))
2237 	    || (i0dest && rtx_equal_p (i0dest, inner_dest)))
2238 	   && (reg_overlap_mentioned_p (i2dest, inner_dest)
2239 	       || (i1dest && reg_overlap_mentioned_p (i1dest, inner_dest))
2240 	       || (i0dest && reg_overlap_mentioned_p (i0dest, inner_dest))))
2241 
2242 	  /* This is the same test done in can_combine_p except we can't test
2243 	     all_adjacent; we don't have to, since this instruction will stay
2244 	     in place, thus we are not considering increasing the lifetime of
2245 	     INNER_DEST.
2246 
2247 	     Also, if this insn sets a function argument, combining it with
2248 	     something that might need a spill could clobber a previous
2249 	     function argument; the all_adjacent test in can_combine_p also
2250 	     checks this; here, we do a more specific test for this case.  */
2251 
2252 	  || (REG_P (inner_dest)
2253 	      && REGNO (inner_dest) < FIRST_PSEUDO_REGISTER
2254 	      && !targetm.hard_regno_mode_ok (REGNO (inner_dest),
2255 					      GET_MODE (inner_dest)))
2256 	  || (i1_not_in_src && reg_overlap_mentioned_p (i1dest, src))
2257 	  || (i0_not_in_src && reg_overlap_mentioned_p (i0dest, src)))
2258 	return 0;
2259 
2260       /* If DEST is used in I3, it is being killed in this insn, so
2261 	 record that for later.  We have to consider paradoxical
2262 	 subregs here, since they kill the whole register, but we
2263 	 ignore partial subregs, STRICT_LOW_PART, etc.
2264 	 Never add REG_DEAD notes for the FRAME_POINTER_REGNUM or the
2265 	 STACK_POINTER_REGNUM, since these are always considered to be
2266 	 live.  Similarly for ARG_POINTER_REGNUM if it is fixed.  */
2267       subdest = dest;
2268       if (GET_CODE (subdest) == SUBREG && !partial_subreg_p (subdest))
2269 	subdest = SUBREG_REG (subdest);
2270       if (pi3dest_killed
2271 	  && REG_P (subdest)
2272 	  && reg_referenced_p (subdest, PATTERN (i3))
2273 	  && REGNO (subdest) != FRAME_POINTER_REGNUM
2274 	  && (HARD_FRAME_POINTER_IS_FRAME_POINTER
2275 	      || REGNO (subdest) != HARD_FRAME_POINTER_REGNUM)
2276 	  && (FRAME_POINTER_REGNUM == ARG_POINTER_REGNUM
2277 	      || (REGNO (subdest) != ARG_POINTER_REGNUM
2278 		  || ! fixed_regs [REGNO (subdest)]))
2279 	  && REGNO (subdest) != STACK_POINTER_REGNUM)
2280 	{
2281 	  if (*pi3dest_killed)
2282 	    return 0;
2283 
2284 	  *pi3dest_killed = subdest;
2285 	}
2286     }
2287 
2288   else if (GET_CODE (x) == PARALLEL)
2289     {
2290       int i;
2291 
2292       for (i = 0; i < XVECLEN (x, 0); i++)
2293 	if (! combinable_i3pat (i3, &XVECEXP (x, 0, i), i2dest, i1dest, i0dest,
2294 				i1_not_in_src, i0_not_in_src, pi3dest_killed))
2295 	  return 0;
2296     }
2297 
2298   return 1;
2299 }
2300 
2301 /* Return 1 if X is an arithmetic expression that contains a multiplication
2302    or division.  We don't count multiplications by powers of two here.  */
2303 
2304 static int
2305 contains_muldiv (rtx x)
2306 {
2307   switch (GET_CODE (x))
2308     {
2309     case MOD:  case DIV:  case UMOD:  case UDIV:
2310       return 1;
2311 
2312     case MULT:
2313       return ! (CONST_INT_P (XEXP (x, 1))
2314 		&& pow2p_hwi (UINTVAL (XEXP (x, 1))));
2315     default:
2316       if (BINARY_P (x))
2317 	return contains_muldiv (XEXP (x, 0))
2318 	    || contains_muldiv (XEXP (x, 1));
2319 
2320       if (UNARY_P (x))
2321 	return contains_muldiv (XEXP (x, 0));
2322 
2323       return 0;
2324     }
2325 }
2326 
2327 /* Determine whether INSN can be used in a combination.  Return nonzero if
2328    not.  This is used in try_combine to detect early some cases where we
2329    can't perform combinations.  */
2330 
2331 static int
2332 cant_combine_insn_p (rtx_insn *insn)
2333 {
2334   rtx set;
2335   rtx src, dest;
2336 
2337   /* If this isn't really an insn, we can't do anything.
2338      This can occur when flow deletes an insn that it has merged into an
2339      auto-increment address.  */
2340   if (!NONDEBUG_INSN_P (insn))
2341     return 1;
2342 
2343   /* Never combine loads and stores involving hard regs that are likely
2344      to be spilled.  The register allocator can usually handle such
2345      reg-reg moves by tying.  If we allow the combiner to make
2346      substitutions of likely-spilled regs, reload might die.
2347      As an exception, we allow combinations involving fixed regs; these are
2348      not available to the register allocator so there's no risk involved.  */
2349 
2350   set = single_set (insn);
2351   if (! set)
2352     return 0;
2353   src = SET_SRC (set);
2354   dest = SET_DEST (set);
2355   if (GET_CODE (src) == SUBREG)
2356     src = SUBREG_REG (src);
2357   if (GET_CODE (dest) == SUBREG)
2358     dest = SUBREG_REG (dest);
2359   if (REG_P (src) && REG_P (dest)
2360       && ((HARD_REGISTER_P (src)
2361 	   && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src))
2362 #ifdef LEAF_REGISTERS
2363 	   && ! LEAF_REGISTERS [REGNO (src)])
2364 #else
2365 	   )
2366 #endif
2367 	  || (HARD_REGISTER_P (dest)
2368 	      && ! TEST_HARD_REG_BIT (fixed_reg_set, REGNO (dest))
2369 	      && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (dest))))))
2370     return 1;
2371 
2372   return 0;
2373 }
2374 
2375 struct likely_spilled_retval_info
2376 {
2377   unsigned regno, nregs;
2378   unsigned mask;
2379 };
2380 
2381 /* Called via note_stores by likely_spilled_retval_p.  Remove from info->mask
2382    hard registers that are known to be written to / clobbered in full.  */
2383 static void
2384 likely_spilled_retval_1 (rtx x, const_rtx set, void *data)
2385 {
2386   struct likely_spilled_retval_info *const info =
2387     (struct likely_spilled_retval_info *) data;
2388   unsigned regno, nregs;
2389   unsigned new_mask;
2390 
2391   if (!REG_P (XEXP (set, 0)))
2392     return;
2393   regno = REGNO (x);
2394   if (regno >= info->regno + info->nregs)
2395     return;
2396   nregs = REG_NREGS (x);
2397   if (regno + nregs <= info->regno)
2398     return;
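  /* Build a mask with one bit per register written by this store, then
     align it with INFO->mask, whose bit 0 corresponds to INFO->regno.  */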
2399   new_mask = (2U << (nregs - 1)) - 1;
2400   if (regno < info->regno)
2401     new_mask >>= info->regno - regno;
2402   else
2403     new_mask <<= regno - info->regno;
2404   info->mask &= ~new_mask;
2405 }
2406 
2407 /* Return nonzero iff part of the return value is live during INSN, and
2408    it is likely spilled.  This can happen when more than one insn is needed
2409    to copy the return value, e.g. when we consider combining into the
2410    second copy insn for a complex value.  */
2411 
2412 static int
2413 likely_spilled_retval_p (rtx_insn *insn)
2414 {
2415   rtx_insn *use = BB_END (this_basic_block);
2416   rtx reg;
2417   rtx_insn *p;
2418   unsigned regno, nregs;
2419   /* We assume here that no machine mode needs more than
2420      32 hard registers when the value overlaps with a register
2421      for which TARGET_FUNCTION_VALUE_REGNO_P is true.  */
2422   unsigned mask;
2423   struct likely_spilled_retval_info info;
2424 
2425   if (!NONJUMP_INSN_P (use) || GET_CODE (PATTERN (use)) != USE || insn == use)
2426     return 0;
2427   reg = XEXP (PATTERN (use), 0);
2428   if (!REG_P (reg) || !targetm.calls.function_value_regno_p (REGNO (reg)))
2429     return 0;
2430   regno = REGNO (reg);
2431   nregs = REG_NREGS (reg);
2432   if (nregs == 1)
2433     return 0;
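  /* Start with one mask bit for each hard register making up the return
     value; bit 0 corresponds to REGNO.  */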
2434   mask = (2U << (nregs - 1)) - 1;
2435 
2436   /* Disregard parts of the return value that are set later.  */
2437   info.regno = regno;
2438   info.nregs = nregs;
2439   info.mask = mask;
2440   for (p = PREV_INSN (use); info.mask && p != insn; p = PREV_INSN (p))
2441     if (INSN_P (p))
2442       note_stores (PATTERN (p), likely_spilled_retval_1, &info);
2443   mask = info.mask;
2444 
2445   /* Check if any of the (probably) live return value registers is
2446      likely spilled.  */
2447   nregs--;
2448   do
2449     {
2450       if ((mask & 1 << nregs)
2451 	  && targetm.class_likely_spilled_p (REGNO_REG_CLASS (regno + nregs)))
2452 	return 1;
2453     } while (nregs--);
2454   return 0;
2455 }
2456 
2457 /* Adjust INSN after we made a change to its destination.
2458 
2459    Changing the destination can invalidate notes that say something about
2460    the results of the insn and a LOG_LINK pointing to the insn.  */
2461 
2462 static void
2463 adjust_for_new_dest (rtx_insn *insn)
2464 {
2465   /* For notes, be conservative and simply remove them.  */
2466   remove_reg_equal_equiv_notes (insn);
2467 
2468   /* The new insn will have a destination that was previously the destination
2469      of an insn just above it.  Call distribute_links to make a LOG_LINK from
2470      the next use of that destination.  */
2471 
2472   rtx set = single_set (insn);
2473   gcc_assert (set);
2474 
2475   rtx reg = SET_DEST (set);
2476 
2477   while (GET_CODE (reg) == ZERO_EXTRACT
2478 	 || GET_CODE (reg) == STRICT_LOW_PART
2479 	 || GET_CODE (reg) == SUBREG)
2480     reg = XEXP (reg, 0);
2481   gcc_assert (REG_P (reg));
2482 
2483   distribute_links (alloc_insn_link (insn, REGNO (reg), NULL));
2484 
2485   df_insn_rescan (insn);
2486 }
2487 
2488 /* Return TRUE if combine can reuse reg X in mode MODE.
2489    ADDED_SETS is nonzero if the original set is still required.  */
2490 static bool
2491 can_change_dest_mode (rtx x, int added_sets, machine_mode mode)
2492 {
2493   unsigned int regno;
2494 
2495   if (!REG_P (x))
2496     return false;
2497 
2498   /* Don't change between modes with different underlying register sizes,
2499      since this could lead to invalid subregs.  */
2500   if (maybe_ne (REGMODE_NATURAL_SIZE (mode),
2501 		REGMODE_NATURAL_SIZE (GET_MODE (x))))
2502     return false;
2503 
2504   regno = REGNO (x);
2505   /* Allow hard registers if the new mode is legal, and occupies no more
2506      registers than the old mode.  */
2507   if (regno < FIRST_PSEUDO_REGISTER)
2508     return (targetm.hard_regno_mode_ok (regno, mode)
2509 	    && REG_NREGS (x) >= hard_regno_nregs (regno, mode));
2510 
2511   /* Or a pseudo that is only used once.  */
2512   return (regno < reg_n_sets_max
2513 	  && REG_N_SETS (regno) == 1
2514 	  && !added_sets
2515 	  && !REG_USERVAR_P (x));
2516 }
2517 
2518 
2519 /* Check whether X, the destination of a set, refers to part of
2520    the register specified by REG.  */
2521 
2522 static bool
2523 reg_subword_p (rtx x, rtx reg)
2524 {
2525   /* Check that reg is an integer mode register.  */
2526   if (!REG_P (reg) || GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT)
2527     return false;
2528 
2529   if (GET_CODE (x) == STRICT_LOW_PART
2530       || GET_CODE (x) == ZERO_EXTRACT)
2531     x = XEXP (x, 0);
2532 
2533   return GET_CODE (x) == SUBREG
2534 	 && SUBREG_REG (x) == reg
2535 	 && GET_MODE_CLASS (GET_MODE (x)) == MODE_INT;
2536 }
2537 
2538 /* Delete the unconditional jump INSN and adjust the CFG correspondingly.
2539    Note that the INSN should be deleted *after* removing dead edges, so
2540    that the kept edge is the fallthrough edge for a (set (pc) (pc))
2541    but not for a (set (pc) (label_ref FOO)).  */
2542 
2543 static void
2544 update_cfg_for_uncondjump (rtx_insn *insn)
2545 {
2546   basic_block bb = BLOCK_FOR_INSN (insn);
2547   gcc_assert (BB_END (bb) == insn);
2548 
2549   purge_dead_edges (bb);
2550 
2551   delete_insn (insn);
2552   if (EDGE_COUNT (bb->succs) == 1)
2553     {
2554       rtx_insn *insn;
2555 
2556       single_succ_edge (bb)->flags |= EDGE_FALLTHRU;
2557 
2558       /* Remove barriers from the footer if there are any.  */
2559       for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
2560 	if (BARRIER_P (insn))
2561 	  {
2562 	    if (PREV_INSN (insn))
2563 	      SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
2564 	    else
2565 	      BB_FOOTER (bb) = NEXT_INSN (insn);
2566 	    if (NEXT_INSN (insn))
2567 	      SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
2568 	  }
2569 	else if (LABEL_P (insn))
2570 	  break;
2571     }
2572 }
2573 
2574 /* Return whether PAT is a PARALLEL of exactly N register SETs followed
2575    by an arbitrary number of CLOBBERs.  */
2576 static bool
2577 is_parallel_of_n_reg_sets (rtx pat, int n)
2578 {
2579   if (GET_CODE (pat) != PARALLEL)
2580     return false;
2581 
2582   int len = XVECLEN (pat, 0);
2583   if (len < n)
2584     return false;
2585 
2586   int i;
2587   for (i = 0; i < n; i++)
2588     if (GET_CODE (XVECEXP (pat, 0, i)) != SET
2589 	|| !REG_P (SET_DEST (XVECEXP (pat, 0, i))))
2590       return false;
2591   for ( ; i < len; i++)
2592     switch (GET_CODE (XVECEXP (pat, 0, i)))
2593       {
2594       case CLOBBER:
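	/* A CLOBBER of const0_rtx does not clobber any real register;
	   be conservative and reject the whole PARALLEL.  */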
2595 	if (XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
2596 	  return false;
2597 	break;
2598       case CLOBBER_HIGH:
2599 	break;
2600       default:
2601 	return false;
2602       }
2603   return true;
2604 }
2605 
2606 /* Return whether INSN, a PARALLEL of N register SETs (and maybe some
2607    CLOBBERs), can be split into individual SETs in that order, without
2608    changing semantics.  */
2609 static bool
2610 can_split_parallel_of_n_reg_sets (rtx_insn *insn, int n)
2611 {
2612   if (!insn_nothrow_p (insn))
2613     return false;
2614 
2615   rtx pat = PATTERN (insn);
2616 
2617   int i, j;
2618   for (i = 0; i < n; i++)
2619     {
2620       if (side_effects_p (SET_SRC (XVECEXP (pat, 0, i))))
2621 	return false;
2622 
2623       rtx reg = SET_DEST (XVECEXP (pat, 0, i));
2624 
2625       for (j = i + 1; j < n; j++)
2626 	if (reg_referenced_p (reg, XVECEXP (pat, 0, j)))
2627 	  return false;
2628     }
2629 
2630   return true;
2631 }
2632 
2633 /* Return whether X is just a single set, with the source
2634    a general_operand.  */
2635 static bool
2636 is_just_move (rtx x)
2637 {
2638   if (INSN_P (x))
2639     x = PATTERN (x);
2640 
2641   return (GET_CODE (x) == SET && general_operand (SET_SRC (x), VOIDmode));
2642 }
2643 
2644 /* Callback function to count autoincs.  */
2645 
2646 static int
2647 count_auto_inc (rtx, rtx, rtx, rtx, rtx, void *arg)
2648 {
2649   (*((int *) arg))++;
2650 
2651   return 0;
2652 }
2653 
2654 /* Try to combine the insns I0, I1 and I2 into I3.
2655    Here I0, I1 and I2 appear earlier than I3.
2656    I0 and I1 can be zero; then we combine just I2 into I3, or I1 and I2 into
2657    I3.
2658 
2659    If we are combining more than two insns and the resulting insn is not
2660    recognized, try splitting it into two insns.  If that happens, I2 and I3
2661    are retained and I1/I0 are pseudo-deleted by turning them into a NOTE.
2662    Otherwise, I0, I1 and I2 are pseudo-deleted.
2663 
2664    Return 0 if the combination does not work.  Then nothing is changed.
2665    If we did the combination, return the insn at which combine should
2666    resume scanning.
2667 
2668    Set NEW_DIRECT_JUMP_P to a nonzero value if try_combine creates a
2669    new direct jump instruction.
2670 
2671    LAST_COMBINED_INSN is either I3, or some insn after I3 that was
2672    passed as I3 to an earlier try_combine within the same basic
2673    block.  */
2674 
2675 static rtx_insn *
2676 try_combine (rtx_insn *i3, rtx_insn *i2, rtx_insn *i1, rtx_insn *i0,
2677 	     int *new_direct_jump_p, rtx_insn *last_combined_insn)
2678 {
2679   /* New patterns for I3 and I2, respectively.  */
2680   rtx newpat, newi2pat = 0;
2681   rtvec newpat_vec_with_clobbers = 0;
2682   int substed_i2 = 0, substed_i1 = 0, substed_i0 = 0;
2683   /* Indicates need to preserve SET in I0, I1 or I2 in I3 if it is not
2684      dead.  */
2685   int added_sets_0, added_sets_1, added_sets_2;
2686   /* Total number of SETs to put into I3.  */
2687   int total_sets;
2688   /* Nonzero if I2's or I1's body now appears in I3.  */
2689   int i2_is_used = 0, i1_is_used = 0;
2690   /* INSN_CODEs for new I3, new I2, and user of condition code.  */
2691   int insn_code_number, i2_code_number = 0, other_code_number = 0;
2692   /* Contains I3 if the destination of I3 is used in its source, which means
2693      that the old life of I3 is being killed.  If that usage is placed into
2694      I2 and not in I3, a REG_DEAD note must be made.  */
2695   rtx i3dest_killed = 0;
2696   /* SET_DEST and SET_SRC of I2, I1 and I0.  */
2697   rtx i2dest = 0, i2src = 0, i1dest = 0, i1src = 0, i0dest = 0, i0src = 0;
2698   /* Copy of SET_SRC of I1 and I0, if needed.  */
2699   rtx i1src_copy = 0, i0src_copy = 0, i0src_copy2 = 0;
2700   /* Set if I2DEST was reused as a scratch register.  */
2701   bool i2scratch = false;
2702   /* The PATTERNs of I0, I1, and I2, or a copy of them in certain cases.  */
2703   rtx i0pat = 0, i1pat = 0, i2pat = 0;
2704   /* Indicates if I2DEST or I1DEST is in I2SRC or I1SRC.  */
2705   int i2dest_in_i2src = 0, i1dest_in_i1src = 0, i2dest_in_i1src = 0;
2706   int i0dest_in_i0src = 0, i1dest_in_i0src = 0, i2dest_in_i0src = 0;
2707   int i2dest_killed = 0, i1dest_killed = 0, i0dest_killed = 0;
2708   int i1_feeds_i2_n = 0, i0_feeds_i2_n = 0, i0_feeds_i1_n = 0;
2709   /* Notes that must be added to REG_NOTES in I3 and I2.  */
2710   rtx new_i3_notes, new_i2_notes;
2711   /* Notes that we substituted I3 into I2 instead of the normal case.  */
2712   int i3_subst_into_i2 = 0;
2713   /* Notes that I1, I2 or I3 is a MULT operation.  */
2714   int have_mult = 0;
2715   int swap_i2i3 = 0;
2716   int split_i2i3 = 0;
2717   int changed_i3_dest = 0;
2718   bool i2_was_move = false, i3_was_move = false;
2719   int n_auto_inc = 0;
2720 
2721   int maxreg;
2722   rtx_insn *temp_insn;
2723   rtx temp_expr;
2724   struct insn_link *link;
2725   rtx other_pat = 0;
2726   rtx new_other_notes;
2727   int i;
2728   scalar_int_mode dest_mode, temp_mode;
2729 
2730   /* Immediately return if any of I0, I1, I2 are the same insn (I3 can
2731      never be).  */
2732   if (i1 == i2 || i0 == i2 || (i0 && i0 == i1))
2733     return 0;
2734 
2735   /* Only try four-insn combinations when there's high likelihood of
2736      success.  Look for simple insns, such as loads of constants or
2737      binary operations involving a constant.  */
2738   if (i0)
2739     {
2740       int i;
2741       int ngood = 0;
2742       int nshift = 0;
2743       rtx set0, set3;
2744 
2745       if (!flag_expensive_optimizations)
2746 	return 0;
2747 
2748       for (i = 0; i < 4; i++)
2749 	{
2750 	  rtx_insn *insn = i == 0 ? i0 : i == 1 ? i1 : i == 2 ? i2 : i3;
2751 	  rtx set = single_set (insn);
2752 	  rtx src;
2753 	  if (!set)
2754 	    continue;
2755 	  src = SET_SRC (set);
2756 	  if (CONSTANT_P (src))
2757 	    {
2758 	      ngood += 2;
2759 	      break;
2760 	    }
2761 	  else if (BINARY_P (src) && CONSTANT_P (XEXP (src, 1)))
2762 	    ngood++;
2763 	  else if (GET_CODE (src) == ASHIFT || GET_CODE (src) == ASHIFTRT
2764 		   || GET_CODE (src) == LSHIFTRT)
2765 	    nshift++;
2766 	}
2767 
2768       /* If I0 loads a memory and I3 sets the same memory, then I1 and I2
2769 	 are likely manipulating its value.  Ideally we'll be able to combine
2770 	 all four insns into a bitfield insertion of some kind.
2771 
2772 	 Note the source in I0 might be inside a sign/zero extension and the
2773 	 memory modes in I0 and I3 might be different.  So extract the address
2774 	 from the destination of I3 and search for it in the source of I0.
2775 
2776 	 In the event that there's a match but the source/dest do not actually
2777 	 refer to the same memory, the worst that happens is we try some
2778 	 combinations that we wouldn't have otherwise.  */
2779       if ((set0 = single_set (i0))
2780 	  /* Ensure the source of SET0 is a MEM, possibly buried inside
2781 	     an extension.  */
2782 	  && (GET_CODE (SET_SRC (set0)) == MEM
2783 	      || ((GET_CODE (SET_SRC (set0)) == ZERO_EXTEND
2784 		   || GET_CODE (SET_SRC (set0)) == SIGN_EXTEND)
2785 		  && GET_CODE (XEXP (SET_SRC (set0), 0)) == MEM))
2786 	  && (set3 = single_set (i3))
2787 	  /* Ensure the destination of SET3 is a MEM.  */
2788 	  && GET_CODE (SET_DEST (set3)) == MEM
2789 	  /* Would it be better to extract the base address for the MEM
2790 	     in SET3 and look for that?  I don't have cases where it matters
2791 	     but I could envision such cases.  */
2792 	  && rtx_referenced_p (XEXP (SET_DEST (set3), 0), SET_SRC (set0)))
2793 	ngood += 2;
2794 
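      /* Insist on at least two "good" insns, or two shifts (which may
	 combine into a single extension or extraction), before paying the
	 cost of a four-insn attempt.  */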
2795       if (ngood < 2 && nshift < 2)
2796 	return 0;
2797     }
2798 
2799   /* Exit early if one of the insns involved can't be used for
2800      combinations.  */
2801   if (CALL_P (i2)
2802       || (i1 && CALL_P (i1))
2803       || (i0 && CALL_P (i0))
2804       || cant_combine_insn_p (i3)
2805       || cant_combine_insn_p (i2)
2806       || (i1 && cant_combine_insn_p (i1))
2807       || (i0 && cant_combine_insn_p (i0))
2808       || likely_spilled_retval_p (i3))
2809     return 0;
2810 
2811   combine_attempts++;
2812   undobuf.other_insn = 0;
2813 
2814   /* Reset the hard register usage information.  */
2815   CLEAR_HARD_REG_SET (newpat_used_regs);
2816 
2817   if (dump_file && (dump_flags & TDF_DETAILS))
2818     {
2819       if (i0)
2820 	fprintf (dump_file, "\nTrying %d, %d, %d -> %d:\n",
2821 		 INSN_UID (i0), INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2822       else if (i1)
2823 	fprintf (dump_file, "\nTrying %d, %d -> %d:\n",
2824 		 INSN_UID (i1), INSN_UID (i2), INSN_UID (i3));
2825       else
2826 	fprintf (dump_file, "\nTrying %d -> %d:\n",
2827 		 INSN_UID (i2), INSN_UID (i3));
2828 
2829       if (i0)
2830 	dump_insn_slim (dump_file, i0);
2831       if (i1)
2832 	dump_insn_slim (dump_file, i1);
2833       dump_insn_slim (dump_file, i2);
2834       dump_insn_slim (dump_file, i3);
2835     }
2836 
2837   /* If multiple insns feed into one of I2 or I3, they can be in any
2838      order.  To simplify the code below, reorder them in sequence.  */
2839   if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i2))
2840     std::swap (i0, i2);
2841   if (i0 && DF_INSN_LUID (i0) > DF_INSN_LUID (i1))
2842     std::swap (i0, i1);
2843   if (i1 && DF_INSN_LUID (i1) > DF_INSN_LUID (i2))
2844     std::swap (i1, i2);
2845 
2846   added_links_insn = 0;
2847   added_notes_insn = 0;
2848 
2849   /* First check for one important special case that the code below will
2850      not handle.  Namely, the case where I1 is zero, I2 is a PARALLEL
2851      and I3 is a SET whose SET_SRC is a SET_DEST in I2.  In that case,
2852      we may be able to replace that destination with the destination of I3.
2853      This occurs in the common code where we compute both a quotient and
2854      remainder into a structure, in which case we want to do the computation
2855      directly into the structure to avoid register-register copies.
2856 
2857      Note that this case handles both multiple sets in I2 and also cases
2858      where I2 has a number of CLOBBERs inside the PARALLEL.
2859 
2860      We make very conservative checks below and only try to handle the
2861      most common cases of this.  For example, we only handle the case
2862      where I2 and I3 are adjacent to avoid making difficult register
2863      usage tests.  */
2864 
2865   if (i1 == 0 && NONJUMP_INSN_P (i3) && GET_CODE (PATTERN (i3)) == SET
2866       && REG_P (SET_SRC (PATTERN (i3)))
2867       && REGNO (SET_SRC (PATTERN (i3))) >= FIRST_PSEUDO_REGISTER
2868       && find_reg_note (i3, REG_DEAD, SET_SRC (PATTERN (i3)))
2869       && GET_CODE (PATTERN (i2)) == PARALLEL
2870       && ! side_effects_p (SET_DEST (PATTERN (i3)))
2871       /* If the dest of I3 is a ZERO_EXTRACT or STRICT_LOW_PART, the code
2872 	 below would need to check what is inside (and reg_overlap_mentioned_p
2873 	 doesn't support those codes anyway).  Don't allow those destinations;
2874 	 the resulting insn isn't likely to be recognized anyway.  */
2875       && GET_CODE (SET_DEST (PATTERN (i3))) != ZERO_EXTRACT
2876       && GET_CODE (SET_DEST (PATTERN (i3))) != STRICT_LOW_PART
2877       && ! reg_overlap_mentioned_p (SET_SRC (PATTERN (i3)),
2878 				    SET_DEST (PATTERN (i3)))
2879       && next_active_insn (i2) == i3)
2880     {
2881       rtx p2 = PATTERN (i2);
2882 
2883       /* Make sure that the destination of I3,
2884 	 which we are going to substitute into one output of I2,
2885 	 is not used within another output of I2.  We must avoid making this:
2886 	 (parallel [(set (mem (reg 69)) ...)
2887 		    (set (reg 69) ...)])
2888 	 which is not well-defined as to order of actions.
2889 	 (Besides, reload can't handle output reloads for this.)
2890 
2891 	 The problem can also happen if the dest of I3 is a memory ref,
2892 	 if another dest in I2 is an indirect memory ref.
2893 
2894 	 Neither can this PARALLEL be an asm.  We do not allow combining
2895 	 that usually (see can_combine_p), so do not here either.  */
2896       bool ok = true;
2897       for (i = 0; ok && i < XVECLEN (p2, 0); i++)
2898 	{
2899 	  if ((GET_CODE (XVECEXP (p2, 0, i)) == SET
2900 	       || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER
2901 	       || GET_CODE (XVECEXP (p2, 0, i)) == CLOBBER_HIGH)
2902 	      && reg_overlap_mentioned_p (SET_DEST (PATTERN (i3)),
2903 					  SET_DEST (XVECEXP (p2, 0, i))))
2904 	    ok = false;
2905 	  else if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2906 		   && GET_CODE (SET_SRC (XVECEXP (p2, 0, i))) == ASM_OPERANDS)
2907 	    ok = false;
2908 	}
2909 
2910       if (ok)
2911 	for (i = 0; i < XVECLEN (p2, 0); i++)
2912 	  if (GET_CODE (XVECEXP (p2, 0, i)) == SET
2913 	      && SET_DEST (XVECEXP (p2, 0, i)) == SET_SRC (PATTERN (i3)))
2914 	    {
2915 	      combine_merges++;
2916 
2917 	      subst_insn = i3;
2918 	      subst_low_luid = DF_INSN_LUID (i2);
2919 
2920 	      added_sets_2 = added_sets_1 = added_sets_0 = 0;
2921 	      i2src = SET_SRC (XVECEXP (p2, 0, i));
2922 	      i2dest = SET_DEST (XVECEXP (p2, 0, i));
2923 	      i2dest_killed = dead_or_set_p (i2, i2dest);
2924 
2925 	      /* Replace the dest in I2 with our dest and make the resulting
2926 		 insn the new pattern for I3.  Then skip to where we validate
2927 		 the pattern.  Everything was set up above.  */
2928 	      SUBST (SET_DEST (XVECEXP (p2, 0, i)), SET_DEST (PATTERN (i3)));
2929 	      newpat = p2;
2930 	      i3_subst_into_i2 = 1;
2931 	      goto validate_replacement;
2932 	    }
2933     }
2934 
2935   /* If I2 is setting a pseudo to a constant and I3 is setting some
2936      sub-part of it to another constant, merge them by making a new
2937      constant.  */
2938   if (i1 == 0
2939       && (temp_expr = single_set (i2)) != 0
2940       && is_a <scalar_int_mode> (GET_MODE (SET_DEST (temp_expr)), &temp_mode)
2941       && CONST_SCALAR_INT_P (SET_SRC (temp_expr))
2942       && GET_CODE (PATTERN (i3)) == SET
2943       && CONST_SCALAR_INT_P (SET_SRC (PATTERN (i3)))
2944       && reg_subword_p (SET_DEST (PATTERN (i3)), SET_DEST (temp_expr)))
2945     {
2946       rtx dest = SET_DEST (PATTERN (i3));
2947       rtx temp_dest = SET_DEST (temp_expr);
2948       int offset = -1;
2949       int width = 0;
2950 
2951       if (GET_CODE (dest) == ZERO_EXTRACT)
2952 	{
2953 	  if (CONST_INT_P (XEXP (dest, 1))
2954 	      && CONST_INT_P (XEXP (dest, 2))
2955 	      && is_a <scalar_int_mode> (GET_MODE (XEXP (dest, 0)),
2956 					 &dest_mode))
2957 	    {
2958 	      width = INTVAL (XEXP (dest, 1));
2959 	      offset = INTVAL (XEXP (dest, 2));
2960 	      dest = XEXP (dest, 0);
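	      /* Convert a big-endian bit position, counted from the most
		 significant bit, into the little-endian convention used
		 by wi::insert below.  */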
2961 	      if (BITS_BIG_ENDIAN)
2962 		offset = GET_MODE_PRECISION (dest_mode) - width - offset;
2963 	    }
2964 	}
2965       else
2966 	{
2967 	  if (GET_CODE (dest) == STRICT_LOW_PART)
2968 	    dest = XEXP (dest, 0);
2969 	  if (is_a <scalar_int_mode> (GET_MODE (dest), &dest_mode))
2970 	    {
2971 	      width = GET_MODE_PRECISION (dest_mode);
2972 	      offset = 0;
2973 	    }
2974 	}
2975 
2976       if (offset >= 0)
2977 	{
2978 	  /* If this is the low part, we're done.  */
2979 	  if (subreg_lowpart_p (dest))
2980 	    ;
2981 	  /* Handle the case where inner is twice the size of outer.  */
2982 	  else if (GET_MODE_PRECISION (temp_mode)
2983 		   == 2 * GET_MODE_PRECISION (dest_mode))
2984 	    offset += GET_MODE_PRECISION (dest_mode);
2985 	  /* Otherwise give up for now.  */
2986 	  else
2987 	    offset = -1;
2988 	}
2989 
2990       if (offset >= 0)
2991 	{
2992 	  rtx inner = SET_SRC (PATTERN (i3));
2993 	  rtx outer = SET_SRC (temp_expr);
2994 
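	  /* Splice the I3 constant into the I2 constant at OFFSET, giving
	     the value the destination register holds after both stores.  */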
2995 	  wide_int o = wi::insert (rtx_mode_t (outer, temp_mode),
2996 				   rtx_mode_t (inner, dest_mode),
2997 				   offset, width);
2998 
2999 	  combine_merges++;
3000 	  subst_insn = i3;
3001 	  subst_low_luid = DF_INSN_LUID (i2);
3002 	  added_sets_2 = added_sets_1 = added_sets_0 = 0;
3003 	  i2dest = temp_dest;
3004 	  i2dest_killed = dead_or_set_p (i2, i2dest);
3005 
3006 	  /* Replace the source in I2 with the new constant and make the
3007 	     resulting insn the new pattern for I3.  Then skip to where we
3008 	     validate the pattern.  Everything was set up above.  */
3009 	  SUBST (SET_SRC (temp_expr),
3010 		 immed_wide_int_const (o, temp_mode));
3011 
3012 	  newpat = PATTERN (i2);
3013 
	  /* The dest of I3 has been replaced with the dest of I2.  */
	  changed_i3_dest = 1;
3016 	  goto validate_replacement;
3017 	}
3018     }
3019 
3020   /* If we have no I1 and I2 looks like:
3021 	(parallel [(set (reg:CC X) (compare:CC OP (const_int 0)))
3022 		   (set Y OP)])
3023      make up a dummy I1 that is
3024 	(set Y OP)
3025      and change I2 to be
3026 	(set (reg:CC X) (compare:CC Y (const_int 0)))
3027 
3028      (We can ignore any trailing CLOBBERs.)
3029 
3030      This undoes a previous combination and allows us to match a branch-and-
3031      decrement insn.  */
3032 
3033   if (!HAVE_cc0 && i1 == 0
3034       && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
3035       && (GET_MODE_CLASS (GET_MODE (SET_DEST (XVECEXP (PATTERN (i2), 0, 0))))
3036 	  == MODE_CC)
3037       && GET_CODE (SET_SRC (XVECEXP (PATTERN (i2), 0, 0))) == COMPARE
3038       && XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 1) == const0_rtx
3039       && rtx_equal_p (XEXP (SET_SRC (XVECEXP (PATTERN (i2), 0, 0)), 0),
3040 		      SET_SRC (XVECEXP (PATTERN (i2), 0, 1)))
3041       && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
3042       && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
3043     {
3044       /* We make I1 with the same INSN_UID as I2.  This gives it
3045 	 the same DF_INSN_LUID for value tracking.  Our fake I1 will
3046 	 never appear in the insn stream so giving it the same INSN_UID
3047 	 as I2 will not cause a problem.  */
3048 
3049       i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
3050 			 XVECEXP (PATTERN (i2), 0, 1), INSN_LOCATION (i2),
3051 			 -1, NULL_RTX);
3052       INSN_UID (i1) = INSN_UID (i2);
3053 
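      /* Turn I2 into just the CC set, with Y as the first operand of the
	 COMPARE, and give I2 a LOG_LINK to the new I1 for Y.  */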
3054       SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 0));
3055       SUBST (XEXP (SET_SRC (PATTERN (i2)), 0),
3056 	     SET_DEST (PATTERN (i1)));
3057       unsigned int regno = REGNO (SET_DEST (PATTERN (i1)));
3058       SUBST_LINK (LOG_LINKS (i2),
3059 		  alloc_insn_link (i1, regno, LOG_LINKS (i2)));
3060     }
3061 
3062   /* If I2 is a PARALLEL of two SETs of REGs (and perhaps some CLOBBERs),
3063      make those two SETs separate I1 and I2 insns, and make an I0 that is
3064      the original I1.  */
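  /* That is, if I2 is (parallel [(set A X) (set B Y)]), the fake I1
     becomes (set A X) and I2 becomes (set B Y), while any original I1
     moves down to the I0 slot.  */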
3065   if (!HAVE_cc0 && i0 == 0
3066       && is_parallel_of_n_reg_sets (PATTERN (i2), 2)
3067       && can_split_parallel_of_n_reg_sets (i2, 2)
3068       && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
3069       && !reg_used_between_p (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3)
3070       && !reg_set_between_p  (SET_DEST (XVECEXP (PATTERN (i2), 0, 0)), i2, i3)
3071       && !reg_set_between_p  (SET_DEST (XVECEXP (PATTERN (i2), 0, 1)), i2, i3))
3072     {
3073       /* If there is no I1, there is no I0 either.  */
3074       i0 = i1;
3075 
3076       /* We make I1 with the same INSN_UID as I2.  This gives it
3077 	 the same DF_INSN_LUID for value tracking.  Our fake I1 will
3078 	 never appear in the insn stream so giving it the same INSN_UID
3079 	 as I2 will not cause a problem.  */
3080 
3081       i1 = gen_rtx_INSN (VOIDmode, NULL, i2, BLOCK_FOR_INSN (i2),
3082 			 XVECEXP (PATTERN (i2), 0, 0), INSN_LOCATION (i2),
3083 			 -1, NULL_RTX);
3084       INSN_UID (i1) = INSN_UID (i2);
3085 
3086       SUBST (PATTERN (i2), XVECEXP (PATTERN (i2), 0, 1));
3087     }
3088 
3089   /* Verify that I2 and maybe I1 and I0 can be combined into I3.  */
3090   if (!can_combine_p (i2, i3, i0, i1, NULL, NULL, &i2dest, &i2src))
3091     {
3092       if (dump_file && (dump_flags & TDF_DETAILS))
3093 	fprintf (dump_file, "Can't combine i2 into i3\n");
3094       undo_all ();
3095       return 0;
3096     }
3097   if (i1 && !can_combine_p (i1, i3, i0, NULL, i2, NULL, &i1dest, &i1src))
3098     {
3099       if (dump_file && (dump_flags & TDF_DETAILS))
3100 	fprintf (dump_file, "Can't combine i1 into i3\n");
3101       undo_all ();
3102       return 0;
3103     }
3104   if (i0 && !can_combine_p (i0, i3, NULL, NULL, i1, i2, &i0dest, &i0src))
3105     {
3106       if (dump_file && (dump_flags & TDF_DETAILS))
3107 	fprintf (dump_file, "Can't combine i0 into i3\n");
3108       undo_all ();
3109       return 0;
3110     }
3111 
3112   /* Record whether i2 and i3 are trivial moves.  */
3113   i2_was_move = is_just_move (i2);
3114   i3_was_move = is_just_move (i3);
3115 
3116   /* Record whether I2DEST is used in I2SRC and similarly for the other
3117      cases.  Knowing this will help in register status updating below.  */
3118   i2dest_in_i2src = reg_overlap_mentioned_p (i2dest, i2src);
3119   i1dest_in_i1src = i1 && reg_overlap_mentioned_p (i1dest, i1src);
3120   i2dest_in_i1src = i1 && reg_overlap_mentioned_p (i2dest, i1src);
3121   i0dest_in_i0src = i0 && reg_overlap_mentioned_p (i0dest, i0src);
3122   i1dest_in_i0src = i0 && reg_overlap_mentioned_p (i1dest, i0src);
3123   i2dest_in_i0src = i0 && reg_overlap_mentioned_p (i2dest, i0src);
3124   i2dest_killed = dead_or_set_p (i2, i2dest);
3125   i1dest_killed = i1 && dead_or_set_p (i1, i1dest);
3126   i0dest_killed = i0 && dead_or_set_p (i0, i0dest);
3127 
3128   /* For the earlier insns, determine which of the subsequent ones they
3129      feed.  */
3130   i1_feeds_i2_n = i1 && insn_a_feeds_b (i1, i2);
3131   i0_feeds_i1_n = i0 && insn_a_feeds_b (i0, i1);
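  /* If I0 does not feed I1, see directly whether it feeds I2.  If it
     does feed I1, I0 also feeds I2 only when I0DEST does not overlap
     I1DEST and is mentioned in I2SRC.  */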
3132   i0_feeds_i2_n = (i0 && (!i0_feeds_i1_n ? insn_a_feeds_b (i0, i2)
3133 			  : (!reg_overlap_mentioned_p (i1dest, i0dest)
3134 			     && reg_overlap_mentioned_p (i0dest, i2src))));
3135 
3136   /* Ensure that I3's pattern can be the destination of combines.  */
3137   if (! combinable_i3pat (i3, &PATTERN (i3), i2dest, i1dest, i0dest,
3138 			  i1 && i2dest_in_i1src && !i1_feeds_i2_n,
3139 			  i0 && ((i2dest_in_i0src && !i0_feeds_i2_n)
3140 				 || (i1dest_in_i0src && !i0_feeds_i1_n)),
3141 			  &i3dest_killed))
3142     {
3143       undo_all ();
3144       return 0;
3145     }
3146 
  /* See if any of the insns is a MULT operation.  Unless one is, we will
     reject a combination whose result is a MULT, since that must be
     slower.  Be conservative here.  */
3150   if (GET_CODE (i2src) == MULT
3151       || (i1 != 0 && GET_CODE (i1src) == MULT)
3152       || (i0 != 0 && GET_CODE (i0src) == MULT)
3153       || (GET_CODE (PATTERN (i3)) == SET
3154 	  && GET_CODE (SET_SRC (PATTERN (i3))) == MULT))
3155     have_mult = 1;
3156 
3157   /* If I3 has an inc, then give up if I1 or I2 uses the reg that is inc'd.
3158      We used to do this EXCEPT in one case: I3 has a post-inc in an
3159      output operand.  However, that exception can give rise to insns like
3160 	mov r3,(r3)+
3161      which is a famous insn on the PDP-11 where the value of r3 used as the
3162      source was model-dependent.  Avoid this sort of thing.  */
3163 
3164 #if 0
3165   if (!(GET_CODE (PATTERN (i3)) == SET
3166 	&& REG_P (SET_SRC (PATTERN (i3)))
3167 	&& MEM_P (SET_DEST (PATTERN (i3)))
3168 	&& (GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_INC
3169 	    || GET_CODE (XEXP (SET_DEST (PATTERN (i3)), 0)) == POST_DEC)))
3170     /* It's not the exception.  */
3171 #endif
3172     if (AUTO_INC_DEC)
3173       {
3174 	rtx link;
3175 	for (link = REG_NOTES (i3); link; link = XEXP (link, 1))
3176 	  if (REG_NOTE_KIND (link) == REG_INC
3177 	      && (reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i2))
3178 		  || (i1 != 0
3179 		      && reg_overlap_mentioned_p (XEXP (link, 0), PATTERN (i1)))))
3180 	    {
3181 	      undo_all ();
3182 	      return 0;
3183 	    }
3184       }
3185 
3186   /* See if the SETs in I1 or I2 need to be kept around in the merged
3187      instruction: whenever the value set there is still needed past I3.
3188      For the SET in I2, this is easy: we see if I2DEST dies or is set in I3.
3189 
3190      For the SET in I1, we have two cases: if I1 and I2 independently feed
3191      into I3, the set in I1 needs to be kept around unless I1DEST dies
3192      or is set in I3.  Otherwise (if I1 feeds I2 which feeds I3), the set
3193      in I1 needs to be kept around unless I1DEST dies or is set in either
3194      I2 or I3.  The same considerations apply to I0.  */
3195 
3196   added_sets_2 = !dead_or_set_p (i3, i2dest);
3197 
3198   if (i1)
3199     added_sets_1 = !(dead_or_set_p (i3, i1dest)
3200 		     || (i1_feeds_i2_n && dead_or_set_p (i2, i1dest)));
3201   else
3202     added_sets_1 = 0;
3203 
3204   if (i0)
3205     added_sets_0 =  !(dead_or_set_p (i3, i0dest)
3206 		      || (i0_feeds_i1_n && dead_or_set_p (i1, i0dest))
3207 		      || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3208 			  && dead_or_set_p (i2, i0dest)));
3209   else
3210     added_sets_0 = 0;
3211 
3212   /* We are about to copy insns for the case where they need to be kept
3213      around.  Check that they can be copied in the merged instruction.  */
3214 
3215   if (targetm.cannot_copy_insn_p
3216       && ((added_sets_2 && targetm.cannot_copy_insn_p (i2))
3217 	  || (i1 && added_sets_1 && targetm.cannot_copy_insn_p (i1))
3218 	  || (i0 && added_sets_0 && targetm.cannot_copy_insn_p (i0))))
3219     {
3220       undo_all ();
3221       return 0;
3222     }
3223 
3224   /* Count how many auto_inc expressions there were in the original insns;
3225      we need to have the same number in the resulting patterns.  */
3226 
3227   if (i0)
3228     for_each_inc_dec (PATTERN (i0), count_auto_inc, &n_auto_inc);
3229   if (i1)
3230     for_each_inc_dec (PATTERN (i1), count_auto_inc, &n_auto_inc);
3231   for_each_inc_dec (PATTERN (i2), count_auto_inc, &n_auto_inc);
3232   for_each_inc_dec (PATTERN (i3), count_auto_inc, &n_auto_inc);
3233 
3234   /* If the set in I2 needs to be kept around, we must make a copy of
3235      PATTERN (I2), so that when we substitute I1SRC for I1DEST in
3236      PATTERN (I2), we are only substituting for the original I1DEST, not into
3237      an already-substituted copy.  This also prevents making self-referential
3238      rtx.  If I2 is a PARALLEL, we just need the piece that assigns I2SRC to
3239      I2DEST.  */
3240 
3241   if (added_sets_2)
3242     {
3243       if (GET_CODE (PATTERN (i2)) == PARALLEL)
3244 	i2pat = gen_rtx_SET (i2dest, copy_rtx (i2src));
3245       else
3246 	i2pat = copy_rtx (PATTERN (i2));
3247     }
3248 
3249   if (added_sets_1)
3250     {
3251       if (GET_CODE (PATTERN (i1)) == PARALLEL)
3252 	i1pat = gen_rtx_SET (i1dest, copy_rtx (i1src));
3253       else
3254 	i1pat = copy_rtx (PATTERN (i1));
3255     }
3256 
3257   if (added_sets_0)
3258     {
3259       if (GET_CODE (PATTERN (i0)) == PARALLEL)
3260 	i0pat = gen_rtx_SET (i0dest, copy_rtx (i0src));
3261       else
3262 	i0pat = copy_rtx (PATTERN (i0));
3263     }
3264 
3265   combine_merges++;
3266 
3267   /* Substitute in the latest insn for the regs set by the earlier ones.  */
3268 
3269   maxreg = max_reg_num ();
3270 
3271   subst_insn = i3;
3272 
3273   /* Many machines that don't use CC0 have insns that can both perform an
3274      arithmetic operation and set the condition code.  These operations will
3275      be represented as a PARALLEL with the first element of the vector
3276      being a COMPARE of an arithmetic operation with the constant zero.
3277      The second element of the vector will set some pseudo to the result
3278      of the same arithmetic operation.  If we simplify the COMPARE, we won't
     match such a pattern and so will generate an extra insn.  Here we test
3280      for this case, where both the comparison and the operation result are
3281      needed, and make the PARALLEL by just replacing I2DEST in I3SRC with
3282      I2SRC.  Later we will make the PARALLEL that contains I2.  */
3283 
3284   if (!HAVE_cc0 && i1 == 0 && added_sets_2 && GET_CODE (PATTERN (i3)) == SET
3285       && GET_CODE (SET_SRC (PATTERN (i3))) == COMPARE
3286       && CONST_INT_P (XEXP (SET_SRC (PATTERN (i3)), 1))
3287       && rtx_equal_p (XEXP (SET_SRC (PATTERN (i3)), 0), i2dest))
3288     {
3289       rtx newpat_dest;
3290       rtx *cc_use_loc = NULL;
3291       rtx_insn *cc_use_insn = NULL;
3292       rtx op0 = i2src, op1 = XEXP (SET_SRC (PATTERN (i3)), 1);
3293       machine_mode compare_mode, orig_compare_mode;
3294       enum rtx_code compare_code = UNKNOWN, orig_compare_code = UNKNOWN;
3295       scalar_int_mode mode;
3296 
3297       newpat = PATTERN (i3);
3298       newpat_dest = SET_DEST (newpat);
3299       compare_mode = orig_compare_mode = GET_MODE (newpat_dest);
3300 
3301       if (undobuf.other_insn == 0
3302 	  && (cc_use_loc = find_single_use (SET_DEST (newpat), i3,
3303 					    &cc_use_insn)))
3304 	{
3305 	  compare_code = orig_compare_code = GET_CODE (*cc_use_loc);
3306 	  if (is_a <scalar_int_mode> (GET_MODE (i2dest), &mode))
3307 	    compare_code = simplify_compare_const (compare_code, mode,
3308 						   op0, &op1);
3309 	  target_canonicalize_comparison (&compare_code, &op0, &op1, 1);
3310 	}
3311 
3312       /* Do the rest only if op1 is const0_rtx, which may be the
3313 	 result of simplification.  */
3314       if (op1 == const0_rtx)
3315 	{
3316 	  /* If a single use of the CC is found, prepare to modify it
3317 	     when SELECT_CC_MODE returns a new CC-class mode, or when
3318 	     the above simplify_compare_const() returned a new comparison
3319 	     operator.  undobuf.other_insn is assigned the CC use insn
3320 	     when modifying it.  */
3321 	  if (cc_use_loc)
3322 	    {
3323 #ifdef SELECT_CC_MODE
3324 	      machine_mode new_mode
3325 		= SELECT_CC_MODE (compare_code, op0, op1);
3326 	      if (new_mode != orig_compare_mode
3327 		  && can_change_dest_mode (SET_DEST (newpat),
3328 					   added_sets_2, new_mode))
3329 		{
3330 		  unsigned int regno = REGNO (newpat_dest);
3331 		  compare_mode = new_mode;
3332 		  if (regno < FIRST_PSEUDO_REGISTER)
3333 		    newpat_dest = gen_rtx_REG (compare_mode, regno);
3334 		  else
3335 		    {
3336 		      SUBST_MODE (regno_reg_rtx[regno], compare_mode);
3337 		      newpat_dest = regno_reg_rtx[regno];
3338 		    }
3339 		}
3340 #endif
3341 	      /* Cases for modifying the CC-using comparison.  */
3342 	      if (compare_code != orig_compare_code
3343 		  /* ??? Do we need to verify the zero rtx?  */
3344 		  && XEXP (*cc_use_loc, 1) == const0_rtx)
3345 		{
3346 		  /* Replace cc_use_loc with entire new RTX.  */
3347 		  SUBST (*cc_use_loc,
3348 			 gen_rtx_fmt_ee (compare_code, GET_MODE (*cc_use_loc),
3349 					 newpat_dest, const0_rtx));
3350 		  undobuf.other_insn = cc_use_insn;
3351 		}
3352 	      else if (compare_mode != orig_compare_mode)
3353 		{
3354 		  /* Just replace the CC reg with a new mode.  */
3355 		  SUBST (XEXP (*cc_use_loc, 0), newpat_dest);
3356 		  undobuf.other_insn = cc_use_insn;
3357 		}
3358 	    }
3359 
3360 	  /* Now we modify the current newpat:
3361 	     First, SET_DEST(newpat) is updated if the CC mode has been
3362 	     altered. For targets without SELECT_CC_MODE, this should be
3363 	     optimized away.  */
3364 	  if (compare_mode != orig_compare_mode)
3365 	    SUBST (SET_DEST (newpat), newpat_dest);
3366 	  /* This is always done to propagate i2src into newpat.  */
3367 	  SUBST (SET_SRC (newpat),
3368 		 gen_rtx_COMPARE (compare_mode, op0, op1));
3369 	  /* Create new version of i2pat if needed; the below PARALLEL
3370 	     creation needs this to work correctly.  */
3371 	  if (! rtx_equal_p (i2src, op0))
3372 	    i2pat = gen_rtx_SET (i2dest, op0);
3373 	  i2_is_used = 1;
3374 	}
3375     }
3376 
3377   if (i2_is_used == 0)
3378     {
3379       /* It is possible that the source of I2 or I1 may be performing
3380 	 an unneeded operation, such as a ZERO_EXTEND of something
3381 	 that is known to have the high part zero.  Handle that case
3382 	 by letting subst look at the inner insns.
3383 
3384 	 Another way to do this would be to have a function that tries
3385 	 to simplify a single insn instead of merging two or more
3386 	 insns.  We don't do this because of the potential of infinite
3387 	 loops and because of the potential extra memory required.
3388 	 However, doing it the way we are is a bit of a kludge and
3389 	 doesn't catch all cases.
3390 
3391 	 But only do this if -fexpensive-optimizations since it slows
3392 	 things down and doesn't usually win.
3393 
3394 	 This is not done in the COMPARE case above because the
3395 	 unmodified I2PAT is used in the PARALLEL and so a pattern
3396 	 with a modified I2SRC would not match.  */
3397 
3398       if (flag_expensive_optimizations)
3399 	{
3400 	  /* Pass pc_rtx so no substitutions are done, just
3401 	     simplifications.  */
3402 	  if (i1)
3403 	    {
3404 	      subst_low_luid = DF_INSN_LUID (i1);
3405 	      i1src = subst (i1src, pc_rtx, pc_rtx, 0, 0, 0);
3406 	    }
3407 
3408 	  subst_low_luid = DF_INSN_LUID (i2);
3409 	  i2src = subst (i2src, pc_rtx, pc_rtx, 0, 0, 0);
3410 	}
3411 
3412       n_occurrences = 0;		/* `subst' counts here */
3413       subst_low_luid = DF_INSN_LUID (i2);
3414 
3415       /* If I1 feeds into I2 and I1DEST is in I1SRC, we need to make a unique
3416 	 copy of I2SRC each time we substitute it, in order to avoid creating
3417 	 self-referential RTL when we will be substituting I1SRC for I1DEST
3418 	 later.  Likewise if I0 feeds into I2, either directly or indirectly
3419 	 through I1, and I0DEST is in I0SRC.  */
3420       newpat = subst (PATTERN (i3), i2dest, i2src, 0, 0,
3421 		      (i1_feeds_i2_n && i1dest_in_i1src)
3422 		      || ((i0_feeds_i2_n || (i0_feeds_i1_n && i1_feeds_i2_n))
3423 			  && i0dest_in_i0src));
3424       substed_i2 = 1;
3425 
3426       /* Record whether I2's body now appears within I3's body.  */
3427       i2_is_used = n_occurrences;
3428     }
3429 
3430   /* If we already got a failure, don't try to do more.  Otherwise, try to
3431      substitute I1 if we have it.  */
3432 
3433   if (i1 && GET_CODE (newpat) != CLOBBER)
3434     {
3435       /* Before we can do this substitution, we must redo the test done
3436 	 above (see detailed comments there) that ensures I1DEST isn't
3437 	 mentioned in any SETs in NEWPAT that are field assignments.  */
3438       if (!combinable_i3pat (NULL, &newpat, i1dest, NULL_RTX, NULL_RTX,
3439 			     0, 0, 0))
3440 	{
3441 	  undo_all ();
3442 	  return 0;
3443 	}
3444 
3445       n_occurrences = 0;
3446       subst_low_luid = DF_INSN_LUID (i1);
3447 
3448       /* If the following substitution will modify I1SRC, make a copy of it
3449 	 for the case where it is substituted for I1DEST in I2PAT later.  */
3450       if (added_sets_2 && i1_feeds_i2_n)
3451 	i1src_copy = copy_rtx (i1src);
3452 
3453       /* If I0 feeds into I1 and I0DEST is in I0SRC, we need to make a unique
3454 	 copy of I1SRC each time we substitute it, in order to avoid creating
3455 	 self-referential RTL when we will be substituting I0SRC for I0DEST
3456 	 later.  */
3457       newpat = subst (newpat, i1dest, i1src, 0, 0,
3458 		      i0_feeds_i1_n && i0dest_in_i0src);
3459       substed_i1 = 1;
3460 
3461       /* Record whether I1's body now appears within I3's body.  */
3462       i1_is_used = n_occurrences;
3463     }
3464 
3465   /* Likewise for I0 if we have it.  */
3466 
3467   if (i0 && GET_CODE (newpat) != CLOBBER)
3468     {
3469       if (!combinable_i3pat (NULL, &newpat, i0dest, NULL_RTX, NULL_RTX,
3470 			     0, 0, 0))
3471 	{
3472 	  undo_all ();
3473 	  return 0;
3474 	}
3475 
3476       /* If the following substitution will modify I0SRC, make a copy of it
3477 	 for the case where it is substituted for I0DEST in I1PAT later.  */
3478       if (added_sets_1 && i0_feeds_i1_n)
3479 	i0src_copy = copy_rtx (i0src);
3480       /* And a copy for I0DEST in I2PAT substitution.  */
3481       if (added_sets_2 && ((i0_feeds_i1_n && i1_feeds_i2_n)
3482 			   || (i0_feeds_i2_n)))
3483 	i0src_copy2 = copy_rtx (i0src);
3484 
3485       n_occurrences = 0;
3486       subst_low_luid = DF_INSN_LUID (i0);
3487       newpat = subst (newpat, i0dest, i0src, 0, 0, 0);
3488       substed_i0 = 1;
3489     }
3490 
3491   if (n_auto_inc)
3492     {
3493       int new_n_auto_inc = 0;
3494       for_each_inc_dec (newpat, count_auto_inc, &new_n_auto_inc);
3495 
3496       if (n_auto_inc != new_n_auto_inc)
3497 	{
3498 	  if (dump_file && (dump_flags & TDF_DETAILS))
3499 	    fprintf (dump_file, "Number of auto_inc expressions changed\n");
3500 	  undo_all ();
3501 	  return 0;
3502 	}
3503     }
3504 
3505   /* Fail if an autoincrement side-effect has been duplicated.  Be careful
3506      to count all the ways that I2SRC and I1SRC can be used.  */
3507   if ((FIND_REG_INC_NOTE (i2, NULL_RTX) != 0
3508        && i2_is_used + added_sets_2 > 1)
3509       || (i1 != 0 && FIND_REG_INC_NOTE (i1, NULL_RTX) != 0
3510 	  && (i1_is_used + added_sets_1 + (added_sets_2 && i1_feeds_i2_n)
3511 	      > 1))
3512       || (i0 != 0 && FIND_REG_INC_NOTE (i0, NULL_RTX) != 0
3513 	  && (n_occurrences + added_sets_0
3514 	      + (added_sets_1 && i0_feeds_i1_n)
3515 	      + (added_sets_2 && i0_feeds_i2_n)
3516 	      > 1))
3517       /* Fail if we tried to make a new register.  */
3518       || max_reg_num () != maxreg
3519       /* Fail if we couldn't do something and have a CLOBBER.  */
3520       || GET_CODE (newpat) == CLOBBER
3521       /* Fail if this new pattern is a MULT and we didn't have one before
3522 	 at the outer level.  */
3523       || (GET_CODE (newpat) == SET && GET_CODE (SET_SRC (newpat)) == MULT
3524 	  && ! have_mult))
3525     {
3526       undo_all ();
3527       return 0;
3528     }
3529 
3530   /* If the actions of the earlier insns must be kept
3531      in addition to substituting them into the latest one,
3532      we must make a new PARALLEL for the latest insn
     to hold the additional SETs.  */
3534 
3535   if (added_sets_0 || added_sets_1 || added_sets_2)
3536     {
3537       int extra_sets = added_sets_0 + added_sets_1 + added_sets_2;
3538       combine_extras++;
3539 
3540       if (GET_CODE (newpat) == PARALLEL)
3541 	{
3542 	  rtvec old = XVEC (newpat, 0);
3543 	  total_sets = XVECLEN (newpat, 0) + extra_sets;
3544 	  newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3545 	  memcpy (XVEC (newpat, 0)->elem, &old->elem[0],
3546 		  sizeof (old->elem[0]) * old->num_elem);
3547 	}
3548       else
3549 	{
3550 	  rtx old = newpat;
3551 	  total_sets = 1 + extra_sets;
3552 	  newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (total_sets));
3553 	  XVECEXP (newpat, 0, 0) = old;
3554 	}
3555 
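      /* Fill in the kept SETs, working backwards from the end of the
	 new vector.  */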
3556       if (added_sets_0)
3557 	XVECEXP (newpat, 0, --total_sets) = i0pat;
3558 
3559       if (added_sets_1)
3560 	{
3561 	  rtx t = i1pat;
3562 	  if (i0_feeds_i1_n)
3563 	    t = subst (t, i0dest, i0src_copy ? i0src_copy : i0src, 0, 0, 0);
3564 
3565 	  XVECEXP (newpat, 0, --total_sets) = t;
3566 	}
3567       if (added_sets_2)
3568 	{
3569 	  rtx t = i2pat;
3570 	  if (i1_feeds_i2_n)
3571 	    t = subst (t, i1dest, i1src_copy ? i1src_copy : i1src, 0, 0,
3572 		       i0_feeds_i1_n && i0dest_in_i0src);
3573 	  if ((i0_feeds_i1_n && i1_feeds_i2_n) || i0_feeds_i2_n)
3574 	    t = subst (t, i0dest, i0src_copy2 ? i0src_copy2 : i0src, 0, 0, 0);
3575 
3576 	  XVECEXP (newpat, 0, --total_sets) = t;
3577 	}
3578     }
3579 
3580  validate_replacement:
3581 
3582   /* Note which hard regs this insn has as inputs.  */
3583   mark_used_regs_combine (newpat);
3584 
3585   /* If recog_for_combine fails, it strips existing clobbers.  If we'll
3586      consider splitting this pattern, we might need these clobbers.  */
3587   if (i1 && GET_CODE (newpat) == PARALLEL
3588       && GET_CODE (XVECEXP (newpat, 0, XVECLEN (newpat, 0) - 1)) == CLOBBER)
3589     {
3590       int len = XVECLEN (newpat, 0);
3591 
3592       newpat_vec_with_clobbers = rtvec_alloc (len);
3593       for (i = 0; i < len; i++)
3594 	RTVEC_ELT (newpat_vec_with_clobbers, i) = XVECEXP (newpat, 0, i);
3595     }
3596 
3597   /* We have recognized nothing yet.  */
3598   insn_code_number = -1;
3599 
3600   /* See if this is a PARALLEL of two SETs where one SET's destination is
3601      a register that is unused and this isn't marked as an instruction that
3602      might trap in an EH region.  In that case, we just need the other SET.
3603      We prefer this over the PARALLEL.
3604 
3605      This can occur when simplifying a divmod insn.  We *must* test for this
3606      case here because the code below that splits two independent SETs doesn't
3607      handle this case correctly when it updates the register status.
3608 
3609      It's pointless doing this if we originally had two sets, one from
3610      i3, and one from i2.  Combining then splitting the parallel results
3611      in the original i2 again plus an invalid insn (which we delete).
3612      The net effect is only to move instructions around, which makes
3613      debug info less accurate.
3614 
3615      If the remaining SET came from I2 its destination should not be used
3616      between I2 and I3.  See PR82024.  */
3617 
3618   if (!(added_sets_2 && i1 == 0)
3619       && is_parallel_of_n_reg_sets (newpat, 2)
3620       && asm_noperands (newpat) < 0)
3621     {
3622       rtx set0 = XVECEXP (newpat, 0, 0);
3623       rtx set1 = XVECEXP (newpat, 0, 1);
3624       rtx oldpat = newpat;
3625 
3626       if (((REG_P (SET_DEST (set1))
3627 	    && find_reg_note (i3, REG_UNUSED, SET_DEST (set1)))
3628 	   || (GET_CODE (SET_DEST (set1)) == SUBREG
3629 	       && find_reg_note (i3, REG_UNUSED, SUBREG_REG (SET_DEST (set1)))))
3630 	  && insn_nothrow_p (i3)
3631 	  && !side_effects_p (SET_SRC (set1)))
3632 	{
3633 	  newpat = set0;
3634 	  insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3635 	}
3636 
3637       else if (((REG_P (SET_DEST (set0))
3638 		 && find_reg_note (i3, REG_UNUSED, SET_DEST (set0)))
3639 		|| (GET_CODE (SET_DEST (set0)) == SUBREG
3640 		    && find_reg_note (i3, REG_UNUSED,
3641 				      SUBREG_REG (SET_DEST (set0)))))
3642 	       && insn_nothrow_p (i3)
3643 	       && !side_effects_p (SET_SRC (set0)))
3644 	{
3645 	  rtx dest = SET_DEST (set1);
3646 	  if (GET_CODE (dest) == SUBREG)
3647 	    dest = SUBREG_REG (dest);
3648 	  if (!reg_used_between_p (dest, i2, i3))
3649 	    {
3650 	      newpat = set1;
3651 	      insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3652 
3653 	      if (insn_code_number >= 0)
3654 		changed_i3_dest = 1;
3655 	    }
3656 	}
3657 
3658       if (insn_code_number < 0)
3659 	newpat = oldpat;
3660     }
3661 
  /* Is the result of the combination a valid instruction?  */
3663   if (insn_code_number < 0)
3664     insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
3665 
3666   /* If we were combining three insns and the result is a simple SET
3667      with no ASM_OPERANDS that wasn't recognized, try to split it into two
3668      insns.  There are two ways to do this.  It can be split using a
3669      machine-specific method (like when you have an addition of a large
3670      constant) or by combine in the function find_split_point.  */
3671 
3672   if (i1 && insn_code_number < 0 && GET_CODE (newpat) == SET
3673       && asm_noperands (newpat) < 0)
3674     {
3675       rtx parallel, *split;
3676       rtx_insn *m_split_insn;
3677 
3678       /* See if the MD file can split NEWPAT.  If it can't, see if letting it
3679 	 use I2DEST as a scratch register will help.  In the latter case,
3680 	 convert I2DEST to the mode of the source of NEWPAT if we can.  */
3681 
3682       m_split_insn = combine_split_insns (newpat, i3);
3683 
3684       /* We can only use I2DEST as a scratch reg if it doesn't overlap any
3685 	 inputs of NEWPAT.  */
3686 
3687       /* ??? If I2DEST is not safe, and I1DEST exists, then it would be
3688 	 possible to try that as a scratch reg.  This would require adding
3689 	 more code to make it work though.  */
3690 
3691       if (m_split_insn == 0 && ! reg_overlap_mentioned_p (i2dest, newpat))
3692 	{
3693 	  machine_mode new_mode = GET_MODE (SET_DEST (newpat));
3694 
3695 	  /* ??? Reusing i2dest without resetting the reg_stat entry for it
3696 	     (temporarily, until we are committed to this instruction
3697 	     combination) does not work: for example, any call to nonzero_bits
3698 	     on the register (from a splitter in the MD file, for example)
3699 	     will get the old information, which is invalid.
3700 
3701 	     Since nowadays we can create registers during combine just fine,
3702 	     we should just create a new one here, not reuse i2dest.  */
3703 
3704 	  /* First try to split using the original register as a
3705 	     scratch register.  */
3706 	  parallel = gen_rtx_PARALLEL (VOIDmode,
3707 				       gen_rtvec (2, newpat,
3708 						  gen_rtx_CLOBBER (VOIDmode,
3709 								   i2dest)));
3710 	  m_split_insn = combine_split_insns (parallel, i3);
3711 
3712 	  /* If that didn't work, try changing the mode of I2DEST if
3713 	     we can.  */
3714 	  if (m_split_insn == 0
3715 	      && new_mode != GET_MODE (i2dest)
3716 	      && new_mode != VOIDmode
3717 	      && can_change_dest_mode (i2dest, added_sets_2, new_mode))
3718 	    {
3719 	      machine_mode old_mode = GET_MODE (i2dest);
3720 	      rtx ni2dest;
3721 
3722 	      if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3723 		ni2dest = gen_rtx_REG (new_mode, REGNO (i2dest));
3724 	      else
3725 		{
3726 		  SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], new_mode);
3727 		  ni2dest = regno_reg_rtx[REGNO (i2dest)];
3728 		}
3729 
3730 	      parallel = (gen_rtx_PARALLEL
3731 			  (VOIDmode,
3732 			   gen_rtvec (2, newpat,
3733 				      gen_rtx_CLOBBER (VOIDmode,
3734 						       ni2dest))));
3735 	      m_split_insn = combine_split_insns (parallel, i3);
3736 
3737 	      if (m_split_insn == 0
3738 		  && REGNO (i2dest) >= FIRST_PSEUDO_REGISTER)
3739 		{
3740 		  struct undo *buf;
3741 
3742 		  adjust_reg_mode (regno_reg_rtx[REGNO (i2dest)], old_mode);
3743 		  buf = undobuf.undos;
3744 		  undobuf.undos = buf->next;
3745 		  buf->next = undobuf.frees;
3746 		  undobuf.frees = buf;
3747 		}
3748 	    }
3749 
3750 	  i2scratch = m_split_insn != 0;
3751 	}
3752 
3753       /* If recog_for_combine has discarded clobbers, try to use them
3754 	 again for the split.  */
3755       if (m_split_insn == 0 && newpat_vec_with_clobbers)
3756 	{
3757 	  parallel = gen_rtx_PARALLEL (VOIDmode, newpat_vec_with_clobbers);
3758 	  m_split_insn = combine_split_insns (parallel, i3);
3759 	}
3760 
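      /* If the machine splitter gave us exactly one insn, try to
	 recognize its pattern as the new I3.  If it gave us two, try
	 to use them as the new I2 and I3.  */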
3761       if (m_split_insn && NEXT_INSN (m_split_insn) == NULL_RTX)
3762 	{
3763 	  rtx m_split_pat = PATTERN (m_split_insn);
3764 	  insn_code_number = recog_for_combine (&m_split_pat, i3, &new_i3_notes);
3765 	  if (insn_code_number >= 0)
3766 	    newpat = m_split_pat;
3767 	}
3768       else if (m_split_insn && NEXT_INSN (NEXT_INSN (m_split_insn)) == NULL_RTX
3769 	       && (next_nonnote_nondebug_insn (i2) == i3
3770 		   || !modified_between_p (PATTERN (m_split_insn), i2, i3)))
3771 	{
3772 	  rtx i2set, i3set;
3773 	  rtx newi3pat = PATTERN (NEXT_INSN (m_split_insn));
3774 	  newi2pat = PATTERN (m_split_insn);
3775 
3776 	  i3set = single_set (NEXT_INSN (m_split_insn));
3777 	  i2set = single_set (m_split_insn);
3778 
3779 	  i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3780 
3781 	  /* If I2 or I3 has multiple SETs, we won't know how to track
3782 	     register status, so don't use these insns.  If I2's destination
3783 	     is used between I2 and I3, we also can't use these insns.  */
3784 
3785 	  if (i2_code_number >= 0 && i2set && i3set
3786 	      && (next_nonnote_nondebug_insn (i2) == i3
3787 		  || ! reg_used_between_p (SET_DEST (i2set), i2, i3)))
3788 	    insn_code_number = recog_for_combine (&newi3pat, i3,
3789 						  &new_i3_notes);
3790 	  if (insn_code_number >= 0)
3791 	    newpat = newi3pat;
3792 
3793 	  /* It is possible that both insns now set the destination of I3.
3794 	     If so, we must show an extra use of it.  */
3795 
3796 	  if (insn_code_number >= 0)
3797 	    {
3798 	      rtx new_i3_dest = SET_DEST (i3set);
3799 	      rtx new_i2_dest = SET_DEST (i2set);
3800 
3801 	      while (GET_CODE (new_i3_dest) == ZERO_EXTRACT
3802 		     || GET_CODE (new_i3_dest) == STRICT_LOW_PART
3803 		     || GET_CODE (new_i3_dest) == SUBREG)
3804 		new_i3_dest = XEXP (new_i3_dest, 0);
3805 
3806 	      while (GET_CODE (new_i2_dest) == ZERO_EXTRACT
3807 		     || GET_CODE (new_i2_dest) == STRICT_LOW_PART
3808 		     || GET_CODE (new_i2_dest) == SUBREG)
3809 		new_i2_dest = XEXP (new_i2_dest, 0);
3810 
3811 	      if (REG_P (new_i3_dest)
3812 		  && REG_P (new_i2_dest)
3813 		  && REGNO (new_i3_dest) == REGNO (new_i2_dest)
3814 		  && REGNO (new_i2_dest) < reg_n_sets_max)
3815 		INC_REG_N_SETS (REGNO (new_i2_dest), 1);
3816 	    }
3817 	}
3818 
3819       /* If we can split it and use I2DEST, go ahead and see if that
3820 	 helps things be recognized.  Verify that none of the registers
3821 	 are set between I2 and I3.  */
3822       if (insn_code_number < 0
	  && (split = find_split_point (&newpat, i3, false)) != 0
3824 	  && (!HAVE_cc0 || REG_P (i2dest))
3825 	  /* We need I2DEST in the proper mode.  If it is a hard register
3826 	     or the only use of a pseudo, we can change its mode.
3827 	     Make sure we don't change a hard register to have a mode that
3828 	     isn't valid for it, or change the number of registers.  */
3829 	  && (GET_MODE (*split) == GET_MODE (i2dest)
3830 	      || GET_MODE (*split) == VOIDmode
3831 	      || can_change_dest_mode (i2dest, added_sets_2,
3832 				       GET_MODE (*split)))
3833 	  && (next_nonnote_nondebug_insn (i2) == i3
3834 	      || !modified_between_p (*split, i2, i3))
3835 	  /* We can't overwrite I2DEST if its value is still used by
3836 	     NEWPAT.  */
3837 	  && ! reg_referenced_p (i2dest, newpat))
3838 	{
3839 	  rtx newdest = i2dest;
3840 	  enum rtx_code split_code = GET_CODE (*split);
3841 	  machine_mode split_mode = GET_MODE (*split);
3842 	  bool subst_done = false;
3843 	  newi2pat = NULL_RTX;
3844 
3845 	  i2scratch = true;
3846 
3847 	  /* *SPLIT may be part of I2SRC, so make sure we have the
3848 	     original expression around for later debug processing.
3849 	     We should not need I2SRC any more in other cases.  */
3850 	  if (MAY_HAVE_DEBUG_BIND_INSNS)
3851 	    i2src = copy_rtx (i2src);
3852 	  else
3853 	    i2src = NULL;
3854 
3855 	  /* Get NEWDEST as a register in the proper mode.  We have already
3856 	     validated that we can do this.  */
3857 	  if (GET_MODE (i2dest) != split_mode && split_mode != VOIDmode)
3858 	    {
3859 	      if (REGNO (i2dest) < FIRST_PSEUDO_REGISTER)
3860 		newdest = gen_rtx_REG (split_mode, REGNO (i2dest));
3861 	      else
3862 		{
3863 		  SUBST_MODE (regno_reg_rtx[REGNO (i2dest)], split_mode);
3864 		  newdest = regno_reg_rtx[REGNO (i2dest)];
3865 		}
3866 	    }
3867 
3868 	  /* If *SPLIT is a (mult FOO (const_int pow2)), convert it to
3869 	     an ASHIFT.  This can occur if it was inside a PLUS and hence
3870 	     appeared to be a memory address.  This is a kludge.  */
3871 	  if (split_code == MULT
3872 	      && CONST_INT_P (XEXP (*split, 1))
3873 	      && INTVAL (XEXP (*split, 1)) > 0
3874 	      && (i = exact_log2 (UINTVAL (XEXP (*split, 1)))) >= 0)
3875 	    {
3876 	      rtx i_rtx = gen_int_shift_amount (split_mode, i);
3877 	      SUBST (*split, gen_rtx_ASHIFT (split_mode,
3878 					     XEXP (*split, 0), i_rtx));
3879 	      /* Update split_code because we may not have a multiply
3880 		 anymore.  */
3881 	      split_code = GET_CODE (*split);
3882 	    }
3883 
3884 	  /* Similarly for (plus (mult FOO (const_int pow2))).  */
3885 	  if (split_code == PLUS
3886 	      && GET_CODE (XEXP (*split, 0)) == MULT
3887 	      && CONST_INT_P (XEXP (XEXP (*split, 0), 1))
3888 	      && INTVAL (XEXP (XEXP (*split, 0), 1)) > 0
3889 	      && (i = exact_log2 (UINTVAL (XEXP (XEXP (*split, 0), 1)))) >= 0)
3890 	    {
3891 	      rtx nsplit = XEXP (*split, 0);
3892 	      rtx i_rtx = gen_int_shift_amount (GET_MODE (nsplit), i);
3893 	      SUBST (XEXP (*split, 0), gen_rtx_ASHIFT (GET_MODE (nsplit),
3894 						       XEXP (nsplit, 0),
3895 						       i_rtx));
3896 	      /* Update split_code because we may not have a multiply
3897 		 anymore.  */
3898 	      split_code = GET_CODE (*split);
3899 	    }
3900 
3901 #ifdef INSN_SCHEDULING
3902 	  /* If *SPLIT is a paradoxical SUBREG, when we split it, it should
3903 	     be written as a ZERO_EXTEND.  */
3904 	  if (split_code == SUBREG && MEM_P (SUBREG_REG (*split)))
3905 	    {
3906 	      /* Or as a SIGN_EXTEND if LOAD_EXTEND_OP says that that's
3907 		 what it really is.  */
3908 	      if (load_extend_op (GET_MODE (SUBREG_REG (*split)))
3909 		  == SIGN_EXTEND)
3910 		SUBST (*split, gen_rtx_SIGN_EXTEND (split_mode,
3911 						    SUBREG_REG (*split)));
3912 	      else
3913 		SUBST (*split, gen_rtx_ZERO_EXTEND (split_mode,
3914 						    SUBREG_REG (*split)));
3915 	    }
3916 #endif
3917 
3918 	  /* Attempt to split binary operators using arithmetic identities.  */
3919 	  if (BINARY_P (SET_SRC (newpat))
3920 	      && split_mode == GET_MODE (SET_SRC (newpat))
3921 	      && ! side_effects_p (SET_SRC (newpat)))
3922 	    {
3923 	      rtx setsrc = SET_SRC (newpat);
3924 	      machine_mode mode = GET_MODE (setsrc);
3925 	      enum rtx_code code = GET_CODE (setsrc);
3926 	      rtx src_op0 = XEXP (setsrc, 0);
3927 	      rtx src_op1 = XEXP (setsrc, 1);
3928 
3929 	      /* Split "X = Y op Y" as "Z = Y; X = Z op Z".  */
3930 	      if (rtx_equal_p (src_op0, src_op1))
3931 		{
3932 		  newi2pat = gen_rtx_SET (newdest, src_op0);
3933 		  SUBST (XEXP (setsrc, 0), newdest);
3934 		  SUBST (XEXP (setsrc, 1), newdest);
3935 		  subst_done = true;
3936 		}
3937 	      /* Split "((P op Q) op R) op S" where op is PLUS or MULT.  */
3938 	      else if ((code == PLUS || code == MULT)
3939 		       && GET_CODE (src_op0) == code
3940 		       && GET_CODE (XEXP (src_op0, 0)) == code
3941 		       && (INTEGRAL_MODE_P (mode)
3942 			   || (FLOAT_MODE_P (mode)
3943 			       && flag_unsafe_math_optimizations)))
3944 		{
3945 		  rtx p = XEXP (XEXP (src_op0, 0), 0);
3946 		  rtx q = XEXP (XEXP (src_op0, 0), 1);
3947 		  rtx r = XEXP (src_op0, 1);
3948 		  rtx s = src_op1;
3949 
3950 		  /* Split both "((X op Y) op X) op Y" and
3951 		     "((X op Y) op Y) op X" as "T op T" where T is
3952 		     "X op Y".  */
3953 		  if ((rtx_equal_p (p,r) && rtx_equal_p (q,s))
3954 		       || (rtx_equal_p (p,s) && rtx_equal_p (q,r)))
3955 		    {
3956 		      newi2pat = gen_rtx_SET (newdest, XEXP (src_op0, 0));
3957 		      SUBST (XEXP (setsrc, 0), newdest);
3958 		      SUBST (XEXP (setsrc, 1), newdest);
3959 		      subst_done = true;
3960 		    }
3961 		  /* Split "((X op X) op Y) op Y)" as "T op T" where
3962 		     T is "X op Y".  */
3963 		  else if (rtx_equal_p (p,q) && rtx_equal_p (r,s))
3964 		    {
3965 		      rtx tmp = simplify_gen_binary (code, mode, p, r);
3966 		      newi2pat = gen_rtx_SET (newdest, tmp);
3967 		      SUBST (XEXP (setsrc, 0), newdest);
3968 		      SUBST (XEXP (setsrc, 1), newdest);
3969 		      subst_done = true;
3970 		    }
3971 		}
3972 	    }
3973 
3974 	  if (!subst_done)
3975 	    {
3976 	      newi2pat = gen_rtx_SET (newdest, *split);
3977 	      SUBST (*split, newdest);
3978 	    }
3979 
3980 	  i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
3981 
3982 	  /* recog_for_combine might have added CLOBBERs to newi2pat.
3983 	     Make sure NEWPAT does not depend on the clobbered regs.  */
3984 	  if (GET_CODE (newi2pat) == PARALLEL)
3985 	    for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
3986 	      if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
3987 		{
3988 		  rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
3989 		  if (reg_overlap_mentioned_p (reg, newpat))
3990 		    {
3991 		      undo_all ();
3992 		      return 0;
3993 		    }
3994 		}
3995 
3996 	  /* If the split point was a MULT and we didn't have one before,
3997 	     don't use one now.  */
3998 	  if (i2_code_number >= 0 && ! (split_code == MULT && ! have_mult))
3999 	    insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
4000 	}
4001     }
4002 
4003   /* Check for a case where we loaded from memory in a narrow mode and
4004      then sign extended it, but we need both registers.  In that case,
4005      we have a PARALLEL with both loads from the same memory location.
4006      We can split this into a load from memory followed by a register-register
4007      copy.  This saves at least one insn, more if register allocation can
4008      eliminate the copy.
4009 
4010      We cannot do this if the destination of the first assignment is a
4011      condition code register or cc0.  We eliminate this case by making sure
4012      the SET_DEST and SET_SRC have the same mode.
4013 
4014      We cannot do this if the destination of the second assignment is
4015      a register that we have already assumed is zero-extended.  Similarly
4016      for a SUBREG of such a register.  */
4017 
4018   else if (i1 && insn_code_number < 0 && asm_noperands (newpat) < 0
4019 	   && GET_CODE (newpat) == PARALLEL
4020 	   && XVECLEN (newpat, 0) == 2
4021 	   && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
4022 	   && GET_CODE (SET_SRC (XVECEXP (newpat, 0, 0))) == SIGN_EXTEND
4023 	   && (GET_MODE (SET_DEST (XVECEXP (newpat, 0, 0)))
4024 	       == GET_MODE (SET_SRC (XVECEXP (newpat, 0, 0))))
4025 	   && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
4026 	   && rtx_equal_p (SET_SRC (XVECEXP (newpat, 0, 1)),
4027 			   XEXP (SET_SRC (XVECEXP (newpat, 0, 0)), 0))
4028 	   && !modified_between_p (SET_SRC (XVECEXP (newpat, 0, 1)), i2, i3)
4029 	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
4030 	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
4031 	   && ! (temp_expr = SET_DEST (XVECEXP (newpat, 0, 1)),
4032 		 (REG_P (temp_expr)
4033 		  && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
4034 		  && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
4035 			       BITS_PER_WORD)
4036 		  && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
4037 			       HOST_BITS_PER_INT)
4038 		  && (reg_stat[REGNO (temp_expr)].nonzero_bits
4039 		      != GET_MODE_MASK (word_mode))))
4040 	   && ! (GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) == SUBREG
4041 		 && (temp_expr = SUBREG_REG (SET_DEST (XVECEXP (newpat, 0, 1))),
4042 		     (REG_P (temp_expr)
4043 		      && reg_stat[REGNO (temp_expr)].nonzero_bits != 0
4044 		      && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
4045 				   BITS_PER_WORD)
4046 		      && known_lt (GET_MODE_PRECISION (GET_MODE (temp_expr)),
4047 				   HOST_BITS_PER_INT)
4048 		      && (reg_stat[REGNO (temp_expr)].nonzero_bits
4049 			  != GET_MODE_MASK (word_mode)))))
4050 	   && ! reg_overlap_mentioned_p (SET_DEST (XVECEXP (newpat, 0, 1)),
4051 					 SET_SRC (XVECEXP (newpat, 0, 1)))
4052 	   && ! find_reg_note (i3, REG_UNUSED,
4053 			       SET_DEST (XVECEXP (newpat, 0, 0))))
4054     {
4055       rtx ni2dest;
4056 
4057       newi2pat = XVECEXP (newpat, 0, 0);
4058       ni2dest = SET_DEST (XVECEXP (newpat, 0, 0));
4059       newpat = XVECEXP (newpat, 0, 1);
4060       SUBST (SET_SRC (newpat),
4061 	     gen_lowpart (GET_MODE (SET_SRC (newpat)), ni2dest));
4062       i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
4063 
4064       if (i2_code_number >= 0)
4065 	insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
4066 
4067       if (insn_code_number >= 0)
4068 	swap_i2i3 = 1;
4069     }
4070 
4071   /* Similarly, check for a case where we have a PARALLEL of two independent
4072      SETs but we started with three insns.  In this case, we can do the sets
4073      as two separate insns.  This case occurs when some SET allows two
4074      other insns to combine, but the destination of that SET is still live.
4075 
4076      Also do this if we started with two insns and (at least) one of the
4077      resulting sets is a noop; this noop will be deleted later.
4078 
4079      Also do this if we started with two insns neither of which was a simple
4080      move.  */
4081 
4082   else if (insn_code_number < 0 && asm_noperands (newpat) < 0
4083 	   && GET_CODE (newpat) == PARALLEL
4084 	   && XVECLEN (newpat, 0) == 2
4085 	   && GET_CODE (XVECEXP (newpat, 0, 0)) == SET
4086 	   && GET_CODE (XVECEXP (newpat, 0, 1)) == SET
4087 	   && (i1
4088 	       || set_noop_p (XVECEXP (newpat, 0, 0))
4089 	       || set_noop_p (XVECEXP (newpat, 0, 1))
4090 	       || (!i2_was_move && !i3_was_move))
4091 	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != ZERO_EXTRACT
4092 	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 0))) != STRICT_LOW_PART
4093 	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != ZERO_EXTRACT
4094 	   && GET_CODE (SET_DEST (XVECEXP (newpat, 0, 1))) != STRICT_LOW_PART
4095 	   && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 1)),
4096 				  XVECEXP (newpat, 0, 0))
4097 	   && ! reg_referenced_p (SET_DEST (XVECEXP (newpat, 0, 0)),
4098 				  XVECEXP (newpat, 0, 1))
4099 	   && ! (contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 0)))
4100 		 && contains_muldiv (SET_SRC (XVECEXP (newpat, 0, 1)))))
4101     {
4102       rtx set0 = XVECEXP (newpat, 0, 0);
4103       rtx set1 = XVECEXP (newpat, 0, 1);
4104 
4105       /* Normally, it doesn't matter which of the two is done first,
4106 	 but the one that references cc0 can't be the second, and
4107 	 one which uses any regs/memory set in between i2 and i3 can't
4108 	 be first.  The PARALLEL might also have been pre-existing in i3,
4109 	 so we need to make sure that we won't wrongly hoist a SET to i2
4110 	 that would conflict with a death note present in there, or would
4111 	 have its dest modified between i2 and i3.  */
4112       if (!modified_between_p (SET_SRC (set1), i2, i3)
4113 	  && !(REG_P (SET_DEST (set1))
4114 	       && find_reg_note (i2, REG_DEAD, SET_DEST (set1)))
4115 	  && !(GET_CODE (SET_DEST (set1)) == SUBREG
4116 	       && find_reg_note (i2, REG_DEAD,
4117 				 SUBREG_REG (SET_DEST (set1))))
4118 	  && !modified_between_p (SET_DEST (set1), i2, i3)
4119 	  && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set0))
4120 	  /* If I3 is a jump, ensure that set0 is a jump so that
4121 	     we do not create invalid RTL.  */
	  && (!JUMP_P (i3) || SET_DEST (set0) == pc_rtx))
4124 	{
4125 	  newi2pat = set1;
4126 	  newpat = set0;
4127 	}
4128       else if (!modified_between_p (SET_SRC (set0), i2, i3)
4129 	       && !(REG_P (SET_DEST (set0))
4130 		    && find_reg_note (i2, REG_DEAD, SET_DEST (set0)))
4131 	       && !(GET_CODE (SET_DEST (set0)) == SUBREG
4132 		    && find_reg_note (i2, REG_DEAD,
4133 				      SUBREG_REG (SET_DEST (set0))))
4134 	       && !modified_between_p (SET_DEST (set0), i2, i3)
4135 	       && (!HAVE_cc0 || !reg_referenced_p (cc0_rtx, set1))
4136 	       /* If I3 is a jump, ensure that set1 is a jump so that
4137 		  we do not create invalid RTL.  */
	       && (!JUMP_P (i3) || SET_DEST (set1) == pc_rtx))
4140 	{
4141 	  newi2pat = set0;
4142 	  newpat = set1;
4143 	}
4144       else
4145 	{
4146 	  undo_all ();
4147 	  return 0;
4148 	}
4149 
4150       i2_code_number = recog_for_combine (&newi2pat, i2, &new_i2_notes);
4151 
4152       if (i2_code_number >= 0)
4153 	{
4154 	  /* recog_for_combine might have added CLOBBERs to newi2pat.
4155 	     Make sure NEWPAT does not depend on the clobbered regs.  */
4156 	  if (GET_CODE (newi2pat) == PARALLEL)
4157 	    {
4158 	      for (i = XVECLEN (newi2pat, 0) - 1; i >= 0; i--)
4159 		if (GET_CODE (XVECEXP (newi2pat, 0, i)) == CLOBBER)
4160 		  {
4161 		    rtx reg = XEXP (XVECEXP (newi2pat, 0, i), 0);
4162 		    if (reg_overlap_mentioned_p (reg, newpat))
4163 		      {
4164 			undo_all ();
4165 			return 0;
4166 		      }
4167 		  }
4168 	    }
4169 
4170 	  insn_code_number = recog_for_combine (&newpat, i3, &new_i3_notes);
4171 
4172 	  if (insn_code_number >= 0)
4173 	    split_i2i3 = 1;
4174 	}
4175     }
4176 
4177   /* If it still isn't recognized, fail and change things back the way they
4178      were.  */
4179   if ((insn_code_number < 0
4180        /* Is the result a reasonable ASM_OPERANDS?  */
4181        && (! check_asm_operands (newpat) || added_sets_1 || added_sets_2)))
4182     {
4183       undo_all ();
4184       return 0;
4185     }
4186 
4187   /* If we had to change another insn, make sure it is valid also.  */
4188   if (undobuf.other_insn)
4189     {
4190       CLEAR_HARD_REG_SET (newpat_used_regs);
4191 
4192       other_pat = PATTERN (undobuf.other_insn);
4193       other_code_number = recog_for_combine (&other_pat, undobuf.other_insn,
4194 					     &new_other_notes);
4195 
4196       if (other_code_number < 0 && ! check_asm_operands (other_pat))
4197 	{
4198 	  undo_all ();
4199 	  return 0;
4200 	}
4201     }
4202 
4203   /* If I2 is the CC0 setter and I3 is the CC0 user then check whether
4204      they are adjacent to each other or not.  */
4205   if (HAVE_cc0)
4206     {
4207       rtx_insn *p = prev_nonnote_insn (i3);
4208       if (p && p != i2 && NONJUMP_INSN_P (p) && newi2pat
4209 	  && sets_cc0_p (newi2pat))
4210 	{
4211 	  undo_all ();
4212 	  return 0;
4213 	}
4214     }
4215 
4216   /* Only allow this combination if insn_cost reports that the
4217      replacement instructions are cheaper than the originals.  */
4218   if (!combine_validate_cost (i0, i1, i2, i3, newpat, newi2pat, other_pat))
4219     {
4220       undo_all ();
4221       return 0;
4222     }
4223 
4224   if (MAY_HAVE_DEBUG_BIND_INSNS)
4225     {
4226       struct undo *undo;
4227 
4228       for (undo = undobuf.undos; undo; undo = undo->next)
4229 	if (undo->kind == UNDO_MODE)
4230 	  {
4231 	    rtx reg = *undo->where.r;
4232 	    machine_mode new_mode = GET_MODE (reg);
4233 	    machine_mode old_mode = undo->old_contents.m;
4234 
4235 	    /* Temporarily revert mode back.  */
4236 	    adjust_reg_mode (reg, old_mode);
4237 
4238 	    if (reg == i2dest && i2scratch)
4239 	      {
4240 		/* If we used i2dest as a scratch register with a
4241 		   different mode, substitute it for the original
4242 		   i2src while its original mode is temporarily
4243 		   restored, and then clear i2scratch so that we don't
4244 		   do it again later.  */
4245 		propagate_for_debug (i2, last_combined_insn, reg, i2src,
4246 				     this_basic_block);
4247 		i2scratch = false;
4248 		/* Put back the new mode.  */
4249 		adjust_reg_mode (reg, new_mode);
4250 	      }
4251 	    else
4252 	      {
4253 		rtx tempreg = gen_raw_REG (old_mode, REGNO (reg));
4254 		rtx_insn *first, *last;
4255 
4256 		if (reg == i2dest)
4257 		  {
4258 		    first = i2;
4259 		    last = last_combined_insn;
4260 		  }
4261 		else
4262 		  {
4263 		    first = i3;
4264 		    last = undobuf.other_insn;
4265 		    gcc_assert (last);
4266 		    if (DF_INSN_LUID (last)
4267 			< DF_INSN_LUID (last_combined_insn))
4268 		      last = last_combined_insn;
4269 		  }
4270 
4271 		/* We're dealing with a reg that changed mode but not
4272 		   meaning, so we want to turn it into a subreg for
4273 		   the new mode.  However, because of REG sharing and
4274 		   because its mode had already changed, we have to do
4275 		   it in two steps.  First, replace any debug uses of
4276 		   reg, with its original mode temporarily restored,
4277 		   with this copy we have created; then, replace the
4278 		   copy with the SUBREG of the original shared reg,
4279 		   once again changed to the new mode.  */
4280 		propagate_for_debug (first, last, reg, tempreg,
4281 				     this_basic_block);
4282 		adjust_reg_mode (reg, new_mode);
4283 		propagate_for_debug (first, last, tempreg,
4284 				     lowpart_subreg (old_mode, reg, new_mode),
4285 				     this_basic_block);
4286 	      }
4287 	  }
4288     }
4289 
4290   /* If we will be able to accept this, we have made a
4291      change to the destination of I3.  This requires us to
4292      do a few adjustments.  */
4293 
4294   if (changed_i3_dest)
4295     {
4296       PATTERN (i3) = newpat;
4297       adjust_for_new_dest (i3);
4298     }
4299 
4300   /* We now know that we can do this combination.  Merge the insns and
4301      update the status of registers and LOG_LINKS.  */
4302 
4303   if (undobuf.other_insn)
4304     {
4305       rtx note, next;
4306 
4307       PATTERN (undobuf.other_insn) = other_pat;
4308 
4309       /* If any of the notes in OTHER_INSN were REG_DEAD or REG_UNUSED,
4310 	 ensure that they are still valid.  Then add any non-duplicate
4311 	 notes added by recog_for_combine.  */
4312       for (note = REG_NOTES (undobuf.other_insn); note; note = next)
4313 	{
4314 	  next = XEXP (note, 1);
4315 
4316 	  if ((REG_NOTE_KIND (note) == REG_DEAD
4317 	       && !reg_referenced_p (XEXP (note, 0),
4318 				     PATTERN (undobuf.other_insn)))
	      || (REG_NOTE_KIND (note) == REG_UNUSED
		  && !reg_set_p (XEXP (note, 0),
				 PATTERN (undobuf.other_insn)))
	      /* Simply drop any REG_EQUAL or REG_EQUIV note, since it may
		 no longer be valid for other_insn.  It may be possible to
		 record that the CC register has changed and only discard
		 those notes, but in practice that is an unnecessary
		 complication and doesn't give any meaningful improvement.
4327 
4328 		 See PR78559.  */
4329 	      || REG_NOTE_KIND (note) == REG_EQUAL
4330 	      || REG_NOTE_KIND (note) == REG_EQUIV)
4331 	    remove_note (undobuf.other_insn, note);
4332 	}
4333 
      distribute_notes (new_other_notes, undobuf.other_insn,
4335 			undobuf.other_insn, NULL, NULL_RTX, NULL_RTX,
4336 			NULL_RTX);
4337     }
4338 
4339   if (swap_i2i3)
4340     {
4341       /* I3 now uses what used to be its destination and which is now
4342 	 I2's destination.  This requires us to do a few adjustments.  */
4343       PATTERN (i3) = newpat;
4344       adjust_for_new_dest (i3);
4345     }
4346 
4347   if (swap_i2i3 || split_i2i3)
4348     {
4349       /* We might need a LOG_LINK from I3 to I2.  But then we used to
4350 	 have one, so we still will.
4351 
4352 	 However, some later insn might be using I2's dest and have
4353 	 a LOG_LINK pointing at I3.  We should change it to point at
4354 	 I2 instead.  */
4355 
4356       /* newi2pat is usually a SET here; however, recog_for_combine might
4357 	 have added some clobbers.  */
4358       rtx x = newi2pat;
4359       if (GET_CODE (x) == PARALLEL)
4360 	x = XVECEXP (newi2pat, 0, 0);
4361 
4362       /* It can only be a SET of a REG or of a SUBREG of a REG.  */
4363       unsigned int regno = reg_or_subregno (SET_DEST (x));
4364 
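      /* Scan the insns after I3 in this block and redirect the first
	 LOG_LINK for REGNO that points back at I3 so it points at I2
	 instead.  */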
4365       bool done = false;
4366       for (rtx_insn *insn = NEXT_INSN (i3);
4367 	   !done
4368 	   && insn
4369 	   && NONDEBUG_INSN_P (insn)
4370 	   && BLOCK_FOR_INSN (insn) == this_basic_block;
4371 	   insn = NEXT_INSN (insn))
4372 	{
4373 	  struct insn_link *link;
4374 	  FOR_EACH_LOG_LINK (link, insn)
4375 	    if (link->insn == i3 && link->regno == regno)
4376 	      {
4377 		link->insn = i2;
4378 		done = true;
4379 		break;
4380 	      }
4381 	}
4382     }
4383 
4384   {
4385     rtx i3notes, i2notes, i1notes = 0, i0notes = 0;
4386     struct insn_link *i3links, *i2links, *i1links = 0, *i0links = 0;
4387     rtx midnotes = 0;
4388     int from_luid;
4389     /* Compute which registers we expect to eliminate.  newi2pat may be setting
4390        either i3dest or i2dest, so we must check it.  */
4391     rtx elim_i2 = ((newi2pat && reg_set_p (i2dest, newi2pat))
4392 		   || i2dest_in_i2src || i2dest_in_i1src || i2dest_in_i0src
4393 		   || !i2dest_killed
4394 		   ? 0 : i2dest);
4395     /* For i1, we need to compute both local elimination and global
4396        elimination information with respect to newi2pat because i1dest
4397        may be the same as i3dest, in which case newi2pat may be setting
4398        i1dest.  Global information is used when distributing REG_DEAD
4399        note for i2 and i3, in which case it does matter if newi2pat sets
4400        i1dest or not.
4401 
4402        Local information is used when distributing REG_DEAD note for i1,
4403        in which case it doesn't matter if newi2pat sets i1dest or not.
4404        See PR62151, if we have four insns combination:
4405 	   i0: r0 <- i0src
4406 	   i1: r1 <- i1src (using r0)
4407 		     REG_DEAD (r0)
4408 	   i2: r0 <- i2src (using r1)
4409 	   i3: r3 <- i3src (using r0)
4410 	   ix: using r0
4411        From i1's point of view, r0 is eliminated, no matter if it is set
4412        by newi2pat or not.  In other words, REG_DEAD info for r0 in i1
4413        should be discarded.
4414 
4415        Note local information only affects cases in forms like "I1->I2->I3",
4416        "I0->I1->I2->I3" or "I0&I1->I2, I2->I3".  For other cases like
4417        "I0->I1, I1&I2->I3" or "I1&I2->I3", newi2pat won't set i1dest or
4418        i0dest anyway.  */
4419     rtx local_elim_i1 = (i1 == 0 || i1dest_in_i1src || i1dest_in_i0src
4420 			 || !i1dest_killed
4421 			 ? 0 : i1dest);
4422     rtx elim_i1 = (local_elim_i1 == 0
4423 		   || (newi2pat && reg_set_p (i1dest, newi2pat))
4424 		   ? 0 : i1dest);
4425     /* Same case as i1.  */
4426     rtx local_elim_i0 = (i0 == 0 || i0dest_in_i0src || !i0dest_killed
4427 			 ? 0 : i0dest);
4428     rtx elim_i0 = (local_elim_i0 == 0
4429 		   || (newi2pat && reg_set_p (i0dest, newi2pat))
4430 		   ? 0 : i0dest);
4431 
4432     /* Get the old REG_NOTES and LOG_LINKS from all our insns and
4433        clear them.  */
4434     i3notes = REG_NOTES (i3), i3links = LOG_LINKS (i3);
4435     i2notes = REG_NOTES (i2), i2links = LOG_LINKS (i2);
4436     if (i1)
4437       i1notes = REG_NOTES (i1), i1links = LOG_LINKS (i1);
4438     if (i0)
4439       i0notes = REG_NOTES (i0), i0links = LOG_LINKS (i0);
4440 
4441     /* Ensure that we do not have something that should not be shared but
4442        occurs multiple times in the new insns.  Check this by first resetting
4443        all the `used' flags and then copying anything that is shared.  */
4444 
4445     reset_used_flags (i3notes);
4446     reset_used_flags (i2notes);
4447     reset_used_flags (i1notes);
4448     reset_used_flags (i0notes);
4449     reset_used_flags (newpat);
4450     reset_used_flags (newi2pat);
4451     if (undobuf.other_insn)
4452       reset_used_flags (PATTERN (undobuf.other_insn));
4453 
4454     i3notes = copy_rtx_if_shared (i3notes);
4455     i2notes = copy_rtx_if_shared (i2notes);
4456     i1notes = copy_rtx_if_shared (i1notes);
4457     i0notes = copy_rtx_if_shared (i0notes);
4458     newpat = copy_rtx_if_shared (newpat);
4459     newi2pat = copy_rtx_if_shared (newi2pat);
4460     if (undobuf.other_insn)
4461       reset_used_flags (PATTERN (undobuf.other_insn));
4462 
4463     INSN_CODE (i3) = insn_code_number;
4464     PATTERN (i3) = newpat;
4465 
4466     if (CALL_P (i3) && CALL_INSN_FUNCTION_USAGE (i3))
4467       {
4468 	for (rtx link = CALL_INSN_FUNCTION_USAGE (i3); link;
4469 	     link = XEXP (link, 1))
4470 	  {
4471 	    if (substed_i2)
4472 	      {
4473 		/* I2SRC must still be meaningful at this point.  Some
4474 		   splitting operations can invalidate I2SRC, but those
4475 		   operations do not apply to calls.  */
4476 		gcc_assert (i2src);
4477 		XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4478 						       i2dest, i2src);
4479 	      }
4480 	    if (substed_i1)
4481 	      XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4482 						     i1dest, i1src);
4483 	    if (substed_i0)
4484 	      XEXP (link, 0) = simplify_replace_rtx (XEXP (link, 0),
4485 						     i0dest, i0src);
4486 	  }
4487       }
4488 
4489     if (undobuf.other_insn)
4490       INSN_CODE (undobuf.other_insn) = other_code_number;
4491 
4492     /* We had one special case above where I2 had more than one set and
4493        we replaced a destination of one of those sets with the destination
4494        of I3.  In that case, we have to update LOG_LINKS of insns later
4495        in this basic block.  Note that this (expensive) case is rare.
4496 
4497        Also, in this case, we must pretend that all REG_NOTEs for I2
4498        actually came from I3, so that REG_UNUSED notes from I2 will be
4499        properly handled.  */
4500 
4501     if (i3_subst_into_i2)
4502       {
4503 	for (i = 0; i < XVECLEN (PATTERN (i2), 0); i++)
4504 	  if ((GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == SET
4505 	       || GET_CODE (XVECEXP (PATTERN (i2), 0, i)) == CLOBBER)
4506 	      && REG_P (SET_DEST (XVECEXP (PATTERN (i2), 0, i)))
4507 	      && SET_DEST (XVECEXP (PATTERN (i2), 0, i)) != i2dest
4508 	      && ! find_reg_note (i2, REG_UNUSED,
4509 				  SET_DEST (XVECEXP (PATTERN (i2), 0, i))))
4510 	    for (temp_insn = NEXT_INSN (i2);
4511 		 temp_insn
4512 		 && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
4513 		     || BB_HEAD (this_basic_block->next_bb) != temp_insn);
4514 		 temp_insn = NEXT_INSN (temp_insn))
4515 	      if (temp_insn != i3 && NONDEBUG_INSN_P (temp_insn))
4516 		FOR_EACH_LOG_LINK (link, temp_insn)
4517 		  if (link->insn == i2)
4518 		    link->insn = i3;
4519 
4520 	if (i3notes)
4521 	  {
4522 	    rtx link = i3notes;
4523 	    while (XEXP (link, 1))
4524 	      link = XEXP (link, 1);
4525 	    XEXP (link, 1) = i2notes;
4526 	  }
4527 	else
4528 	  i3notes = i2notes;
4529 	i2notes = 0;
4530       }
4531 
4532     LOG_LINKS (i3) = NULL;
4533     REG_NOTES (i3) = 0;
4534     LOG_LINKS (i2) = NULL;
4535     REG_NOTES (i2) = 0;
4536 
4537     if (newi2pat)
4538       {
4539 	if (MAY_HAVE_DEBUG_BIND_INSNS && i2scratch)
4540 	  propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4541 			       this_basic_block);
4542 	INSN_CODE (i2) = i2_code_number;
4543 	PATTERN (i2) = newi2pat;
4544       }
4545     else
4546       {
4547 	if (MAY_HAVE_DEBUG_BIND_INSNS && i2src)
4548 	  propagate_for_debug (i2, last_combined_insn, i2dest, i2src,
4549 			       this_basic_block);
4550 	SET_INSN_DELETED (i2);
4551       }
4552 
4553     if (i1)
4554       {
4555 	LOG_LINKS (i1) = NULL;
4556 	REG_NOTES (i1) = 0;
4557 	if (MAY_HAVE_DEBUG_BIND_INSNS)
4558 	  propagate_for_debug (i1, last_combined_insn, i1dest, i1src,
4559 			       this_basic_block);
4560 	SET_INSN_DELETED (i1);
4561       }
4562 
4563     if (i0)
4564       {
4565 	LOG_LINKS (i0) = NULL;
4566 	REG_NOTES (i0) = 0;
4567 	if (MAY_HAVE_DEBUG_BIND_INSNS)
4568 	  propagate_for_debug (i0, last_combined_insn, i0dest, i0src,
4569 			       this_basic_block);
4570 	SET_INSN_DELETED (i0);
4571       }
4572 
4573     /* Get death notes for everything that is now used in either I3 or
4574        I2 and used to die in a previous insn.  If we built two new
4575        patterns, move from I1 to I2 then I2 to I3 so that we get the
4576        proper movement on registers that I2 modifies.  */
4577 
4578     if (i0)
4579       from_luid = DF_INSN_LUID (i0);
4580     else if (i1)
4581       from_luid = DF_INSN_LUID (i1);
4582     else
4583       from_luid = DF_INSN_LUID (i2);
4584     if (newi2pat)
4585       move_deaths (newi2pat, NULL_RTX, from_luid, i2, &midnotes);
4586     move_deaths (newpat, newi2pat, from_luid, i3, &midnotes);
4587 
4588     /* Distribute all the LOG_LINKS and REG_NOTES from I1, I2, and I3.  */
4589     if (i3notes)
4590       distribute_notes (i3notes, i3, i3, newi2pat ? i2 : NULL,
4591 			elim_i2, elim_i1, elim_i0);
4592     if (i2notes)
4593       distribute_notes (i2notes, i2, i3, newi2pat ? i2 : NULL,
4594 			elim_i2, elim_i1, elim_i0);
4595     if (i1notes)
4596       distribute_notes (i1notes, i1, i3, newi2pat ? i2 : NULL,
4597 			elim_i2, local_elim_i1, local_elim_i0);
4598     if (i0notes)
4599       distribute_notes (i0notes, i0, i3, newi2pat ? i2 : NULL,
4600 			elim_i2, elim_i1, local_elim_i0);
4601     if (midnotes)
4602       distribute_notes (midnotes, NULL, i3, newi2pat ? i2 : NULL,
4603 			elim_i2, elim_i1, elim_i0);
4604 
4605     /* Distribute any notes added to I2 or I3 by recog_for_combine.  We
4606        know these are REG_UNUSED and want them to go to the desired insn,
4607        so we always pass it as i3.  */
4608 
4609     if (newi2pat && new_i2_notes)
4610       distribute_notes (new_i2_notes, i2, i2, NULL, NULL_RTX, NULL_RTX,
4611 			NULL_RTX);
4612 
4613     if (new_i3_notes)
4614       distribute_notes (new_i3_notes, i3, i3, NULL, NULL_RTX, NULL_RTX,
4615 			NULL_RTX);
4616 
4617     /* If I3DEST was used in I3SRC, it really died in I3.  We may need to
4618        put a REG_DEAD note for it somewhere.  If NEWI2PAT exists and sets
4619        I3DEST, the death must be somewhere before I2, not I3.  If we passed I3
4620        in that case, it might delete I2.  Similarly for I2 and I1.
4621        Show an additional death due to the REG_DEAD note we make here.  If
4622        we discard it in distribute_notes, we will decrement it again.  */
4623 
4624     if (i3dest_killed)
4625       {
4626 	rtx new_note = alloc_reg_note (REG_DEAD, i3dest_killed, NULL_RTX);
4627 	if (newi2pat && reg_set_p (i3dest_killed, newi2pat))
4628 	  distribute_notes (new_note, NULL, i2, NULL, elim_i2,
4629 			    elim_i1, elim_i0);
4630 	else
4631 	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4632 			    elim_i2, elim_i1, elim_i0);
4633       }
4634 
4635     if (i2dest_in_i2src)
4636       {
4637 	rtx new_note = alloc_reg_note (REG_DEAD, i2dest, NULL_RTX);
4638 	if (newi2pat && reg_set_p (i2dest, newi2pat))
4639 	  distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4640 			    NULL_RTX, NULL_RTX);
4641 	else
4642 	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4643 			    NULL_RTX, NULL_RTX, NULL_RTX);
4644       }
4645 
4646     if (i1dest_in_i1src)
4647       {
4648 	rtx new_note = alloc_reg_note (REG_DEAD, i1dest, NULL_RTX);
4649 	if (newi2pat && reg_set_p (i1dest, newi2pat))
4650 	  distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4651 			    NULL_RTX, NULL_RTX);
4652 	else
4653 	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4654 			    NULL_RTX, NULL_RTX, NULL_RTX);
4655       }
4656 
4657     if (i0dest_in_i0src)
4658       {
4659 	rtx new_note = alloc_reg_note (REG_DEAD, i0dest, NULL_RTX);
4660 	if (newi2pat && reg_set_p (i0dest, newi2pat))
4661 	  distribute_notes (new_note, NULL, i2, NULL, NULL_RTX,
4662 			    NULL_RTX, NULL_RTX);
4663 	else
4664 	  distribute_notes (new_note, NULL, i3, newi2pat ? i2 : NULL,
4665 			    NULL_RTX, NULL_RTX, NULL_RTX);
4666       }
4667 
4668     distribute_links (i3links);
4669     distribute_links (i2links);
4670     distribute_links (i1links);
4671     distribute_links (i0links);
4672 
4673     if (REG_P (i2dest))
4674       {
4675 	struct insn_link *link;
4676 	rtx_insn *i2_insn = 0;
4677 	rtx i2_val = 0, set;
4678 
4679 	/* The insn that used to set this register doesn't exist, and
4680 	   this life of the register may not exist either.  See if one of
4681 	   I3's links points to an insn that sets I2DEST.  If it does,
4682 	   that is now the last known value for I2DEST.  If we don't update
4683 	   this and I2 set the register to a value that depended on its old
4684 	   contents, we will get confused.  If this insn is used, things
4685 	   will be set correctly in combine_instructions.  */
4686 	FOR_EACH_LOG_LINK (link, i3)
4687 	  if ((set = single_set (link->insn)) != 0
4688 	      && rtx_equal_p (i2dest, SET_DEST (set)))
4689 	    i2_insn = link->insn, i2_val = SET_SRC (set);
4690 
4691 	record_value_for_reg (i2dest, i2_insn, i2_val);
4692 
4693 	/* If the reg formerly set in I2 died only once and that was in I3,
4694 	   zero its use count so it won't make `reload' do any work.  */
4695 	if (! added_sets_2
4696 	    && (newi2pat == 0 || ! reg_mentioned_p (i2dest, newi2pat))
4697 	    && ! i2dest_in_i2src
4698 	    && REGNO (i2dest) < reg_n_sets_max)
4699 	  INC_REG_N_SETS (REGNO (i2dest), -1);
4700       }
4701 
4702     if (i1 && REG_P (i1dest))
4703       {
4704 	struct insn_link *link;
4705 	rtx_insn *i1_insn = 0;
4706 	rtx i1_val = 0, set;
4707 
4708 	FOR_EACH_LOG_LINK (link, i3)
4709 	  if ((set = single_set (link->insn)) != 0
4710 	      && rtx_equal_p (i1dest, SET_DEST (set)))
4711 	    i1_insn = link->insn, i1_val = SET_SRC (set);
4712 
4713 	record_value_for_reg (i1dest, i1_insn, i1_val);
4714 
4715 	if (! added_sets_1
4716 	    && ! i1dest_in_i1src
4717 	    && REGNO (i1dest) < reg_n_sets_max)
4718 	  INC_REG_N_SETS (REGNO (i1dest), -1);
4719       }
4720 
4721     if (i0 && REG_P (i0dest))
4722       {
4723 	struct insn_link *link;
4724 	rtx_insn *i0_insn = 0;
4725 	rtx i0_val = 0, set;
4726 
4727 	FOR_EACH_LOG_LINK (link, i3)
4728 	  if ((set = single_set (link->insn)) != 0
4729 	      && rtx_equal_p (i0dest, SET_DEST (set)))
4730 	    i0_insn = link->insn, i0_val = SET_SRC (set);
4731 
4732 	record_value_for_reg (i0dest, i0_insn, i0_val);
4733 
4734 	if (! added_sets_0
4735 	    && ! i0dest_in_i0src
4736 	    && REGNO (i0dest) < reg_n_sets_max)
4737 	  INC_REG_N_SETS (REGNO (i0dest), -1);
4738       }
4739 
4740     /* Update reg_stat[].nonzero_bits et al for any changes that may have
4741        been made to this insn.  The order is important, because newi2pat
4742        can affect nonzero_bits of newpat.  */
4743     if (newi2pat)
4744       note_stores (newi2pat, set_nonzero_bits_and_sign_copies, NULL);
4745     note_stores (newpat, set_nonzero_bits_and_sign_copies, NULL);
4746   }
4747 
4748   if (undobuf.other_insn != NULL_RTX)
4749     {
4750       if (dump_file)
4751 	{
4752 	  fprintf (dump_file, "modifying other_insn ");
4753 	  dump_insn_slim (dump_file, undobuf.other_insn);
4754 	}
4755       df_insn_rescan (undobuf.other_insn);
4756     }
4757 
4758   if (i0 && !(NOTE_P (i0) && (NOTE_KIND (i0) == NOTE_INSN_DELETED)))
4759     {
4760       if (dump_file)
4761 	{
4762 	  fprintf (dump_file, "modifying insn i0 ");
4763 	  dump_insn_slim (dump_file, i0);
4764 	}
4765       df_insn_rescan (i0);
4766     }
4767 
4768   if (i1 && !(NOTE_P (i1) && (NOTE_KIND (i1) == NOTE_INSN_DELETED)))
4769     {
4770       if (dump_file)
4771 	{
4772 	  fprintf (dump_file, "modifying insn i1 ");
4773 	  dump_insn_slim (dump_file, i1);
4774 	}
4775       df_insn_rescan (i1);
4776     }
4777 
4778   if (i2 && !(NOTE_P (i2) && (NOTE_KIND (i2) == NOTE_INSN_DELETED)))
4779     {
4780       if (dump_file)
4781 	{
4782 	  fprintf (dump_file, "modifying insn i2 ");
4783 	  dump_insn_slim (dump_file, i2);
4784 	}
4785       df_insn_rescan (i2);
4786     }
4787 
4788   if (i3 && !(NOTE_P (i3) && (NOTE_KIND (i3) == NOTE_INSN_DELETED)))
4789     {
4790       if (dump_file)
4791 	{
4792 	  fprintf (dump_file, "modifying insn i3 ");
4793 	  dump_insn_slim (dump_file, i3);
4794 	}
4795       df_insn_rescan (i3);
4796     }
4797 
4798   /* Set new_direct_jump_p if a new return or simple jump instruction
4799      has been created.  Adjust the CFG accordingly.  */
4800   if (returnjump_p (i3) || any_uncondjump_p (i3))
4801     {
4802       *new_direct_jump_p = 1;
4803       mark_jump_label (PATTERN (i3), i3, 0);
4804       update_cfg_for_uncondjump (i3);
4805     }
4806 
4807   if (undobuf.other_insn != NULL_RTX
4808       && (returnjump_p (undobuf.other_insn)
4809 	  || any_uncondjump_p (undobuf.other_insn)))
4810     {
4811       *new_direct_jump_p = 1;
4812       update_cfg_for_uncondjump (undobuf.other_insn);
4813     }
4814 
4815   if (GET_CODE (PATTERN (i3)) == TRAP_IF
4816       && XEXP (PATTERN (i3), 0) == const1_rtx)
4817     {
4818       basic_block bb = BLOCK_FOR_INSN (i3);
4819       gcc_assert (bb);
4820       remove_edge (split_block (bb, i3));
4821       emit_barrier_after_bb (bb);
4822       *new_direct_jump_p = 1;
4823     }
4824 
4825   if (undobuf.other_insn
4826       && GET_CODE (PATTERN (undobuf.other_insn)) == TRAP_IF
4827       && XEXP (PATTERN (undobuf.other_insn), 0) == const1_rtx)
4828     {
4829       basic_block bb = BLOCK_FOR_INSN (undobuf.other_insn);
4830       gcc_assert (bb);
4831       remove_edge (split_block (bb, undobuf.other_insn));
4832       emit_barrier_after_bb (bb);
4833       *new_direct_jump_p = 1;
4834     }
4835 
4836   /* A noop might also need cleaning up of the CFG, if it comes from the
4837      simplification of a jump.  */
4838   if (JUMP_P (i3)
4839       && GET_CODE (newpat) == SET
4840       && SET_SRC (newpat) == pc_rtx
4841       && SET_DEST (newpat) == pc_rtx)
4842     {
4843       *new_direct_jump_p = 1;
4844       update_cfg_for_uncondjump (i3);
4845     }
4846 
4847   if (undobuf.other_insn != NULL_RTX
4848       && JUMP_P (undobuf.other_insn)
4849       && GET_CODE (PATTERN (undobuf.other_insn)) == SET
4850       && SET_SRC (PATTERN (undobuf.other_insn)) == pc_rtx
4851       && SET_DEST (PATTERN (undobuf.other_insn)) == pc_rtx)
4852     {
4853       *new_direct_jump_p = 1;
4854       update_cfg_for_uncondjump (undobuf.other_insn);
4855     }
4856 
4857   combine_successes++;
4858   undo_commit ();
4859 
4860   rtx_insn *ret = newi2pat ? i2 : i3;
4861   if (added_links_insn && DF_INSN_LUID (added_links_insn) < DF_INSN_LUID (ret))
4862     ret = added_links_insn;
4863   if (added_notes_insn && DF_INSN_LUID (added_notes_insn) < DF_INSN_LUID (ret))
4864     ret = added_notes_insn;
4865 
4866   return ret;
4867 }
4868 
4869 /* Get a marker for undoing to the current state.  */
4870 
4871 static void *
4872 get_undo_marker (void)
4873 {
4874   return undobuf.undos;
4875 }
4876 
4877 /* Undo the modifications up to the marker.  */
4878 
4879 static void
4880 undo_to_marker (void *marker)
4881 {
4882   struct undo *undo, *next;
4883 
4884   for (undo = undobuf.undos; undo != marker; undo = next)
4885     {
4886       gcc_assert (undo);
4887 
4888       next = undo->next;
4889       switch (undo->kind)
4890 	{
4891 	case UNDO_RTX:
4892 	  *undo->where.r = undo->old_contents.r;
4893 	  break;
4894 	case UNDO_INT:
4895 	  *undo->where.i = undo->old_contents.i;
4896 	  break;
4897 	case UNDO_MODE:
4898 	  adjust_reg_mode (*undo->where.r, undo->old_contents.m);
4899 	  break;
4900 	case UNDO_LINKS:
4901 	  *undo->where.l = undo->old_contents.l;
4902 	  break;
4903 	default:
4904 	  gcc_unreachable ();
4905 	}
4906 
4907       undo->next = undobuf.frees;
4908       undobuf.frees = undo;
4909     }
4910 
4911   undobuf.undos = (struct undo *) marker;
4912 }
4913 
4914 /* Undo all the modifications recorded in undobuf.  */
4915 
4916 static void
4917 undo_all (void)
4918 {
4919   undo_to_marker (0);
4920 }
4921 
4922 /* We've committed to accepting the changes we made.  Move all
4923    of the undos to the free list.  */
4924 
4925 static void
4926 undo_commit (void)
4927 {
4928   struct undo *undo, *next;
4929 
4930   for (undo = undobuf.undos; undo; undo = next)
4931     {
4932       next = undo->next;
4933       undo->next = undobuf.frees;
4934       undobuf.frees = undo;
4935     }
4936   undobuf.undos = 0;
4937 }
4938 
4939 /* Find the innermost point within the rtx at LOC, possibly LOC itself,
4940    where we have an arithmetic expression and return that point.  LOC will
4941    be inside INSN.
4942 
4943    try_combine will call this function to see if an insn can be split into
4944    two insns.  */
4945 
4946 static rtx *
4947 find_split_point (rtx *loc, rtx_insn *insn, bool set_src)
4948 {
4949   rtx x = *loc;
4950   enum rtx_code code = GET_CODE (x);
4951   rtx *split;
4952   unsigned HOST_WIDE_INT len = 0;
4953   HOST_WIDE_INT pos = 0;
4954   int unsignedp = 0;
4955   rtx inner = NULL_RTX;
4956   scalar_int_mode mode, inner_mode;
4957 
4958   /* First special-case some codes.  */
4959   switch (code)
4960     {
4961     case SUBREG:
4962 #ifdef INSN_SCHEDULING
4963       /* If we are making a paradoxical SUBREG invalid, it becomes a split
4964 	 point.  */
4965       if (MEM_P (SUBREG_REG (x)))
4966 	return loc;
4967 #endif
4968       return find_split_point (&SUBREG_REG (x), insn, false);
4969 
4970     case MEM:
4971       /* If we have (mem (const ..)) or (mem (symbol_ref ...)), split it
4972 	 using LO_SUM and HIGH.  */
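      /* For example, on a target with LO_SUM addressing,
	   (mem (symbol_ref "x"))
	 becomes
	   (mem (lo_sum (high (symbol_ref "x")) (symbol_ref "x")))
	 and the HIGH subexpression is returned as the split point.  */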
4973       if (HAVE_lo_sum && (GET_CODE (XEXP (x, 0)) == CONST
4974 			  || GET_CODE (XEXP (x, 0)) == SYMBOL_REF))
4975 	{
4976 	  machine_mode address_mode = get_address_mode (x);
4977 
4978 	  SUBST (XEXP (x, 0),
4979 		 gen_rtx_LO_SUM (address_mode,
4980 				 gen_rtx_HIGH (address_mode, XEXP (x, 0)),
4981 				 XEXP (x, 0)));
4982 	  return &XEXP (XEXP (x, 0), 0);
4983 	}
4984 
4985       /* If we have a PLUS whose second operand is a constant and the
4986 	 address is not valid, perhaps we can split it up using
4987 	 the machine-specific way to split large constants.  We use
4988 	 the first pseudo-reg (one of the virtual regs) as a placeholder;
4989 	 it will not remain in the result.  */
4990       if (GET_CODE (XEXP (x, 0)) == PLUS
4991 	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
4992 	  && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
4993 					    MEM_ADDR_SPACE (x)))
4994 	{
4995 	  rtx reg = regno_reg_rtx[FIRST_PSEUDO_REGISTER];
4996 	  rtx_insn *seq = combine_split_insns (gen_rtx_SET (reg, XEXP (x, 0)),
4997 					       subst_insn);
4998 
4999 	  /* This should have produced two insns, each of which sets our
5000 	     placeholder.  If the source of the second is a valid address,
5001 	     we can put both sources together and make a split point
5002 	     in the middle.  */
5003 
5004 	  if (seq
5005 	      && NEXT_INSN (seq) != NULL_RTX
5006 	      && NEXT_INSN (NEXT_INSN (seq)) == NULL_RTX
5007 	      && NONJUMP_INSN_P (seq)
5008 	      && GET_CODE (PATTERN (seq)) == SET
5009 	      && SET_DEST (PATTERN (seq)) == reg
5010 	      && ! reg_mentioned_p (reg,
5011 				    SET_SRC (PATTERN (seq)))
5012 	      && NONJUMP_INSN_P (NEXT_INSN (seq))
5013 	      && GET_CODE (PATTERN (NEXT_INSN (seq))) == SET
5014 	      && SET_DEST (PATTERN (NEXT_INSN (seq))) == reg
5015 	      && memory_address_addr_space_p
5016 		   (GET_MODE (x), SET_SRC (PATTERN (NEXT_INSN (seq))),
5017 		    MEM_ADDR_SPACE (x)))
5018 	    {
5019 	      rtx src1 = SET_SRC (PATTERN (seq));
5020 	      rtx src2 = SET_SRC (PATTERN (NEXT_INSN (seq)));
5021 
5022 	      /* Replace the placeholder in SRC2 with SRC1.  If we can
5023 		 find where in SRC2 it was placed, that can become our
5024 		 split point and we can replace this address with SRC2.
5025 		 Just try two obvious places.  */
5026 
5027 	      src2 = replace_rtx (src2, reg, src1);
5028 	      split = 0;
5029 	      if (XEXP (src2, 0) == src1)
5030 		split = &XEXP (src2, 0);
5031 	      else if (GET_RTX_FORMAT (GET_CODE (XEXP (src2, 0)))[0] == 'e'
5032 		       && XEXP (XEXP (src2, 0), 0) == src1)
5033 		split = &XEXP (XEXP (src2, 0), 0);
5034 
5035 	      if (split)
5036 		{
5037 		  SUBST (XEXP (x, 0), src2);
5038 		  return split;
5039 		}
5040 	    }
5041 
5042 	  /* If that didn't work and we have a nested plus, like:
5043 	     ((REG1 * CONST1) + REG2) + CONST2, where (REG1 + REG2) + CONST2
5044 	     is a valid address, try to split (REG1 * CONST1).  */
5045 	  if (GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS
5046 	      && !OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 0))
5047 	      && OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
5048 	      && ! (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SUBREG
5049 		    && OBJECT_P (SUBREG_REG (XEXP (XEXP (XEXP (x, 0),
5050 							 0), 0)))))
5051 	    {
5052 	      rtx tem = XEXP (XEXP (XEXP (x, 0), 0), 0);
5053 	      XEXP (XEXP (XEXP (x, 0), 0), 0) = reg;
5054 	      if (memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
5055 					       MEM_ADDR_SPACE (x)))
5056 		{
5057 		  XEXP (XEXP (XEXP (x, 0), 0), 0) = tem;
5058 		  return &XEXP (XEXP (XEXP (x, 0), 0), 0);
5059 		}
5060 	      XEXP (XEXP (XEXP (x, 0), 0), 0) = tem;
5061 	    }
5062 	  else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == PLUS
5063 		   && OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 0))
5064 		   && !OBJECT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
5065 		   && ! (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == SUBREG
5066 			 && OBJECT_P (SUBREG_REG (XEXP (XEXP (XEXP (x, 0),
5067 							      0), 1)))))
5068 	    {
5069 	      rtx tem = XEXP (XEXP (XEXP (x, 0), 0), 1);
5070 	      XEXP (XEXP (XEXP (x, 0), 0), 1) = reg;
5071 	      if (memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
5072 					       MEM_ADDR_SPACE (x)))
5073 		{
5074 		  XEXP (XEXP (XEXP (x, 0), 0), 1) = tem;
5075 		  return &XEXP (XEXP (XEXP (x, 0), 0), 1);
5076 		}
5077 	      XEXP (XEXP (XEXP (x, 0), 0), 1) = tem;
5078 	    }
5079 
5080 	  /* If that didn't work, perhaps the first operand is complex and
5081 	     needs to be computed separately, so make a split point there.
5082 	     This will occur on machines that just support REG + CONST
5083 	     and have a constant moved through some previous computation.  */
5084 	  if (!OBJECT_P (XEXP (XEXP (x, 0), 0))
5085 	      && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
5086 		    && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
5087 	    return &XEXP (XEXP (x, 0), 0);
5088 	}
5089 
5090       /* If we have a PLUS whose first operand is complex, try computing it
5091          separately by making a split there.  */
5092       if (GET_CODE (XEXP (x, 0)) == PLUS
5093           && ! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
5094 					    MEM_ADDR_SPACE (x))
5095           && ! OBJECT_P (XEXP (XEXP (x, 0), 0))
5096           && ! (GET_CODE (XEXP (XEXP (x, 0), 0)) == SUBREG
5097                 && OBJECT_P (SUBREG_REG (XEXP (XEXP (x, 0), 0)))))
5098         return &XEXP (XEXP (x, 0), 0);
5099       break;
5100 
5101     case SET:
5102       /* If SET_DEST is CC0 and SET_SRC is not an operand, a COMPARE, or a
5103 	 ZERO_EXTRACT, the most likely reason why this doesn't match is that
5104 	 we need to put the operand into a register.  So split at that
5105 	 point.  */
5106 
5107       if (SET_DEST (x) == cc0_rtx
5108 	  && GET_CODE (SET_SRC (x)) != COMPARE
5109 	  && GET_CODE (SET_SRC (x)) != ZERO_EXTRACT
5110 	  && !OBJECT_P (SET_SRC (x))
5111 	  && ! (GET_CODE (SET_SRC (x)) == SUBREG
5112 		&& OBJECT_P (SUBREG_REG (SET_SRC (x)))))
5113 	return &SET_SRC (x);
5114 
5115       /* See if we can split SET_SRC as it stands.  */
5116       split = find_split_point (&SET_SRC (x), insn, true);
5117       if (split && split != &SET_SRC (x))
5118 	return split;
5119 
5120       /* See if we can split SET_DEST as it stands.  */
5121       split = find_split_point (&SET_DEST (x), insn, false);
5122       if (split && split != &SET_DEST (x))
5123 	return split;
5124 
5125       /* See if this is a bitfield assignment with everything constant.  If
5126 	 so, this is an IOR of an AND, so split it into that.  */
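      /* For example (assuming !BITS_BIG_ENDIAN), the assignment
	   (set (zero_extract:SI (reg:SI D) (const_int 4) (const_int 8))
		(const_int 3))
	 becomes
	   (set (reg:SI D) (ior:SI (and:SI (reg:SI D) (const_int -3841))
				   (const_int 768)))
	 i.e. the field is cleared with ~(0xf << 8) and then IORed with
	 3 << 8.  */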
5127       if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
5128 	  && is_a <scalar_int_mode> (GET_MODE (XEXP (SET_DEST (x), 0)),
5129 				     &inner_mode)
5130 	  && HWI_COMPUTABLE_MODE_P (inner_mode)
5131 	  && CONST_INT_P (XEXP (SET_DEST (x), 1))
5132 	  && CONST_INT_P (XEXP (SET_DEST (x), 2))
5133 	  && CONST_INT_P (SET_SRC (x))
5134 	  && ((INTVAL (XEXP (SET_DEST (x), 1))
5135 	       + INTVAL (XEXP (SET_DEST (x), 2)))
5136 	      <= GET_MODE_PRECISION (inner_mode))
5137 	  && ! side_effects_p (XEXP (SET_DEST (x), 0)))
5138 	{
5139 	  HOST_WIDE_INT pos = INTVAL (XEXP (SET_DEST (x), 2));
5140 	  unsigned HOST_WIDE_INT len = INTVAL (XEXP (SET_DEST (x), 1));
5141 	  unsigned HOST_WIDE_INT src = INTVAL (SET_SRC (x));
5142 	  rtx dest = XEXP (SET_DEST (x), 0);
5143 	  unsigned HOST_WIDE_INT mask
5144 	    = (HOST_WIDE_INT_1U << len) - 1;
5145 	  rtx or_mask;
5146 
5147 	  if (BITS_BIG_ENDIAN)
5148 	    pos = GET_MODE_PRECISION (inner_mode) - len - pos;
5149 
5150 	  or_mask = gen_int_mode (src << pos, inner_mode);
5151 	  if (src == mask)
5152 	    SUBST (SET_SRC (x),
5153 		   simplify_gen_binary (IOR, inner_mode, dest, or_mask));
5154 	  else
5155 	    {
5156 	      rtx negmask = gen_int_mode (~(mask << pos), inner_mode);
5157 	      SUBST (SET_SRC (x),
5158 		     simplify_gen_binary (IOR, inner_mode,
5159 					  simplify_gen_binary (AND, inner_mode,
5160 							       dest, negmask),
5161 					  or_mask));
5162 	    }
5163 
5164 	  SUBST (SET_DEST (x), dest);
5165 
5166 	  split = find_split_point (&SET_SRC (x), insn, true);
5167 	  if (split && split != &SET_SRC (x))
5168 	    return split;
5169 	}
5170 
5171       /* Otherwise, see if this is an operation that we can split into two.
5172 	 If so, try to split that.  */
5173       code = GET_CODE (SET_SRC (x));
5174 
5175       switch (code)
5176 	{
5177 	case AND:
5178 	  /* If we are AND'ing with a large constant that is only a single
5179 	     bit and the result is only being used in a context where we
5180 	     need to know if it is zero or nonzero, replace it with a bit
5181 	     extraction.  This will avoid the large constant, which might
5182 	     have taken more than one insn to make.  If the constant were
5183 	     not a valid argument to the AND but took only one insn to make,
5184 	     this is no worse, but if it took more than one insn, it will
5185 	     be better.  */
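	  /* For example, if D is set to (and:SI (reg:SI R) (const_int 4096))
	     and D is only used in (ne D (const_int 0)), the AND is replaced
	     by an extraction of bit 12 of R (in whatever form
	     make_extraction produces).  */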
5186 
5187 	  if (CONST_INT_P (XEXP (SET_SRC (x), 1))
5188 	      && REG_P (XEXP (SET_SRC (x), 0))
5189 	      && (pos = exact_log2 (UINTVAL (XEXP (SET_SRC (x), 1)))) >= 7
5190 	      && REG_P (SET_DEST (x))
5191 	      && (split = find_single_use (SET_DEST (x), insn, NULL)) != 0
5192 	      && (GET_CODE (*split) == EQ || GET_CODE (*split) == NE)
5193 	      && XEXP (*split, 0) == SET_DEST (x)
5194 	      && XEXP (*split, 1) == const0_rtx)
5195 	    {
5196 	      rtx extraction = make_extraction (GET_MODE (SET_DEST (x)),
5197 						XEXP (SET_SRC (x), 0),
5198 						pos, NULL_RTX, 1, 1, 0, 0);
5199 	      if (extraction != 0)
5200 		{
5201 		  SUBST (SET_SRC (x), extraction);
5202 		  return find_split_point (loc, insn, false);
5203 		}
5204 	    }
5205 	  break;
5206 
5207 	case NE:
5208 	  /* If STORE_FLAG_VALUE is -1, this is (NE X 0), and if only one bit
5209 	     of X is known to be on, it can become a NEG of a shift.  */
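	  /* For example, if only bit 3 of X can be nonzero, then
	     (ne:SI X (const_int 0)) is -1 exactly when X is nonzero, which
	     equals (neg:SI (lshiftrt:SI X (const_int 3))).  */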
5210 	  if (STORE_FLAG_VALUE == -1 && XEXP (SET_SRC (x), 1) == const0_rtx
5211 	      && GET_MODE (SET_SRC (x)) == GET_MODE (XEXP (SET_SRC (x), 0))
5212 	      && ((pos = exact_log2 (nonzero_bits (XEXP (SET_SRC (x), 0),
5213 						   GET_MODE (XEXP (SET_SRC (x),
5214 							     0))))) >= 1))
5215 	    {
5216 	      machine_mode mode = GET_MODE (XEXP (SET_SRC (x), 0));
5217 	      rtx pos_rtx = gen_int_shift_amount (mode, pos);
5218 	      SUBST (SET_SRC (x),
5219 		     gen_rtx_NEG (mode,
5220 				  gen_rtx_LSHIFTRT (mode,
5221 						    XEXP (SET_SRC (x), 0),
5222 						    pos_rtx)));
5223 
5224 	      split = find_split_point (&SET_SRC (x), insn, true);
5225 	      if (split && split != &SET_SRC (x))
5226 		return split;
5227 	    }
5228 	  break;
5229 
5230 	case SIGN_EXTEND:
5231 	  inner = XEXP (SET_SRC (x), 0);
5232 
5233 	  /* We can't optimize if either mode is a partial integer
5234 	     mode as we don't know how many bits are significant
5235 	     in those modes.  */
5236 	  if (!is_int_mode (GET_MODE (inner), &inner_mode)
5237 	      || GET_MODE_CLASS (GET_MODE (SET_SRC (x))) == MODE_PARTIAL_INT)
5238 	    break;
5239 
5240 	  pos = 0;
5241 	  len = GET_MODE_PRECISION (inner_mode);
5242 	  unsignedp = 0;
5243 	  break;
5244 
5245 	case SIGN_EXTRACT:
5246 	case ZERO_EXTRACT:
5247 	  if (is_a <scalar_int_mode> (GET_MODE (XEXP (SET_SRC (x), 0)),
5248 				      &inner_mode)
5249 	      && CONST_INT_P (XEXP (SET_SRC (x), 1))
5250 	      && CONST_INT_P (XEXP (SET_SRC (x), 2)))
5251 	    {
5252 	      inner = XEXP (SET_SRC (x), 0);
5253 	      len = INTVAL (XEXP (SET_SRC (x), 1));
5254 	      pos = INTVAL (XEXP (SET_SRC (x), 2));
5255 
5256 	      if (BITS_BIG_ENDIAN)
5257 		pos = GET_MODE_PRECISION (inner_mode) - len - pos;
5258 	      unsignedp = (code == ZERO_EXTRACT);
5259 	    }
5260 	  break;
5261 
5262 	default:
5263 	  break;
5264 	}
5265 
5266       if (len
5267 	  && known_subrange_p (pos, len,
5268 			       0, GET_MODE_PRECISION (GET_MODE (inner)))
5269 	  && is_a <scalar_int_mode> (GET_MODE (SET_SRC (x)), &mode))
5270 	{
5271 	  /* For unsigned, we have a choice of a shift followed by an
5272 	     AND or two shifts.  Use two shifts for field sizes where the
5273 	     constant might be too large.  We assume here that we can
5274 	     always at least get 8-bit constants in an AND insn, which is
5275 	     true for every current RISC.  */
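	  /* For example, a 4-bit field starting at bit 8 of INNER in SImode
	     becomes
	       (and:SI (lshiftrt:SI INNER (const_int 8)) (const_int 15))
	     in the unsigned case, and
	       (ashiftrt:SI (ashift:SI INNER (const_int 20)) (const_int 28))
	     in the signed case.  */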
5276 
5277 	  if (unsignedp && len <= 8)
5278 	    {
5279 	      unsigned HOST_WIDE_INT mask
5280 		= (HOST_WIDE_INT_1U << len) - 1;
5281 	      rtx pos_rtx = gen_int_shift_amount (mode, pos);
5282 	      SUBST (SET_SRC (x),
5283 		     gen_rtx_AND (mode,
5284 				  gen_rtx_LSHIFTRT
5285 				  (mode, gen_lowpart (mode, inner), pos_rtx),
5286 				  gen_int_mode (mask, mode)));
5287 
5288 	      split = find_split_point (&SET_SRC (x), insn, true);
5289 	      if (split && split != &SET_SRC (x))
5290 		return split;
5291 	    }
5292 	  else
5293 	    {
5294 	      int left_bits = GET_MODE_PRECISION (mode) - len - pos;
5295 	      int right_bits = GET_MODE_PRECISION (mode) - len;
5296 	      SUBST (SET_SRC (x),
5297 		     gen_rtx_fmt_ee
5298 		     (unsignedp ? LSHIFTRT : ASHIFTRT, mode,
5299 		      gen_rtx_ASHIFT (mode,
5300 				      gen_lowpart (mode, inner),
5301 				      gen_int_shift_amount (mode, left_bits)),
5302 		      gen_int_shift_amount (mode, right_bits)));
5303 
5304 	      split = find_split_point (&SET_SRC (x), insn, true);
5305 	      if (split && split != &SET_SRC (x))
5306 		return split;
5307 	    }
5308 	}
5309 
5310       /* See if this is a simple operation with a constant as the second
5311 	 operand.  It might be that this constant is out of range and hence
5312 	 could be used as a split point.  */
5313       if (BINARY_P (SET_SRC (x))
5314 	  && CONSTANT_P (XEXP (SET_SRC (x), 1))
5315 	  && (OBJECT_P (XEXP (SET_SRC (x), 0))
5316 	      || (GET_CODE (XEXP (SET_SRC (x), 0)) == SUBREG
5317 		  && OBJECT_P (SUBREG_REG (XEXP (SET_SRC (x), 0))))))
5318 	return &XEXP (SET_SRC (x), 1);
5319 
5320       /* Finally, see if this is a simple operation with its first operand
5321 	 not in a register.  The operation might require this operand in a
5322 	 register, so return it as a split point.  We can always do this
5323 	 because if the first operand were another operation, we would have
5324 	 already found it as a split point.  */
5325       if ((BINARY_P (SET_SRC (x)) || UNARY_P (SET_SRC (x)))
5326 	  && ! register_operand (XEXP (SET_SRC (x), 0), VOIDmode))
5327 	return &XEXP (SET_SRC (x), 0);
5328 
5329       return 0;
5330 
5331     case AND:
5332     case IOR:
5333       /* We write NOR as (and (not A) (not B)), but if we don't have a NOR,
5334 	 it is better to write this as (not (ior A B)) so we can split it.
5335 	 Similarly for IOR.  */
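      /* That is, apply De Morgan's law:
	   (and (not A) (not B))  ->  (not (ior A B))
	   (ior (not A) (not B))  ->  (not (and A B))  */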
5336       if (GET_CODE (XEXP (x, 0)) == NOT && GET_CODE (XEXP (x, 1)) == NOT)
5337 	{
5338 	  SUBST (*loc,
5339 		 gen_rtx_NOT (GET_MODE (x),
5340 			      gen_rtx_fmt_ee (code == IOR ? AND : IOR,
5341 					      GET_MODE (x),
5342 					      XEXP (XEXP (x, 0), 0),
5343 					      XEXP (XEXP (x, 1), 0))));
5344 	  return find_split_point (loc, insn, set_src);
5345 	}
5346 
5347       /* Many RISC machines have a large set of logical insns.  If the
5348 	 second operand is a NOT, put it first so we will try to split the
5349 	 other operand first.  */
5350       if (GET_CODE (XEXP (x, 1)) == NOT)
5351 	{
5352 	  rtx tem = XEXP (x, 0);
5353 	  SUBST (XEXP (x, 0), XEXP (x, 1));
5354 	  SUBST (XEXP (x, 1), tem);
5355 	}
5356       break;
5357 
5358     case PLUS:
5359     case MINUS:
5360       /* Canonicalization can produce (minus A (mult B C)), where C is a
5361 	 constant.  It may be better to try splitting (plus (mult B -C) A)
5362 	 instead if this isn't a multiply by a power of two.  */
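      /* For example, (minus:SI A (mult:SI B (const_int 3))) is rewritten
	 as (plus:SI (mult:SI B (const_int -3)) A) before looking for a
	 split point.  */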
5363       if (set_src && code == MINUS && GET_CODE (XEXP (x, 1)) == MULT
5364 	  && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
5365 	  && !pow2p_hwi (INTVAL (XEXP (XEXP (x, 1), 1))))
5366 	{
5367 	  machine_mode mode = GET_MODE (x);
5368 	  unsigned HOST_WIDE_INT this_int = INTVAL (XEXP (XEXP (x, 1), 1));
5369 	  HOST_WIDE_INT other_int = trunc_int_for_mode (-this_int, mode);
5370 	  SUBST (*loc, gen_rtx_PLUS (mode,
5371 				     gen_rtx_MULT (mode,
5372 						   XEXP (XEXP (x, 1), 0),
5373 						   gen_int_mode (other_int,
5374 								 mode)),
5375 				     XEXP (x, 0)));
5376 	  return find_split_point (loc, insn, set_src);
5377 	}
5378 
5379       /* Split at a multiply-accumulate instruction.  However, if this is
5380          the SET_SRC, we likely do not have such an instruction and it's
5381          worthless to try this split.  */
5382       if (!set_src
5383 	  && (GET_CODE (XEXP (x, 0)) == MULT
5384 	      || (GET_CODE (XEXP (x, 0)) == ASHIFT
5385 		  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
5386         return loc;
5387 
5388     default:
5389       break;
5390     }
5391 
5392   /* Otherwise, select our actions depending on our rtx class.  */
5393   switch (GET_RTX_CLASS (code))
5394     {
5395     case RTX_BITFIELD_OPS:		/* This is ZERO_EXTRACT and SIGN_EXTRACT.  */
5396     case RTX_TERNARY:
5397       split = find_split_point (&XEXP (x, 2), insn, false);
5398       if (split)
5399 	return split;
5400       /* fall through */
5401     case RTX_BIN_ARITH:
5402     case RTX_COMM_ARITH:
5403     case RTX_COMPARE:
5404     case RTX_COMM_COMPARE:
5405       split = find_split_point (&XEXP (x, 1), insn, false);
5406       if (split)
5407 	return split;
5408       /* fall through */
5409     case RTX_UNARY:
5410       /* Some machines have (and (shift ...) ...) insns.  If X is not
5411 	 an AND, but XEXP (X, 0) is, use it as our split point.  */
5412       if (GET_CODE (x) != AND && GET_CODE (XEXP (x, 0)) == AND)
5413 	return &XEXP (x, 0);
5414 
5415       split = find_split_point (&XEXP (x, 0), insn, false);
5416       if (split)
5417 	return split;
5418       return loc;
5419 
5420     default:
5421       /* Otherwise, we don't have a split point.  */
5422       return 0;
5423     }
5424 }
5425 
5426 /* Throughout X, replace FROM with TO, and return the result.
5427    The result is TO if X is FROM;
5428    otherwise the result is X, but its contents may have been modified.
5429    If they were modified, a record was made in undobuf so that
5430    undo_all will (among other things) return X to its original state.
5431 
5432    If the number of changes necessary is too large to record for undoing,
5433    the excess changes are not made, so the result is invalid.
5434    The changes already made can still be undone.
5435    undobuf.num_undo is incremented for such changes, so by testing that,
5436    the caller can tell whether the result is valid.
5437 
5438    `n_occurrences' is incremented each time FROM is replaced.
5439 
5440    IN_DEST is nonzero if we are processing the SET_DEST of a SET.
5441 
5442    IN_COND is nonzero if we are at the top level of a condition.
5443 
5444    UNIQUE_COPY is nonzero if each substitution must be unique.  We do this
5445    by copying if `n_occurrences' is nonzero.  */
5446 
5447 static rtx
5448 subst (rtx x, rtx from, rtx to, int in_dest, int in_cond, int unique_copy)
5449 {
5450   enum rtx_code code = GET_CODE (x);
5451   machine_mode op0_mode = VOIDmode;
5452   const char *fmt;
5453   int len, i;
5454   rtx new_rtx;
5455 
5456 /* Two expressions are equal if they are identical copies of a shared
5457    RTX or if they are both registers with the same register number
5458    and mode.  */
5459 
5460 #define COMBINE_RTX_EQUAL_P(X,Y)			\
5461   ((X) == (Y)						\
5462    || (REG_P (X) && REG_P (Y)	\
5463        && REGNO (X) == REGNO (Y) && GET_MODE (X) == GET_MODE (Y)))
5464 
5465   /* Do not substitute into clobbers of regs -- this will never result in
5466      valid RTL.  */
5467   if (GET_CODE (x) == CLOBBER && REG_P (XEXP (x, 0)))
5468     return x;
5469 
5470   if (! in_dest && COMBINE_RTX_EQUAL_P (x, from))
5471     {
5472       n_occurrences++;
5473       return (unique_copy && n_occurrences > 1 ? copy_rtx (to) : to);
5474     }
5475 
5476   /* If X and FROM are the same register but different modes, they
5477      will not have been seen as equal above.  However, the log links code
5478      will make a LOG_LINKS entry for that case.  If we do nothing, we
5479      will try to rerecognize our original insn and, when it succeeds,
5480      we will delete the feeding insn, which is incorrect.
5481 
5482      So force this insn not to match in this (rare) case.  */
5483   if (! in_dest && code == REG && REG_P (from)
5484       && reg_overlap_mentioned_p (x, from))
5485     return gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
5486 
5487   /* If this is an object, we are done unless it is a MEM or LO_SUM, both
5488      of which may contain things that can be combined.  */
5489   if (code != MEM && code != LO_SUM && OBJECT_P (x))
5490     return x;
5491 
5492   /* It is possible to have a subexpression appear twice in the insn.
5493      Suppose that FROM is a register that appears within TO.
5494      Then, after that subexpression has been scanned once by `subst',
5495      the second time it is scanned, TO may be found.  If we were
5496      to scan TO here, we would find FROM within it and create a
5497      self-referent rtl structure which is completely wrong.  */
5498   if (COMBINE_RTX_EQUAL_P (x, to))
5499     return to;
5500 
5501   /* Parallel asm_operands need special attention because all of the
5502      inputs are shared across the arms.  Furthermore, unsharing the
5503      rtl results in recognition failures.  Failure to handle this case
5504      specially can result in circular rtl.
5505 
5506      Solve this by doing a normal pass across the first entry of the
5507      parallel, and only processing the SET_DESTs of the subsequent
5508      entries.  Ug.  */
5509 
5510   if (code == PARALLEL
5511       && GET_CODE (XVECEXP (x, 0, 0)) == SET
5512       && GET_CODE (SET_SRC (XVECEXP (x, 0, 0))) == ASM_OPERANDS)
5513     {
5514       new_rtx = subst (XVECEXP (x, 0, 0), from, to, 0, 0, unique_copy);
5515 
5516       /* If this substitution failed, this whole thing fails.  */
5517       if (GET_CODE (new_rtx) == CLOBBER
5518 	  && XEXP (new_rtx, 0) == const0_rtx)
5519 	return new_rtx;
5520 
5521       SUBST (XVECEXP (x, 0, 0), new_rtx);
5522 
5523       for (i = XVECLEN (x, 0) - 1; i >= 1; i--)
5524 	{
5525 	  rtx dest = SET_DEST (XVECEXP (x, 0, i));
5526 
5527 	  if (!REG_P (dest)
5528 	      && GET_CODE (dest) != CC0
5529 	      && GET_CODE (dest) != PC)
5530 	    {
5531 	      new_rtx = subst (dest, from, to, 0, 0, unique_copy);
5532 
5533 	      /* If this substitution failed, this whole thing fails.  */
5534 	      if (GET_CODE (new_rtx) == CLOBBER
5535 		  && XEXP (new_rtx, 0) == const0_rtx)
5536 		return new_rtx;
5537 
5538 	      SUBST (SET_DEST (XVECEXP (x, 0, i)), new_rtx);
5539 	    }
5540 	}
5541     }
5542   else
5543     {
5544       len = GET_RTX_LENGTH (code);
5545       fmt = GET_RTX_FORMAT (code);
5546 
5547       /* We don't need to process a SET_DEST that is a register, CC0,
5548 	 or PC, so set up to skip this common case.  All other cases
5549 	 where we want to suppress replacing something inside a
5550 	 SET_SRC are handled via the IN_DEST operand.  */
5551       if (code == SET
5552 	  && (REG_P (SET_DEST (x))
5553 	      || GET_CODE (SET_DEST (x)) == CC0
5554 	      || GET_CODE (SET_DEST (x)) == PC))
5555 	fmt = "ie";
5556 
5557       /* Trying to simplify the operands of a widening MULT is not likely
5558 	 to create RTL matching a machine insn.  */
5559       if (code == MULT
5560 	  && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
5561 	      || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
5562 	  && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
5563 	      || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
5564 	  && REG_P (XEXP (XEXP (x, 0), 0))
5565 	  && REG_P (XEXP (XEXP (x, 1), 0))
5566 	  && from == to)
5567 	return x;
5568 
5570       /* Get the mode of operand 0 in case X is now a SIGN_EXTEND of a
5571 	 constant.  */
5572       if (fmt[0] == 'e')
5573 	op0_mode = GET_MODE (XEXP (x, 0));
5574 
5575       for (i = 0; i < len; i++)
5576 	{
5577 	  if (fmt[i] == 'E')
5578 	    {
5579 	      int j;
5580 	      for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5581 		{
5582 		  if (COMBINE_RTX_EQUAL_P (XVECEXP (x, i, j), from))
5583 		    {
5584 		      new_rtx = (unique_copy && n_occurrences
5585 			     ? copy_rtx (to) : to);
5586 		      n_occurrences++;
5587 		    }
5588 		  else
5589 		    {
5590 		      new_rtx = subst (XVECEXP (x, i, j), from, to, 0, 0,
5591 				       unique_copy);
5592 
5593 		      /* If this substitution failed, this whole thing
5594 			 fails.  */
5595 		      if (GET_CODE (new_rtx) == CLOBBER
5596 			  && XEXP (new_rtx, 0) == const0_rtx)
5597 			return new_rtx;
5598 		    }
5599 
5600 		  SUBST (XVECEXP (x, i, j), new_rtx);
5601 		}
5602 	    }
5603 	  else if (fmt[i] == 'e')
5604 	    {
5605 	      /* If this is a register being set, ignore it.  */
5606 	      new_rtx = XEXP (x, i);
5607 	      if (in_dest
5608 		  && i == 0
5609 		  && (((code == SUBREG || code == ZERO_EXTRACT)
5610 		       && REG_P (new_rtx))
5611 		      || code == STRICT_LOW_PART))
5612 		;
5613 
5614 	      else if (COMBINE_RTX_EQUAL_P (XEXP (x, i), from))
5615 		{
5616 		  /* In general, don't install a subreg involving two
5617 		     modes not tieable.  It can worsen register
5618 		     allocation, and can even make invalid reload
5619 		     insns, since the reg inside may need to be copied
5620 		     from in the outside mode, and that may be invalid
5621 		     if it is an fp reg copied in integer mode.
5622 
5623 		     We allow two exceptions to this: It is valid if
5624 		     it is inside another SUBREG and the mode of that
5625 		     SUBREG and the mode of the inside of TO is
5626 		     tieable and it is valid if X is a SET that copies
5627 		     FROM to CC0.  */
5628 
5629 		  if (GET_CODE (to) == SUBREG
5630 		      && !targetm.modes_tieable_p (GET_MODE (to),
5631 						   GET_MODE (SUBREG_REG (to)))
5632 		      && ! (code == SUBREG
5633 			    && (targetm.modes_tieable_p
5634 				(GET_MODE (x), GET_MODE (SUBREG_REG (to)))))
5635 		      && (!HAVE_cc0
5636 			  || (! (code == SET
5637 				 && i == 1
5638 				 && XEXP (x, 0) == cc0_rtx))))
5639 		    return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5640 
5641 		  if (code == SUBREG
5642 		      && REG_P (to)
5643 		      && REGNO (to) < FIRST_PSEUDO_REGISTER
5644 		      && simplify_subreg_regno (REGNO (to), GET_MODE (to),
5645 						SUBREG_BYTE (x),
5646 						GET_MODE (x)) < 0)
5647 		    return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5648 
5649 		  new_rtx = (unique_copy && n_occurrences ? copy_rtx (to) : to);
5650 		  n_occurrences++;
5651 		}
5652 	      else
5653 		/* If we are in a SET_DEST, suppress most cases unless we
5654 		   have gone inside a MEM, in which case we want to
5655 		   simplify the address.  We assume here that things that
5656 		   are actually part of the destination have their inner
5657 		   parts in the first expression.  This is true for SUBREG,
5658 		   STRICT_LOW_PART, and ZERO_EXTRACT, which are the only
5659 		   things aside from REG and MEM that should appear in a
5660 		   SET_DEST.  */
5661 		new_rtx = subst (XEXP (x, i), from, to,
5662 			     (((in_dest
5663 				&& (code == SUBREG || code == STRICT_LOW_PART
5664 				    || code == ZERO_EXTRACT))
5665 			       || code == SET)
5666 			      && i == 0),
5667 				 code == IF_THEN_ELSE && i == 0,
5668 				 unique_copy);
5669 
5670 	      /* If we found that we will have to reject this combination,
5671 		 indicate that by returning the CLOBBER ourselves, rather than
5672 		 an expression containing it.  This will speed things up as
5673 		 well as prevent accidents where two CLOBBERs are considered
5674 		 to be equal, thus producing an incorrect simplification.  */
5675 
5676 	      if (GET_CODE (new_rtx) == CLOBBER && XEXP (new_rtx, 0) == const0_rtx)
5677 		return new_rtx;
5678 
5679 	      if (GET_CODE (x) == SUBREG && CONST_SCALAR_INT_P (new_rtx))
5680 		{
5681 		  machine_mode mode = GET_MODE (x);
5682 
5683 		  x = simplify_subreg (GET_MODE (x), new_rtx,
5684 				       GET_MODE (SUBREG_REG (x)),
5685 				       SUBREG_BYTE (x));
5686 		  if (! x)
5687 		    x = gen_rtx_CLOBBER (mode, const0_rtx);
5688 		}
5689 	      else if (CONST_SCALAR_INT_P (new_rtx)
5690 		       && (GET_CODE (x) == ZERO_EXTEND
5691 			   || GET_CODE (x) == FLOAT
5692 			   || GET_CODE (x) == UNSIGNED_FLOAT))
5693 		{
5694 		  x = simplify_unary_operation (GET_CODE (x), GET_MODE (x),
5695 						new_rtx,
5696 						GET_MODE (XEXP (x, 0)));
5697 		  if (!x)
5698 		    return gen_rtx_CLOBBER (VOIDmode, const0_rtx);
5699 		}
5700 	      else
5701 		SUBST (XEXP (x, i), new_rtx);
5702 	    }
5703 	}
5704     }
5705 
5706   /* Check if we are loading something from the constant pool via float
5707      extension; in this case we would undo the compress_float_constant
5708      optimization and turn the constant load into an immediate value.  */
5709   if (GET_CODE (x) == FLOAT_EXTEND
5710       && MEM_P (XEXP (x, 0))
5711       && MEM_READONLY_P (XEXP (x, 0)))
5712     {
5713       rtx tmp = avoid_constant_pool_reference (x);
5714       if (x != tmp)
5715         return x;
5716     }
5717 
5718   /* Try to simplify X.  If the simplification changed the code, it is likely
5719      that further simplification will help, so loop, but limit the number
5720      of repetitions that will be performed.  */
5721 
5722   for (i = 0; i < 4; i++)
5723     {
5724       /* If X is sufficiently simple, don't bother trying to do anything
5725 	 with it.  */
5726       if (code != CONST_INT && code != REG && code != CLOBBER)
5727 	x = combine_simplify_rtx (x, op0_mode, in_dest, in_cond);
5728 
5729       if (GET_CODE (x) == code)
5730 	break;
5731 
5732       code = GET_CODE (x);
5733 
5734       /* We no longer know the original mode of operand 0 since we
5735 	 have changed the form of X.  */
5736       op0_mode = VOIDmode;
5737     }
5738 
5739   return x;
5740 }
5741 
5742 /* If X is a commutative operation whose operands are not in the canonical
5743    order, use substitutions to swap them.  */
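/* For example, (plus:SI (const_int 4) (reg:SI 100)) becomes
   (plus:SI (reg:SI 100) (const_int 4)); constants canonically go last in
   commutative operations.  */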
5744 
5745 static void
5746 maybe_swap_commutative_operands (rtx x)
5747 {
5748   if (COMMUTATIVE_ARITH_P (x)
5749       && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
5750     {
5751       rtx temp = XEXP (x, 0);
5752       SUBST (XEXP (x, 0), XEXP (x, 1));
5753       SUBST (XEXP (x, 1), temp);
5754     }
5755 }
5756 
5757 /* Simplify X, a piece of RTL.  We just operate on the expression at the
5758    outer level; call `subst' to simplify recursively.  Return the new
5759    expression.
5760 
5761    OP0_MODE is the original mode of XEXP (x, 0).  IN_DEST is nonzero
5762    if we are inside a SET_DEST.  IN_COND is nonzero if we are at the top level
5763    of a condition.  */
5764 
5765 static rtx
5766 combine_simplify_rtx (rtx x, machine_mode op0_mode, int in_dest,
5767 		      int in_cond)
5768 {
5769   enum rtx_code code = GET_CODE (x);
5770   machine_mode mode = GET_MODE (x);
5771   scalar_int_mode int_mode;
5772   rtx temp;
5773   int i;
5774 
5775   /* If this is a commutative operation, put a constant last and a complex
5776      expression first.  We don't need to do this for comparisons here.  */
5777   maybe_swap_commutative_operands (x);
5778 
5779   /* Try to fold this expression in case we have constants that weren't
5780      present before.  */
5781   temp = 0;
5782   switch (GET_RTX_CLASS (code))
5783     {
5784     case RTX_UNARY:
5785       if (op0_mode == VOIDmode)
5786 	op0_mode = GET_MODE (XEXP (x, 0));
5787       temp = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
5788       break;
5789     case RTX_COMPARE:
5790     case RTX_COMM_COMPARE:
5791       {
5792 	machine_mode cmp_mode = GET_MODE (XEXP (x, 0));
5793 	if (cmp_mode == VOIDmode)
5794 	  {
5795 	    cmp_mode = GET_MODE (XEXP (x, 1));
5796 	    if (cmp_mode == VOIDmode)
5797 	      cmp_mode = op0_mode;
5798 	  }
5799 	temp = simplify_relational_operation (code, mode, cmp_mode,
5800 					      XEXP (x, 0), XEXP (x, 1));
5801       }
5802       break;
5803     case RTX_COMM_ARITH:
5804     case RTX_BIN_ARITH:
5805       temp = simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
5806       break;
5807     case RTX_BITFIELD_OPS:
5808     case RTX_TERNARY:
5809       temp = simplify_ternary_operation (code, mode, op0_mode, XEXP (x, 0),
5810 					 XEXP (x, 1), XEXP (x, 2));
5811       break;
5812     default:
5813       break;
5814     }
5815 
5816   if (temp)
5817     {
5818       x = temp;
5819       code = GET_CODE (temp);
5820       op0_mode = VOIDmode;
5821       mode = GET_MODE (temp);
5822     }
5823 
5824   /* If this is a simple operation applied to an IF_THEN_ELSE, try
5825      applying it to the arms of the IF_THEN_ELSE.  This often simplifies
5826      things.  Check for cases where both arms are testing the same
5827      condition.
5828 
5829      Don't do anything if all operands are very simple.  */
5830 
5831   if ((BINARY_P (x)
5832        && ((!OBJECT_P (XEXP (x, 0))
5833 	    && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5834 		  && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))
5835 	   || (!OBJECT_P (XEXP (x, 1))
5836 	       && ! (GET_CODE (XEXP (x, 1)) == SUBREG
5837 		     && OBJECT_P (SUBREG_REG (XEXP (x, 1)))))))
5838       || (UNARY_P (x)
5839 	  && (!OBJECT_P (XEXP (x, 0))
5840 	       && ! (GET_CODE (XEXP (x, 0)) == SUBREG
5841 		     && OBJECT_P (SUBREG_REG (XEXP (x, 0)))))))
5842     {
5843       rtx cond, true_rtx, false_rtx;
5844 
5845       cond = if_then_else_cond (x, &true_rtx, &false_rtx);
5846       if (cond != 0
5847 	  /* If everything is a comparison, what we have is highly unlikely
5848 	     to be simpler, so don't use it.  */
5849 	  && ! (COMPARISON_P (x)
5850 		&& (COMPARISON_P (true_rtx) || COMPARISON_P (false_rtx)))
5851 	  /* Similarly, if we end up with one of the expressions the same
5852 	     as the original, it is certainly not simpler.  */
5853 	  && ! rtx_equal_p (x, true_rtx)
5854 	  && ! rtx_equal_p (x, false_rtx))
5855 	{
5856 	  rtx cop1 = const0_rtx;
5857 	  enum rtx_code cond_code = simplify_comparison (NE, &cond, &cop1);
5858 
5859 	  if (cond_code == NE && COMPARISON_P (cond))
5860 	    return x;
5861 
5862 	  /* Simplify the alternative arms; this may collapse the true and
5863 	     false arms to store-flag values.  Be careful to use copy_rtx
5864 	     here since true_rtx or false_rtx might share RTL with x as a
5865 	     result of the if_then_else_cond call above.  */
5866 	  true_rtx = subst (copy_rtx (true_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5867 	  false_rtx = subst (copy_rtx (false_rtx), pc_rtx, pc_rtx, 0, 0, 0);
5868 
5869 	  /* If true_rtx and false_rtx are not general_operands, an if_then_else
5870 	     is unlikely to be simpler.  */
5871 	  if (general_operand (true_rtx, VOIDmode)
5872 	      && general_operand (false_rtx, VOIDmode))
5873 	    {
5874 	      enum rtx_code reversed;
5875 
5876 	      /* Restarting if we generate a store-flag expression will cause
5877 		 us to loop.  Just drop through in this case.  */
5878 
5879 	      /* If the result values are STORE_FLAG_VALUE and zero, we can
5880 		 just make the comparison operation.  */
5881 	      if (true_rtx == const_true_rtx && false_rtx == const0_rtx)
5882 		x = simplify_gen_relational (cond_code, mode, VOIDmode,
5883 					     cond, cop1);
5884 	      else if (true_rtx == const0_rtx && false_rtx == const_true_rtx
5885 		       && ((reversed = reversed_comparison_code_parts
5886 					(cond_code, cond, cop1, NULL))
5887 			   != UNKNOWN))
5888 		x = simplify_gen_relational (reversed, mode, VOIDmode,
5889 					     cond, cop1);
5890 
5891 	      /* Likewise, we can make the negate of a comparison operation
5892 		 if the result values are - STORE_FLAG_VALUE and zero.  */
5893 	      else if (CONST_INT_P (true_rtx)
5894 		       && INTVAL (true_rtx) == - STORE_FLAG_VALUE
5895 		       && false_rtx == const0_rtx)
5896 		x = simplify_gen_unary (NEG, mode,
5897 					simplify_gen_relational (cond_code,
5898 								 mode, VOIDmode,
5899 								 cond, cop1),
5900 					mode);
5901 	      else if (CONST_INT_P (false_rtx)
5902 		       && INTVAL (false_rtx) == - STORE_FLAG_VALUE
5903 		       && true_rtx == const0_rtx
5904 		       && ((reversed = reversed_comparison_code_parts
5905 					(cond_code, cond, cop1, NULL))
5906 			   != UNKNOWN))
5907 		x = simplify_gen_unary (NEG, mode,
5908 					simplify_gen_relational (reversed,
5909 								 mode, VOIDmode,
5910 								 cond, cop1),
5911 					mode);
5912 	      else
5913 		return gen_rtx_IF_THEN_ELSE (mode,
5914 					     simplify_gen_relational (cond_code,
5915 								      mode,
5916 								      VOIDmode,
5917 								      cond,
5918 								      cop1),
5919 					     true_rtx, false_rtx);
5920 
5921 	      code = GET_CODE (x);
5922 	      op0_mode = VOIDmode;
5923 	    }
5924 	}
5925     }
5926 
5927   /* First see if we can apply the inverse distributive law.  */
5928   if (code == PLUS || code == MINUS
5929       || code == AND || code == IOR || code == XOR)
5930     {
5931       x = apply_distributive_law (x);
5932       code = GET_CODE (x);
5933       op0_mode = VOIDmode;
5934     }
5935 
5936   /* If CODE is an associative operation not otherwise handled, see if we
5937      can associate some operands.  This can win if they are constants or
5938      if they are logically related (i.e. (a & b) & a).  */
5939   if ((code == PLUS || code == MINUS || code == MULT || code == DIV
5940        || code == AND || code == IOR || code == XOR
5941        || code == SMAX || code == SMIN || code == UMAX || code == UMIN)
5942       && ((INTEGRAL_MODE_P (mode) && code != DIV)
5943 	  || (flag_associative_math && FLOAT_MODE_P (mode))))
5944     {
5945       if (GET_CODE (XEXP (x, 0)) == code)
5946 	{
5947 	  rtx other = XEXP (XEXP (x, 0), 0);
5948 	  rtx inner_op0 = XEXP (XEXP (x, 0), 1);
5949 	  rtx inner_op1 = XEXP (x, 1);
5950 	  rtx inner;
5951 
5952 	  /* Make sure we pass the constant operand, if any, as the second
5953 	     one if this is a commutative operation.  */
5954 	  if (CONSTANT_P (inner_op0) && COMMUTATIVE_ARITH_P (x))
5955 	    std::swap (inner_op0, inner_op1);
5956 	  inner = simplify_binary_operation (code == MINUS ? PLUS
5957 					     : code == DIV ? MULT
5958 					     : code,
5959 					     mode, inner_op0, inner_op1);
5960 
5961 	  /* For commutative operations, try the other pair if that one
5962 	     didn't simplify.  */
5963 	  if (inner == 0 && COMMUTATIVE_ARITH_P (x))
5964 	    {
5965 	      other = XEXP (XEXP (x, 0), 1);
5966 	      inner = simplify_binary_operation (code, mode,
5967 						 XEXP (XEXP (x, 0), 0),
5968 						 XEXP (x, 1));
5969 	    }
5970 
5971 	  if (inner)
5972 	    return simplify_gen_binary (code, mode, other, inner);
5973 	}
5974     }
5975 
5976   /* A little bit of algebraic simplification here.  */
5977   switch (code)
5978     {
5979     case MEM:
5980       /* Ensure that our address has any ASHIFTs converted to MULT in case
5981 	 address-recognizing predicates are called later.  */
5982       temp = make_compound_operation (XEXP (x, 0), MEM);
5983       SUBST (XEXP (x, 0), temp);
5984       break;
5985 
5986     case SUBREG:
5987       if (op0_mode == VOIDmode)
5988 	op0_mode = GET_MODE (SUBREG_REG (x));
5989 
5990       /* See if this can be moved to simplify_subreg.  */
5991       if (CONSTANT_P (SUBREG_REG (x))
5992 	  && known_eq (subreg_lowpart_offset (mode, op0_mode), SUBREG_BYTE (x))
5993 	     /* Don't call gen_lowpart if the inner mode
5994 		is VOIDmode and we cannot simplify it, as SUBREG without
5995 		inner mode is invalid.  */
5996 	  && (GET_MODE (SUBREG_REG (x)) != VOIDmode
5997 	      || gen_lowpart_common (mode, SUBREG_REG (x))))
5998 	return gen_lowpart (mode, SUBREG_REG (x));
5999 
6000       if (GET_MODE_CLASS (GET_MODE (SUBREG_REG (x))) == MODE_CC)
6001 	break;
6002       {
6003 	rtx temp;
6004 	temp = simplify_subreg (mode, SUBREG_REG (x), op0_mode,
6005 				SUBREG_BYTE (x));
6006 	if (temp)
6007 	  return temp;
6008 
6009 	/* If op is known to have all lower bits zero, the result is zero.  */
6010 	scalar_int_mode int_mode, int_op0_mode;
6011 	if (!in_dest
6012 	    && is_a <scalar_int_mode> (mode, &int_mode)
6013 	    && is_a <scalar_int_mode> (op0_mode, &int_op0_mode)
6014 	    && (GET_MODE_PRECISION (int_mode)
6015 		< GET_MODE_PRECISION (int_op0_mode))
6016 	    && known_eq (subreg_lowpart_offset (int_mode, int_op0_mode),
6017 			 SUBREG_BYTE (x))
6018 	    && HWI_COMPUTABLE_MODE_P (int_op0_mode)
6019 	    && ((nonzero_bits (SUBREG_REG (x), int_op0_mode)
6020 		 & GET_MODE_MASK (int_mode)) == 0)
6021 	    && !side_effects_p (SUBREG_REG (x)))
6022 	  return CONST0_RTX (int_mode);
6023       }
6024 
6025       /* Don't change the mode of the MEM if that would change the meaning
6026 	 of the address.  */
6027       if (MEM_P (SUBREG_REG (x))
6028 	  && (MEM_VOLATILE_P (SUBREG_REG (x))
6029 	      || mode_dependent_address_p (XEXP (SUBREG_REG (x), 0),
6030 					   MEM_ADDR_SPACE (SUBREG_REG (x)))))
6031 	return gen_rtx_CLOBBER (mode, const0_rtx);
6032 
6033       /* Note that we cannot do any narrowing for non-constants since
6034 	 we might have been counting on using the fact that some bits were
6035 	 zero.  We now do this in the SET.  */
6036 
6037       break;
6038 
6039     case NEG:
6040       temp = expand_compound_operation (XEXP (x, 0));
6041 
6042       /* For C equal to the width of MODE minus 1, (neg (ashiftrt X C)) can be
6043 	 replaced by (lshiftrt X C).  This will convert
6044 	 (neg (sign_extract X 1 Y)) to (zero_extract X 1 Y).  */
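      /* Illustrative, assuming 32-bit SImode: (neg (ashiftrt X 31)), whose
	 value is 0 or 1, becomes (lshiftrt X 31).  */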
6045 
6046       if (GET_CODE (temp) == ASHIFTRT
6047 	  && CONST_INT_P (XEXP (temp, 1))
6048 	  && INTVAL (XEXP (temp, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
6049 	return simplify_shift_const (NULL_RTX, LSHIFTRT, mode, XEXP (temp, 0),
6050 				     INTVAL (XEXP (temp, 1)));
6051 
6052       /* If X has only a single bit that might be nonzero, say, bit I, convert
6053 	 (neg X) to (ashiftrt (ashift X C-I) C-I) where C is the bitsize of
6054 	 MODE minus 1.  This will convert (neg (zero_extract X 1 Y)) to
6055 	 (sign_extract X 1 Y).  But only do this if TEMP isn't a register
6056 	 or a SUBREG of one since we'd be making the expression more
6057 	 complex if it was just a register.  */
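      /* Illustrative, assuming SImode: if only bit 2 of TEMP can be nonzero,
	 (neg TEMP) becomes (ashiftrt (ashift TEMP 29) 29), mapping 4 to -4
	 and 0 to 0.  */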
6058 
6059       if (!REG_P (temp)
6060 	  && ! (GET_CODE (temp) == SUBREG
6061 		&& REG_P (SUBREG_REG (temp)))
6062 	  && is_a <scalar_int_mode> (mode, &int_mode)
6063 	  && (i = exact_log2 (nonzero_bits (temp, int_mode))) >= 0)
6064 	{
6065 	  rtx temp1 = simplify_shift_const
6066 	    (NULL_RTX, ASHIFTRT, int_mode,
6067 	     simplify_shift_const (NULL_RTX, ASHIFT, int_mode, temp,
6068 				   GET_MODE_PRECISION (int_mode) - 1 - i),
6069 	     GET_MODE_PRECISION (int_mode) - 1 - i);
6070 
6071 	  /* If all we did was surround TEMP with the two shifts, we
6072 	     haven't improved anything, so don't use it.  Otherwise,
6073 	     we are better off with TEMP1.  */
6074 	  if (GET_CODE (temp1) != ASHIFTRT
6075 	      || GET_CODE (XEXP (temp1, 0)) != ASHIFT
6076 	      || XEXP (XEXP (temp1, 0), 0) != temp)
6077 	    return temp1;
6078 	}
6079       break;
6080 
6081     case TRUNCATE:
6082       /* We can't handle truncation to a partial integer mode here
6083 	 because we don't know the real bitsize of the partial
6084 	 integer mode.  */
6085       if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
6086 	break;
6087 
6088       if (HWI_COMPUTABLE_MODE_P (mode))
6089 	SUBST (XEXP (x, 0),
6090 	       force_to_mode (XEXP (x, 0), GET_MODE (XEXP (x, 0)),
6091 			      GET_MODE_MASK (mode), 0));
6092 
6093       /* We can truncate a constant value and return it.  */
6094       {
6095 	poly_int64 c;
6096 	if (poly_int_rtx_p (XEXP (x, 0), &c))
6097 	  return gen_int_mode (c, mode);
6098       }
6099 
6100       /* Similarly to what we do in simplify-rtx.c, a truncate of a register
6101 	 whose value is a comparison can be replaced with a subreg if
6102 	 STORE_FLAG_VALUE permits.  */
6103       if (HWI_COMPUTABLE_MODE_P (mode)
6104 	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
6105 	  && (temp = get_last_value (XEXP (x, 0)))
6106 	  && COMPARISON_P (temp))
6107 	return gen_lowpart (mode, XEXP (x, 0));
6108       break;
6109 
6110     case CONST:
6111       /* (const (const X)) can become (const X).  Do it this way rather than
6112 	 returning the inner CONST since CONST can be shared with a
6113 	 REG_EQUAL note.  */
6114       if (GET_CODE (XEXP (x, 0)) == CONST)
6115 	SUBST (XEXP (x, 0), XEXP (XEXP (x, 0), 0));
6116       break;
6117 
6118     case LO_SUM:
6119       /* Convert (lo_sum (high FOO) FOO) to FOO.  This is necessary so we
6120 	 can add in an offset.  find_split_point will split this address up
6121 	 again if it doesn't match.  */
6122       if (HAVE_lo_sum && GET_CODE (XEXP (x, 0)) == HIGH
6123 	  && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
6124 	return XEXP (x, 1);
6125       break;
6126 
6127     case PLUS:
6128       /* (plus (xor (and <foo> (const_int pow2 - 1)) <c>) <-c>)
6129 	 when c is (const_int (pow2 + 1) / 2) is a sign extension of a
6130 	 bit-field and can be replaced by either a sign_extend or a
6131 	 sign_extract.  The `and' may be a zero_extend and the two
6132 	 <c>, -<c> constants may be reversed.  */
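      /* Illustrative instance, assuming SImode and an 8-bit field:
	 (plus (xor (and X (const_int 255)) (const_int 128)) (const_int -128))
	 is the sign extension of the low byte of X and is rewritten below
	 into the shift pair (ashiftrt (ashift X 24) 24).  */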
6133       if (GET_CODE (XEXP (x, 0)) == XOR
6134 	  && is_a <scalar_int_mode> (mode, &int_mode)
6135 	  && CONST_INT_P (XEXP (x, 1))
6136 	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
6137 	  && INTVAL (XEXP (x, 1)) == -INTVAL (XEXP (XEXP (x, 0), 1))
6138 	  && ((i = exact_log2 (UINTVAL (XEXP (XEXP (x, 0), 1)))) >= 0
6139 	      || (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0)
6140 	  && HWI_COMPUTABLE_MODE_P (int_mode)
6141 	  && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == AND
6142 	       && CONST_INT_P (XEXP (XEXP (XEXP (x, 0), 0), 1))
6143 	       && (UINTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1))
6144 		   == (HOST_WIDE_INT_1U << (i + 1)) - 1))
6145 	      || (GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
6146 		  && known_eq ((GET_MODE_PRECISION
6147 				(GET_MODE (XEXP (XEXP (XEXP (x, 0), 0), 0)))),
6148 			       (unsigned int) i + 1))))
6149 	return simplify_shift_const
6150 	  (NULL_RTX, ASHIFTRT, int_mode,
6151 	   simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6152 				 XEXP (XEXP (XEXP (x, 0), 0), 0),
6153 				 GET_MODE_PRECISION (int_mode) - (i + 1)),
6154 	   GET_MODE_PRECISION (int_mode) - (i + 1));
6155 
6156       /* If only the low-order bit of X is possibly nonzero, (plus x -1)
6157 	 can become (ashiftrt (ashift (xor x 1) C) C) where C is
6158 	 the bitsize of the mode - 1.  This allows simplification of
6159 	 "a = (b & 8) == 0;"  */
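      /* Illustrative, assuming SImode: with nonzero_bits (X) == 1,
	 (plus X (const_int -1)) becomes
	 (ashiftrt (ashift (xor X (const_int 1)) 31) 31),
	 i.e. 1 maps to 0 and 0 maps to -1.  */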
6160       if (XEXP (x, 1) == constm1_rtx
6161 	  && !REG_P (XEXP (x, 0))
6162 	  && ! (GET_CODE (XEXP (x, 0)) == SUBREG
6163 		&& REG_P (SUBREG_REG (XEXP (x, 0))))
6164 	  && is_a <scalar_int_mode> (mode, &int_mode)
6165 	  && nonzero_bits (XEXP (x, 0), int_mode) == 1)
6166 	return simplify_shift_const
6167 	  (NULL_RTX, ASHIFTRT, int_mode,
6168 	   simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6169 				 gen_rtx_XOR (int_mode, XEXP (x, 0),
6170 					      const1_rtx),
6171 				 GET_MODE_PRECISION (int_mode) - 1),
6172 	   GET_MODE_PRECISION (int_mode) - 1);
6173 
6174       /* If we are adding two things that have no bits in common, convert
6175 	 the addition into an IOR.  This will often be further simplified,
6176 	 for example in cases like ((a & 1) + (a & 2)), which can
6177 	 become a & 3.  */
6178 
6179       if (HWI_COMPUTABLE_MODE_P (mode)
6180 	  && (nonzero_bits (XEXP (x, 0), mode)
6181 	      & nonzero_bits (XEXP (x, 1), mode)) == 0)
6182 	{
6183 	  /* Try to simplify the expression further.  */
6184 	  rtx tor = simplify_gen_binary (IOR, mode, XEXP (x, 0), XEXP (x, 1));
6185 	  temp = combine_simplify_rtx (tor, VOIDmode, in_dest, 0);
6186 
6187 	  /* If we could, great.  If not, do not go ahead with the IOR
6188 	     replacement, since PLUS appears in many special purpose
6189 	     address arithmetic instructions.  */
6190 	  if (GET_CODE (temp) != CLOBBER
6191 	      && (GET_CODE (temp) != IOR
6192 		  || ((XEXP (temp, 0) != XEXP (x, 0)
6193 		       || XEXP (temp, 1) != XEXP (x, 1))
6194 		      && (XEXP (temp, 0) != XEXP (x, 1)
6195 			  || XEXP (temp, 1) != XEXP (x, 0)))))
6196 	    return temp;
6197 	}
6198 
6199       /* Canonicalize x + x into x << 1.  */
6200       if (GET_MODE_CLASS (mode) == MODE_INT
6201 	  && rtx_equal_p (XEXP (x, 0), XEXP (x, 1))
6202 	  && !side_effects_p (XEXP (x, 0)))
6203 	return simplify_gen_binary (ASHIFT, mode, XEXP (x, 0), const1_rtx);
6204 
6205       break;
6206 
6207     case MINUS:
6208       /* (minus <foo> (and <foo> (const_int -pow2))) becomes
6209 	 (and <foo> (const_int pow2-1))  */
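      /* E.g. (minus X (and X (const_int -8))) becomes
	 (and X (const_int 7)); subtracting off the masked high bits leaves
	 just the low bits.  */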
6210       if (is_a <scalar_int_mode> (mode, &int_mode)
6211 	  && GET_CODE (XEXP (x, 1)) == AND
6212 	  && CONST_INT_P (XEXP (XEXP (x, 1), 1))
6213 	  && pow2p_hwi (-UINTVAL (XEXP (XEXP (x, 1), 1)))
6214 	  && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
6215 	return simplify_and_const_int (NULL_RTX, int_mode, XEXP (x, 0),
6216 				       -INTVAL (XEXP (XEXP (x, 1), 1)) - 1);
6217       break;
6218 
6219     case MULT:
6220       /* If we have (mult (plus A B) C), apply the distributive law and then
6221 	 the inverse distributive law to see if things simplify.  This
6222 	 occurs mostly in addresses, often when unrolling loops.  */
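      /* Illustrative: (mult (plus A (const_int 4)) (const_int 8)) can be
	 rewritten as (plus (mult A (const_int 8)) (const_int 32)), which
	 often matches an addressing mode directly.  */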
6223 
6224       if (GET_CODE (XEXP (x, 0)) == PLUS)
6225 	{
6226 	  rtx result = distribute_and_simplify_rtx (x, 0);
6227 	  if (result)
6228 	    return result;
6229 	}
6230 
6231       /* Try to simplify a*(b/c) as (a*b)/c.  */
6232       if (FLOAT_MODE_P (mode) && flag_associative_math
6233 	  && GET_CODE (XEXP (x, 0)) == DIV)
6234 	{
6235 	  rtx tem = simplify_binary_operation (MULT, mode,
6236 					       XEXP (XEXP (x, 0), 0),
6237 					       XEXP (x, 1));
6238 	  if (tem)
6239 	    return simplify_gen_binary (DIV, mode, tem, XEXP (XEXP (x, 0), 1));
6240 	}
6241       break;
6242 
6243     case UDIV:
6244       /* If this is a divide by a power of two, treat it as a shift if
6245 	 its first operand is a shift.  */
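      /* Illustrative: (udiv (lshiftrt X 2) (const_int 4)) is handled as
	 (lshiftrt (lshiftrt X 2) 2), which simplify_shift_const can merge
	 into (lshiftrt X 4).  */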
6246       if (is_a <scalar_int_mode> (mode, &int_mode)
6247 	  && CONST_INT_P (XEXP (x, 1))
6248 	  && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
6249 	  && (GET_CODE (XEXP (x, 0)) == ASHIFT
6250 	      || GET_CODE (XEXP (x, 0)) == LSHIFTRT
6251 	      || GET_CODE (XEXP (x, 0)) == ASHIFTRT
6252 	      || GET_CODE (XEXP (x, 0)) == ROTATE
6253 	      || GET_CODE (XEXP (x, 0)) == ROTATERT))
6254 	return simplify_shift_const (NULL_RTX, LSHIFTRT, int_mode,
6255 				     XEXP (x, 0), i);
6256       break;
6257 
6258     case EQ:  case NE:
6259     case GT:  case GTU:  case GE:  case GEU:
6260     case LT:  case LTU:  case LE:  case LEU:
6261     case UNEQ:  case LTGT:
6262     case UNGT:  case UNGE:
6263     case UNLT:  case UNLE:
6264     case UNORDERED: case ORDERED:
6265       /* If the first operand is a condition code, we can't do anything
6266 	 with it.  */
6267       if (GET_CODE (XEXP (x, 0)) == COMPARE
6268 	  || (GET_MODE_CLASS (GET_MODE (XEXP (x, 0))) != MODE_CC
6269 	      && ! CC0_P (XEXP (x, 0))))
6270 	{
6271 	  rtx op0 = XEXP (x, 0);
6272 	  rtx op1 = XEXP (x, 1);
6273 	  enum rtx_code new_code;
6274 
6275 	  if (GET_CODE (op0) == COMPARE)
6276 	    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
6277 
6278 	  /* Simplify our comparison, if possible.  */
6279 	  new_code = simplify_comparison (code, &op0, &op1);
6280 
6281 	  /* If STORE_FLAG_VALUE is 1, we can convert (ne x 0) to simply X
6282 	     if only the low-order bit is possibly nonzero in X (such as when
6283 	     X is a ZERO_EXTRACT of one bit).  Similarly, we can convert EQ to
6284 	     (xor X 1) or (minus 1 X); we use the former.  Finally, if X is
6285 	     known to be either 0 or -1, NE becomes a NEG and EQ becomes
6286 	     (plus X 1).
6287 
6288 	     Remove any ZERO_EXTRACT we made when thinking this was a
6289 	     comparison.  It may now be simpler to use, e.g., an AND.  If a
6290 	     ZERO_EXTRACT is indeed appropriate, it will be placed back by
6291 	     the call to make_compound_operation in the SET case.
6292 
6293 	     Don't apply these optimizations if the caller would
6294 	     prefer a comparison rather than a value.
6295 	     E.g., for the condition in an IF_THEN_ELSE most targets need
6296 	     an explicit comparison.  */
6297 
6298 	  if (in_cond)
6299 	    ;
6300 
6301 	  else if (STORE_FLAG_VALUE == 1
6302 		   && new_code == NE
6303 		   && is_int_mode (mode, &int_mode)
6304 		   && op1 == const0_rtx
6305 		   && int_mode == GET_MODE (op0)
6306 		   && nonzero_bits (op0, int_mode) == 1)
6307 	    return gen_lowpart (int_mode,
6308 				expand_compound_operation (op0));
6309 
6310 	  else if (STORE_FLAG_VALUE == 1
6311 		   && new_code == NE
6312 		   && is_int_mode (mode, &int_mode)
6313 		   && op1 == const0_rtx
6314 		   && int_mode == GET_MODE (op0)
6315 		   && (num_sign_bit_copies (op0, int_mode)
6316 		       == GET_MODE_PRECISION (int_mode)))
6317 	    {
6318 	      op0 = expand_compound_operation (op0);
6319 	      return simplify_gen_unary (NEG, int_mode,
6320 					 gen_lowpart (int_mode, op0),
6321 					 int_mode);
6322 	    }
6323 
6324 	  else if (STORE_FLAG_VALUE == 1
6325 		   && new_code == EQ
6326 		   && is_int_mode (mode, &int_mode)
6327 		   && op1 == const0_rtx
6328 		   && int_mode == GET_MODE (op0)
6329 		   && nonzero_bits (op0, int_mode) == 1)
6330 	    {
6331 	      op0 = expand_compound_operation (op0);
6332 	      return simplify_gen_binary (XOR, int_mode,
6333 					  gen_lowpart (int_mode, op0),
6334 					  const1_rtx);
6335 	    }
6336 
6337 	  else if (STORE_FLAG_VALUE == 1
6338 		   && new_code == EQ
6339 		   && is_int_mode (mode, &int_mode)
6340 		   && op1 == const0_rtx
6341 		   && int_mode == GET_MODE (op0)
6342 		   && (num_sign_bit_copies (op0, int_mode)
6343 		       == GET_MODE_PRECISION (int_mode)))
6344 	    {
6345 	      op0 = expand_compound_operation (op0);
6346 	      return plus_constant (int_mode, gen_lowpart (int_mode, op0), 1);
6347 	    }
6348 
6349 	  /* If STORE_FLAG_VALUE is -1, we have cases similar to
6350 	     those above.  */
6351 	  if (in_cond)
6352 	    ;
6353 
6354 	  else if (STORE_FLAG_VALUE == -1
6355 		   && new_code == NE
6356 		   && is_int_mode (mode, &int_mode)
6357 		   && op1 == const0_rtx
6358 		   && int_mode == GET_MODE (op0)
6359 		   && (num_sign_bit_copies (op0, int_mode)
6360 		       == GET_MODE_PRECISION (int_mode)))
6361 	    return gen_lowpart (int_mode, expand_compound_operation (op0));
6362 
6363 	  else if (STORE_FLAG_VALUE == -1
6364 		   && new_code == NE
6365 		   && is_int_mode (mode, &int_mode)
6366 		   && op1 == const0_rtx
6367 		   && int_mode == GET_MODE (op0)
6368 		   && nonzero_bits (op0, int_mode) == 1)
6369 	    {
6370 	      op0 = expand_compound_operation (op0);
6371 	      return simplify_gen_unary (NEG, int_mode,
6372 					 gen_lowpart (int_mode, op0),
6373 					 int_mode);
6374 	    }
6375 
6376 	  else if (STORE_FLAG_VALUE == -1
6377 		   && new_code == EQ
6378 		   && is_int_mode (mode, &int_mode)
6379 		   && op1 == const0_rtx
6380 		   && int_mode == GET_MODE (op0)
6381 		   && (num_sign_bit_copies (op0, int_mode)
6382 		       == GET_MODE_PRECISION (int_mode)))
6383 	    {
6384 	      op0 = expand_compound_operation (op0);
6385 	      return simplify_gen_unary (NOT, int_mode,
6386 					 gen_lowpart (int_mode, op0),
6387 					 int_mode);
6388 	    }
6389 
6390 	  /* If X is 0/1, (eq X 0) is X-1.  */
6391 	  else if (STORE_FLAG_VALUE == -1
6392 		   && new_code == EQ
6393 		   && is_int_mode (mode, &int_mode)
6394 		   && op1 == const0_rtx
6395 		   && int_mode == GET_MODE (op0)
6396 		   && nonzero_bits (op0, int_mode) == 1)
6397 	    {
6398 	      op0 = expand_compound_operation (op0);
6399 	      return plus_constant (int_mode, gen_lowpart (int_mode, op0), -1);
6400 	    }
6401 
6402 	  /* If STORE_FLAG_VALUE says to just test the sign bit and X has just
6403 	     one bit that might be nonzero, we can convert (ne x 0) to
6404 	     (ashift x c) where C puts the bit in the sign bit.  Remove any
6405 	     AND with STORE_FLAG_VALUE when we are done, since we are only
6406 	     going to test the sign bit.  */
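	  /* Illustrative, assuming SImode with STORE_FLAG_VALUE equal to the
	     sign bit: if only bit 3 of X can be nonzero, (ne X 0) becomes
	     (ashift X 28), moving that bit into the sign position.  */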
6407 	  if (new_code == NE
6408 	      && is_int_mode (mode, &int_mode)
6409 	      && HWI_COMPUTABLE_MODE_P (int_mode)
6410 	      && val_signbit_p (int_mode, STORE_FLAG_VALUE)
6411 	      && op1 == const0_rtx
6412 	      && int_mode == GET_MODE (op0)
6413 	      && (i = exact_log2 (nonzero_bits (op0, int_mode))) >= 0)
6414 	    {
6415 	      x = simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6416 					expand_compound_operation (op0),
6417 					GET_MODE_PRECISION (int_mode) - 1 - i);
6418 	      if (GET_CODE (x) == AND && XEXP (x, 1) == const_true_rtx)
6419 		return XEXP (x, 0);
6420 	      else
6421 		return x;
6422 	    }
6423 
6424 	  /* If the code changed, return a whole new comparison.
6425 	     We also need to avoid using SUBST in cases where
6426 	     simplify_comparison has widened a comparison with a CONST_INT,
6427 	     since in that case the wider CONST_INT may fail the sanity
6428 	     checks in do_SUBST.  */
6429 	  if (new_code != code
6430 	      || (CONST_INT_P (op1)
6431 		  && GET_MODE (op0) != GET_MODE (XEXP (x, 0))
6432 		  && GET_MODE (op0) != GET_MODE (XEXP (x, 1))))
6433 	    return gen_rtx_fmt_ee (new_code, mode, op0, op1);
6434 
6435 	  /* Otherwise, keep this operation, but maybe change its operands.
6436 	     This also converts (ne (compare FOO BAR) 0) to (ne FOO BAR).  */
6437 	  SUBST (XEXP (x, 0), op0);
6438 	  SUBST (XEXP (x, 1), op1);
6439 	}
6440       break;
6441 
6442     case IF_THEN_ELSE:
6443       return simplify_if_then_else (x);
6444 
6445     case ZERO_EXTRACT:
6446     case SIGN_EXTRACT:
6447     case ZERO_EXTEND:
6448     case SIGN_EXTEND:
6449       /* If we are processing SET_DEST, we are done.  */
6450       if (in_dest)
6451 	return x;
6452 
6453       return expand_compound_operation (x);
6454 
6455     case SET:
6456       return simplify_set (x);
6457 
6458     case AND:
6459     case IOR:
6460       return simplify_logical (x);
6461 
6462     case ASHIFT:
6463     case LSHIFTRT:
6464     case ASHIFTRT:
6465     case ROTATE:
6466     case ROTATERT:
6467       /* If this is a shift by a constant amount, simplify it.  */
6468       if (CONST_INT_P (XEXP (x, 1)))
6469 	return simplify_shift_const (x, code, mode, XEXP (x, 0),
6470 				     INTVAL (XEXP (x, 1)));
6471 
6472       else if (SHIFT_COUNT_TRUNCATED && !REG_P (XEXP (x, 1)))
6473 	SUBST (XEXP (x, 1),
6474 	       force_to_mode (XEXP (x, 1), GET_MODE (XEXP (x, 1)),
6475 			      (HOST_WIDE_INT_1U
6476 			       << exact_log2 (GET_MODE_UNIT_BITSIZE
6477 					      (GET_MODE (x))))
6478 			      - 1,
6479 			      0));
6480       break;
6481 
6482     default:
6483       break;
6484     }
6485 
6486   return x;
6487 }
6488 
6489 /* Simplify X, an IF_THEN_ELSE expression.  Return the new expression.  */
6490 
6491 static rtx
6492 simplify_if_then_else (rtx x)
6493 {
6494   machine_mode mode = GET_MODE (x);
6495   rtx cond = XEXP (x, 0);
6496   rtx true_rtx = XEXP (x, 1);
6497   rtx false_rtx = XEXP (x, 2);
6498   enum rtx_code true_code = GET_CODE (cond);
6499   int comparison_p = COMPARISON_P (cond);
6500   rtx temp;
6501   int i;
6502   enum rtx_code false_code;
6503   rtx reversed;
6504   scalar_int_mode int_mode, inner_mode;
6505 
6506   /* Simplify storing of the truth value.  */
6507   if (comparison_p && true_rtx == const_true_rtx && false_rtx == const0_rtx)
6508     return simplify_gen_relational (true_code, mode, VOIDmode,
6509 				    XEXP (cond, 0), XEXP (cond, 1));
6510 
6511   /* Also when the truth value has to be reversed.  */
6512   if (comparison_p
6513       && true_rtx == const0_rtx && false_rtx == const_true_rtx
6514       && (reversed = reversed_comparison (cond, mode)))
6515     return reversed;
6516 
6517   /* Sometimes we can simplify the arm of an IF_THEN_ELSE if a register used
6518      in it is being compared against certain values.  Get the true and false
6519      comparisons and see if that says anything about the value of each arm.  */
6520 
6521   if (comparison_p
6522       && ((false_code = reversed_comparison_code (cond, NULL))
6523 	  != UNKNOWN)
6524       && REG_P (XEXP (cond, 0)))
6525     {
6526       HOST_WIDE_INT nzb;
6527       rtx from = XEXP (cond, 0);
6528       rtx true_val = XEXP (cond, 1);
6529       rtx false_val = true_val;
6530       int swapped = 0;
6531 
6532       /* If FALSE_CODE is EQ, swap the codes and arms.  */
6533 
6534       if (false_code == EQ)
6535 	{
6536 	  swapped = 1, true_code = EQ, false_code = NE;
6537 	  std::swap (true_rtx, false_rtx);
6538 	}
6539 
6540       scalar_int_mode from_mode;
6541       if (is_a <scalar_int_mode> (GET_MODE (from), &from_mode))
6542 	{
6543 	  /* If we are comparing against zero and the expression being
6544 	     tested has only a single bit that might be nonzero, that is
6545 	     its value when it is not equal to zero.  Similarly if it is
6546 	     known to be -1 or 0.  */
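	  /* E.g. if the condition is (eq R (const_int 0)) and only bit 3 of
	     R can be nonzero, then in the arm taken when the test fails R is
	     known to be 8, which FALSE_VAL records below.  */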
6547 	  if (true_code == EQ
6548 	      && true_val == const0_rtx
6549 	      && pow2p_hwi (nzb = nonzero_bits (from, from_mode)))
6550 	    {
6551 	      false_code = EQ;
6552 	      false_val = gen_int_mode (nzb, from_mode);
6553 	    }
6554 	  else if (true_code == EQ
6555 		   && true_val == const0_rtx
6556 		   && (num_sign_bit_copies (from, from_mode)
6557 		       == GET_MODE_PRECISION (from_mode)))
6558 	    {
6559 	      false_code = EQ;
6560 	      false_val = constm1_rtx;
6561 	    }
6562 	}
6563 
6564       /* Now simplify an arm if we know the value of the register in the
6565 	 branch and it is used in the arm.  Be careful due to the potential
6566 	 of locally-shared RTL.  */
6567 
6568       if (reg_mentioned_p (from, true_rtx))
6569 	true_rtx = subst (known_cond (copy_rtx (true_rtx), true_code,
6570 				      from, true_val),
6571 			  pc_rtx, pc_rtx, 0, 0, 0);
6572       if (reg_mentioned_p (from, false_rtx))
6573 	false_rtx = subst (known_cond (copy_rtx (false_rtx), false_code,
6574 				       from, false_val),
6575 			   pc_rtx, pc_rtx, 0, 0, 0);
6576 
6577       SUBST (XEXP (x, 1), swapped ? false_rtx : true_rtx);
6578       SUBST (XEXP (x, 2), swapped ? true_rtx : false_rtx);
6579 
6580       true_rtx = XEXP (x, 1);
6581       false_rtx = XEXP (x, 2);
6582       true_code = GET_CODE (cond);
6583     }
6584 
6585   /* If we have (if_then_else FOO (pc) (label_ref BAR)) and FOO can be
6586      reversed, do so to avoid needing two sets of patterns for
6587      subtract-and-branch insns.  Similarly if we have a constant in the true
6588      arm, the false arm is the same as the first operand of the comparison, or
6589      the false arm is more complicated than the true arm.  */
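  /* Illustrative: (if_then_else (eq A B) (pc) (label_ref L)) is rewritten
     as (if_then_else (ne A B) (label_ref L) (pc)).  */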
6590 
6591   if (comparison_p
6592       && reversed_comparison_code (cond, NULL) != UNKNOWN
6593       && (true_rtx == pc_rtx
6594 	  || (CONSTANT_P (true_rtx)
6595 	      && !CONST_INT_P (false_rtx) && false_rtx != pc_rtx)
6596 	  || true_rtx == const0_rtx
6597 	  || (OBJECT_P (true_rtx) && !OBJECT_P (false_rtx))
6598 	  || (GET_CODE (true_rtx) == SUBREG && OBJECT_P (SUBREG_REG (true_rtx))
6599 	      && !OBJECT_P (false_rtx))
6600 	  || reg_mentioned_p (true_rtx, false_rtx)
6601 	  || rtx_equal_p (false_rtx, XEXP (cond, 0))))
6602     {
6603       true_code = reversed_comparison_code (cond, NULL);
6604       SUBST (XEXP (x, 0), reversed_comparison (cond, GET_MODE (cond)));
6605       SUBST (XEXP (x, 1), false_rtx);
6606       SUBST (XEXP (x, 2), true_rtx);
6607 
6608       std::swap (true_rtx, false_rtx);
6609       cond = XEXP (x, 0);
6610 
6611       /* It is possible that the conditional has been simplified out.  */
6612       true_code = GET_CODE (cond);
6613       comparison_p = COMPARISON_P (cond);
6614     }
6615 
6616   /* If the two arms are identical, we don't need the comparison.  */
6617 
6618   if (rtx_equal_p (true_rtx, false_rtx) && ! side_effects_p (cond))
6619     return true_rtx;
6620 
6621   /* Convert a == b ? b : a to "a".  */
6622   if (true_code == EQ && ! side_effects_p (cond)
6623       && !HONOR_NANS (mode)
6624       && rtx_equal_p (XEXP (cond, 0), false_rtx)
6625       && rtx_equal_p (XEXP (cond, 1), true_rtx))
6626     return false_rtx;
6627   else if (true_code == NE && ! side_effects_p (cond)
6628 	   && !HONOR_NANS (mode)
6629 	   && rtx_equal_p (XEXP (cond, 0), true_rtx)
6630 	   && rtx_equal_p (XEXP (cond, 1), false_rtx))
6631     return true_rtx;
6632 
6633   /* Look for cases where we have (abs x) or (neg (abs X)).  */
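  /* E.g. (if_then_else (ge X (const_int 0)) X (neg X)) becomes (abs X),
     and with LT or LE in the condition it becomes (neg (abs X)).  */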
6634 
6635   if (GET_MODE_CLASS (mode) == MODE_INT
6636       && comparison_p
6637       && XEXP (cond, 1) == const0_rtx
6638       && GET_CODE (false_rtx) == NEG
6639       && rtx_equal_p (true_rtx, XEXP (false_rtx, 0))
6640       && rtx_equal_p (true_rtx, XEXP (cond, 0))
6641       && ! side_effects_p (true_rtx))
6642     switch (true_code)
6643       {
6644       case GT:
6645       case GE:
6646 	return simplify_gen_unary (ABS, mode, true_rtx, mode);
6647       case LT:
6648       case LE:
6649 	return
6650 	  simplify_gen_unary (NEG, mode,
6651 			      simplify_gen_unary (ABS, mode, true_rtx, mode),
6652 			      mode);
6653       default:
6654 	break;
6655       }
6656 
6657   /* Look for MIN or MAX.  */
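  /* E.g. (if_then_else (gt A B) A B) becomes (smax A B); the unsigned and
     "min" cases follow the same pattern.  */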
6658 
6659   if ((! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
6660       && comparison_p
6661       && rtx_equal_p (XEXP (cond, 0), true_rtx)
6662       && rtx_equal_p (XEXP (cond, 1), false_rtx)
6663       && ! side_effects_p (cond))
6664     switch (true_code)
6665       {
6666       case GE:
6667       case GT:
6668 	return simplify_gen_binary (SMAX, mode, true_rtx, false_rtx);
6669       case LE:
6670       case LT:
6671 	return simplify_gen_binary (SMIN, mode, true_rtx, false_rtx);
6672       case GEU:
6673       case GTU:
6674 	return simplify_gen_binary (UMAX, mode, true_rtx, false_rtx);
6675       case LEU:
6676       case LTU:
6677 	return simplify_gen_binary (UMIN, mode, true_rtx, false_rtx);
6678       default:
6679 	break;
6680       }
6681 
6682   /* If we have (if_then_else COND (OP Z C1) Z) and OP is an identity when its
6683      second operand is zero, this can be done as (OP Z (mult COND C2)) where
6684      C2 = C1 * STORE_FLAG_VALUE. Similarly if OP has an outer ZERO_EXTEND or
6685      SIGN_EXTEND as long as Z is already extended (so we don't destroy it).
6686      We can do this kind of thing in some cases when STORE_FLAG_VALUE is
6687      neither 1 nor -1, but it isn't worth checking for.  */
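  /* Illustrative, with STORE_FLAG_VALUE of 1:
     (if_then_else COND (plus Z (const_int 4)) Z) can become
     (plus Z (mult COND (const_int 4))), since COND evaluates to 0 or 1.  */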
6688 
6689   if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
6690       && comparison_p
6691       && is_int_mode (mode, &int_mode)
6692       && ! side_effects_p (x))
6693     {
6694       rtx t = make_compound_operation (true_rtx, SET);
6695       rtx f = make_compound_operation (false_rtx, SET);
6696       rtx cond_op0 = XEXP (cond, 0);
6697       rtx cond_op1 = XEXP (cond, 1);
6698       enum rtx_code op = UNKNOWN, extend_op = UNKNOWN;
6699       scalar_int_mode m = int_mode;
6700       rtx z = 0, c1 = NULL_RTX;
6701 
6702       if ((GET_CODE (t) == PLUS || GET_CODE (t) == MINUS
6703 	   || GET_CODE (t) == IOR || GET_CODE (t) == XOR
6704 	   || GET_CODE (t) == ASHIFT
6705 	   || GET_CODE (t) == LSHIFTRT || GET_CODE (t) == ASHIFTRT)
6706 	  && rtx_equal_p (XEXP (t, 0), f))
6707 	c1 = XEXP (t, 1), op = GET_CODE (t), z = f;
6708 
6709       /* If an identity-zero op is commutative, check whether there
6710 	 would be a match if we swapped the operands.  */
6711       else if ((GET_CODE (t) == PLUS || GET_CODE (t) == IOR
6712 		|| GET_CODE (t) == XOR)
6713 	       && rtx_equal_p (XEXP (t, 1), f))
6714 	c1 = XEXP (t, 0), op = GET_CODE (t), z = f;
6715       else if (GET_CODE (t) == SIGN_EXTEND
6716 	       && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6717 	       && (GET_CODE (XEXP (t, 0)) == PLUS
6718 		   || GET_CODE (XEXP (t, 0)) == MINUS
6719 		   || GET_CODE (XEXP (t, 0)) == IOR
6720 		   || GET_CODE (XEXP (t, 0)) == XOR
6721 		   || GET_CODE (XEXP (t, 0)) == ASHIFT
6722 		   || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6723 		   || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6724 	       && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6725 	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6726 	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6727 	       && (num_sign_bit_copies (f, GET_MODE (f))
6728 		   > (unsigned int)
6729 		     (GET_MODE_PRECISION (int_mode)
6730 		      - GET_MODE_PRECISION (inner_mode))))
6731 	{
6732 	  c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6733 	  extend_op = SIGN_EXTEND;
6734 	  m = inner_mode;
6735 	}
6736       else if (GET_CODE (t) == SIGN_EXTEND
6737 	       && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6738 	       && (GET_CODE (XEXP (t, 0)) == PLUS
6739 		   || GET_CODE (XEXP (t, 0)) == IOR
6740 		   || GET_CODE (XEXP (t, 0)) == XOR)
6741 	       && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6742 	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6743 	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6744 	       && (num_sign_bit_copies (f, GET_MODE (f))
6745 		   > (unsigned int)
6746 		     (GET_MODE_PRECISION (int_mode)
6747 		      - GET_MODE_PRECISION (inner_mode))))
6748 	{
6749 	  c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6750 	  extend_op = SIGN_EXTEND;
6751 	  m = inner_mode;
6752 	}
6753       else if (GET_CODE (t) == ZERO_EXTEND
6754 	       && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6755 	       && (GET_CODE (XEXP (t, 0)) == PLUS
6756 		   || GET_CODE (XEXP (t, 0)) == MINUS
6757 		   || GET_CODE (XEXP (t, 0)) == IOR
6758 		   || GET_CODE (XEXP (t, 0)) == XOR
6759 		   || GET_CODE (XEXP (t, 0)) == ASHIFT
6760 		   || GET_CODE (XEXP (t, 0)) == LSHIFTRT
6761 		   || GET_CODE (XEXP (t, 0)) == ASHIFTRT)
6762 	       && GET_CODE (XEXP (XEXP (t, 0), 0)) == SUBREG
6763 	       && HWI_COMPUTABLE_MODE_P (int_mode)
6764 	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 0))
6765 	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 0)), f)
6766 	       && ((nonzero_bits (f, GET_MODE (f))
6767 		    & ~GET_MODE_MASK (inner_mode))
6768 		   == 0))
6769 	{
6770 	  c1 = XEXP (XEXP (t, 0), 1); z = f; op = GET_CODE (XEXP (t, 0));
6771 	  extend_op = ZERO_EXTEND;
6772 	  m = inner_mode;
6773 	}
6774       else if (GET_CODE (t) == ZERO_EXTEND
6775 	       && is_a <scalar_int_mode> (GET_MODE (XEXP (t, 0)), &inner_mode)
6776 	       && (GET_CODE (XEXP (t, 0)) == PLUS
6777 		   || GET_CODE (XEXP (t, 0)) == IOR
6778 		   || GET_CODE (XEXP (t, 0)) == XOR)
6779 	       && GET_CODE (XEXP (XEXP (t, 0), 1)) == SUBREG
6780 	       && HWI_COMPUTABLE_MODE_P (int_mode)
6781 	       && subreg_lowpart_p (XEXP (XEXP (t, 0), 1))
6782 	       && rtx_equal_p (SUBREG_REG (XEXP (XEXP (t, 0), 1)), f)
6783 	       && ((nonzero_bits (f, GET_MODE (f))
6784 		    & ~GET_MODE_MASK (inner_mode))
6785 		   == 0))
6786 	{
6787 	  c1 = XEXP (XEXP (t, 0), 0); z = f; op = GET_CODE (XEXP (t, 0));
6788 	  extend_op = ZERO_EXTEND;
6789 	  m = inner_mode;
6790 	}
6791 
6792       if (z)
6793 	{
6794 	  machine_mode cm = m;
6795 	  if ((op == ASHIFT || op == LSHIFTRT || op == ASHIFTRT)
6796 	      && GET_MODE (c1) != VOIDmode)
6797 	    cm = GET_MODE (c1);
6798 	  temp = subst (simplify_gen_relational (true_code, cm, VOIDmode,
6799 						 cond_op0, cond_op1),
6800 			pc_rtx, pc_rtx, 0, 0, 0);
6801 	  temp = simplify_gen_binary (MULT, cm, temp,
6802 				      simplify_gen_binary (MULT, cm, c1,
6803 							   const_true_rtx));
6804 	  temp = subst (temp, pc_rtx, pc_rtx, 0, 0, 0);
6805 	  temp = simplify_gen_binary (op, m, gen_lowpart (m, z), temp);
6806 
6807 	  if (extend_op != UNKNOWN)
6808 	    temp = simplify_gen_unary (extend_op, int_mode, temp, m);
6809 
6810 	  return temp;
6811 	}
6812     }
6813 
6814   /* If we have (if_then_else (ne A 0) C1 0) and either A is known to be 0 or
6815      1 and C1 is a single bit or A is known to be 0 or -1 and C1 is the
6816      negation of a single bit, we can convert this operation to a shift.  We
6817      can actually do this more generally, but it doesn't seem worth it.  */
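  /* E.g. if A is known to be 0 or 1, (if_then_else (ne A 0) (const_int 8)
     (const_int 0)) becomes (ashift A 3).  */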
6818 
6819   if (true_code == NE
6820       && is_a <scalar_int_mode> (mode, &int_mode)
6821       && XEXP (cond, 1) == const0_rtx
6822       && false_rtx == const0_rtx
6823       && CONST_INT_P (true_rtx)
6824       && ((nonzero_bits (XEXP (cond, 0), int_mode) == 1
6825 	   && (i = exact_log2 (UINTVAL (true_rtx))) >= 0)
6826 	  || ((num_sign_bit_copies (XEXP (cond, 0), int_mode)
6827 	       == GET_MODE_PRECISION (int_mode))
6828 	      && (i = exact_log2 (-UINTVAL (true_rtx))) >= 0)))
6829     return
6830       simplify_shift_const (NULL_RTX, ASHIFT, int_mode,
6831 			    gen_lowpart (int_mode, XEXP (cond, 0)), i);
6832 
6833   /* (IF_THEN_ELSE (NE A 0) C1 0) is A or a zero-extend of A if the only
6834      non-zero bit in A is C1.  */
6835   if (true_code == NE && XEXP (cond, 1) == const0_rtx
6836       && false_rtx == const0_rtx && CONST_INT_P (true_rtx)
6837       && is_a <scalar_int_mode> (mode, &int_mode)
6838       && is_a <scalar_int_mode> (GET_MODE (XEXP (cond, 0)), &inner_mode)
6839       && (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))
6840 	  == nonzero_bits (XEXP (cond, 0), inner_mode)
6841       && (i = exact_log2 (UINTVAL (true_rtx) & GET_MODE_MASK (int_mode))) >= 0)
6842     {
6843       rtx val = XEXP (cond, 0);
6844       if (inner_mode == int_mode)
6845         return val;
6846       else if (GET_MODE_PRECISION (inner_mode) < GET_MODE_PRECISION (int_mode))
6847         return simplify_gen_unary (ZERO_EXTEND, int_mode, val, inner_mode);
6848     }
6849 
6850   return x;
6851 }
6852 
6853 /* Simplify X, a SET expression.  Return the new expression.  */
6854 
6855 static rtx
6856 simplify_set (rtx x)
6857 {
6858   rtx src = SET_SRC (x);
6859   rtx dest = SET_DEST (x);
6860   machine_mode mode
6861     = GET_MODE (src) != VOIDmode ? GET_MODE (src) : GET_MODE (dest);
6862   rtx_insn *other_insn;
6863   rtx *cc_use;
6864   scalar_int_mode int_mode;
6865 
6866   /* (set (pc) (return)) gets written as (return).  */
6867   if (GET_CODE (dest) == PC && ANY_RETURN_P (src))
6868     return src;
6869 
6870   /* Now that we know for sure which bits of SRC we are using, see if we can
6871      simplify the expression for the object knowing that we only need the
6872      low-order bits.  */
6873 
6874   if (GET_MODE_CLASS (mode) == MODE_INT && HWI_COMPUTABLE_MODE_P (mode))
6875     {
6876       src = force_to_mode (src, mode, HOST_WIDE_INT_M1U, 0);
6877       SUBST (SET_SRC (x), src);
6878     }
6879 
6880   /* If we are setting CC0 or if the source is a COMPARE, look for the use of
6881      the comparison result and try to simplify it unless we already have used
6882      undobuf.other_insn.  */
6883   if ((GET_MODE_CLASS (mode) == MODE_CC
6884        || GET_CODE (src) == COMPARE
6885        || CC0_P (dest))
6886       && (cc_use = find_single_use (dest, subst_insn, &other_insn)) != 0
6887       && (undobuf.other_insn == 0 || other_insn == undobuf.other_insn)
6888       && COMPARISON_P (*cc_use)
6889       && rtx_equal_p (XEXP (*cc_use, 0), dest))
6890     {
6891       enum rtx_code old_code = GET_CODE (*cc_use);
6892       enum rtx_code new_code;
6893       rtx op0, op1, tmp;
6894       int other_changed = 0;
6895       rtx inner_compare = NULL_RTX;
6896       machine_mode compare_mode = GET_MODE (dest);
6897 
6898       if (GET_CODE (src) == COMPARE)
6899 	{
6900 	  op0 = XEXP (src, 0), op1 = XEXP (src, 1);
6901 	  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
6902 	    {
6903 	      inner_compare = op0;
6904 	      op0 = XEXP (inner_compare, 0), op1 = XEXP (inner_compare, 1);
6905 	    }
6906 	}
6907       else
6908 	op0 = src, op1 = CONST0_RTX (GET_MODE (src));
6909 
6910       tmp = simplify_relational_operation (old_code, compare_mode, VOIDmode,
6911 					   op0, op1);
6912       if (!tmp)
6913 	new_code = old_code;
6914       else if (!CONSTANT_P (tmp))
6915 	{
6916 	  new_code = GET_CODE (tmp);
6917 	  op0 = XEXP (tmp, 0);
6918 	  op1 = XEXP (tmp, 1);
6919 	}
6920       else
6921 	{
6922 	  rtx pat = PATTERN (other_insn);
6923 	  undobuf.other_insn = other_insn;
6924 	  SUBST (*cc_use, tmp);
6925 
6926 	  /* Attempt to simplify CC user.  */
6927 	  if (GET_CODE (pat) == SET)
6928 	    {
6929 	      rtx new_rtx = simplify_rtx (SET_SRC (pat));
6930 	      if (new_rtx != NULL_RTX)
6931 		SUBST (SET_SRC (pat), new_rtx);
6932 	    }
6933 
6934 	  /* Convert X into a no-op move.  */
6935 	  SUBST (SET_DEST (x), pc_rtx);
6936 	  SUBST (SET_SRC (x), pc_rtx);
6937 	  return x;
6938 	}
6939 
6940       /* Simplify our comparison, if possible.  */
6941       new_code = simplify_comparison (new_code, &op0, &op1);
6942 
6943 #ifdef SELECT_CC_MODE
6944       /* If this machine has CC modes other than CCmode, check to see if we
6945 	 need to use a different CC mode here.  */
6946       if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
6947 	compare_mode = GET_MODE (op0);
6948       else if (inner_compare
6949 	       && GET_MODE_CLASS (GET_MODE (inner_compare)) == MODE_CC
6950 	       && new_code == old_code
6951 	       && op0 == XEXP (inner_compare, 0)
6952 	       && op1 == XEXP (inner_compare, 1))
6953 	compare_mode = GET_MODE (inner_compare);
6954       else
6955 	compare_mode = SELECT_CC_MODE (new_code, op0, op1);
6956 
6957       /* If the mode changed, we have to change SET_DEST, the mode in the
6958 	 compare, and the mode in the place SET_DEST is used.  If SET_DEST is
6959 	 a hard register, just build new versions with the proper mode.  If it
6960 	 is a pseudo, we lose unless it is the only time we set the pseudo, in
6961 	 which case we can safely change its mode.  */
6962       if (!HAVE_cc0 && compare_mode != GET_MODE (dest))
6963 	{
6964 	  if (can_change_dest_mode (dest, 0, compare_mode))
6965 	    {
6966 	      unsigned int regno = REGNO (dest);
6967 	      rtx new_dest;
6968 
6969 	      if (regno < FIRST_PSEUDO_REGISTER)
6970 		new_dest = gen_rtx_REG (compare_mode, regno);
6971 	      else
6972 		{
6973 		  SUBST_MODE (regno_reg_rtx[regno], compare_mode);
6974 		  new_dest = regno_reg_rtx[regno];
6975 		}
6976 
6977 	      SUBST (SET_DEST (x), new_dest);
6978 	      SUBST (XEXP (*cc_use, 0), new_dest);
6979 	      other_changed = 1;
6980 
6981 	      dest = new_dest;
6982 	    }
6983 	}
6984 #endif  /* SELECT_CC_MODE */
6985 
6986       /* If the code changed, we have to build a new comparison in
6987 	 undobuf.other_insn.  */
6988       if (new_code != old_code)
6989 	{
6990 	  int other_changed_previously = other_changed;
6991 	  unsigned HOST_WIDE_INT mask;
6992 	  rtx old_cc_use = *cc_use;
6993 
6994 	  SUBST (*cc_use, gen_rtx_fmt_ee (new_code, GET_MODE (*cc_use),
6995 					  dest, const0_rtx));
6996 	  other_changed = 1;
6997 
6998 	  /* If the only change we made was to change an EQ into an NE or
6999 	     vice versa, OP0 has only one bit that might be nonzero, and OP1
7000 	     is zero, check if changing the user of the condition code will
7001 	     produce a valid insn.  If it won't, we can keep the original code
7002 	     in that insn by surrounding our operation with an XOR.  */
7003 
7004 	  if (((old_code == NE && new_code == EQ)
7005 	       || (old_code == EQ && new_code == NE))
7006 	      && ! other_changed_previously && op1 == const0_rtx
7007 	      && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
7008 	      && pow2p_hwi (mask = nonzero_bits (op0, GET_MODE (op0))))
7009 	    {
7010 	      rtx pat = PATTERN (other_insn), note = 0;
7011 
7012 	      if ((recog_for_combine (&pat, other_insn, &note) < 0
7013 		   && ! check_asm_operands (pat)))
7014 		{
7015 		  *cc_use = old_cc_use;
7016 		  other_changed = 0;
7017 
7018 		  op0 = simplify_gen_binary (XOR, GET_MODE (op0), op0,
7019 					     gen_int_mode (mask,
7020 							   GET_MODE (op0)));
7021 		}
7022 	    }
7023 	}
7024 
7025       if (other_changed)
7026 	undobuf.other_insn = other_insn;
7027 
7028       /* Don't generate a compare of a CC with 0, just use that CC.  */
7029       if (GET_MODE (op0) == compare_mode && op1 == const0_rtx)
7030 	{
7031 	  SUBST (SET_SRC (x), op0);
7032 	  src = SET_SRC (x);
7033 	}
7034       /* Otherwise, if we didn't previously have the same COMPARE we
7035 	 want, create it from scratch.  */
7036       else if (GET_CODE (src) != COMPARE || GET_MODE (src) != compare_mode
7037 	       || XEXP (src, 0) != op0 || XEXP (src, 1) != op1)
7038 	{
7039 	  SUBST (SET_SRC (x), gen_rtx_COMPARE (compare_mode, op0, op1));
7040 	  src = SET_SRC (x);
7041 	}
7042     }
7043   else
7044     {
7045       /* Get SET_SRC in a form where we have placed back any
7046 	 compound expressions.  Then do the checks below.  */
7047       src = make_compound_operation (src, SET);
7048       SUBST (SET_SRC (x), src);
7049     }
7050 
7051   /* If we have (set x (subreg:m1 (op:m2 ...) 0)) with OP being some operation,
7052      and X being a REG or (subreg (reg)), we may be able to convert this to
7053      (set (subreg:m2 x) (op)).
7054 
7055      We can always do this if M1 is narrower than M2 because that means that
7056      we only care about the low bits of the result.
7057 
7058      However, on machines without WORD_REGISTER_OPERATIONS defined, we cannot
7059      perform a narrower operation than requested since the high-order bits will
7060 	 be undefined.  On machines where it is defined, this transformation is safe
7061      as long as M1 and M2 have the same number of words.  */
7062 
7063   if (GET_CODE (src) == SUBREG && subreg_lowpart_p (src)
7064       && !OBJECT_P (SUBREG_REG (src))
7065       && (known_equal_after_align_up
7066 	  (GET_MODE_SIZE (GET_MODE (src)),
7067 	   GET_MODE_SIZE (GET_MODE (SUBREG_REG (src))),
7068 	   UNITS_PER_WORD))
7069       && (WORD_REGISTER_OPERATIONS || !paradoxical_subreg_p (src))
7070       && ! (REG_P (dest) && REGNO (dest) < FIRST_PSEUDO_REGISTER
7071 	    && !REG_CAN_CHANGE_MODE_P (REGNO (dest),
7072 				       GET_MODE (SUBREG_REG (src)),
7073 				       GET_MODE (src)))
7074       && (REG_P (dest)
7075 	  || (GET_CODE (dest) == SUBREG
7076 	      && REG_P (SUBREG_REG (dest)))))
7077     {
7078       SUBST (SET_DEST (x),
7079 	     gen_lowpart (GET_MODE (SUBREG_REG (src)),
7080 				      dest));
7081       SUBST (SET_SRC (x), SUBREG_REG (src));
7082 
7083       src = SET_SRC (x), dest = SET_DEST (x);
7084     }
7085 
7086   /* If we have (set (cc0) (subreg ...)), we try to remove the subreg
7087      in SRC.  */
7088   if (dest == cc0_rtx
7089       && partial_subreg_p (src)
7090       && subreg_lowpart_p (src))
7091     {
7092       rtx inner = SUBREG_REG (src);
7093       machine_mode inner_mode = GET_MODE (inner);
7094 
7095       /* Here we make sure that we don't have a sign bit on.  */
7096       if (val_signbit_known_clear_p (GET_MODE (src),
7097 				     nonzero_bits (inner, inner_mode)))
7098 	{
7099 	  SUBST (SET_SRC (x), inner);
7100 	  src = SET_SRC (x);
7101 	}
7102     }
7103 
7104   /* If we have (set FOO (subreg:M (mem:N BAR) 0)) with M wider than N, this
7105      would require a paradoxical subreg.  Replace the subreg with a
7106      zero_extend to avoid the reload that would otherwise be required.
7107      Don't do this unless we have a scalar integer mode, otherwise the
7108      transformation is incorrect.  */
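  /* Illustrative, assuming a target whose natural QImode load is
     zero-extending: (subreg:SI (mem:QI ADDR) 0) as SET_SRC is replaced by
     (zero_extend:SI (mem:QI ADDR)).  */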
7109 
7110   enum rtx_code extend_op;
7111   if (paradoxical_subreg_p (src)
7112       && MEM_P (SUBREG_REG (src))
7113       && SCALAR_INT_MODE_P (GET_MODE (src))
7114       && (extend_op = load_extend_op (GET_MODE (SUBREG_REG (src)))) != UNKNOWN)
7115     {
7116       SUBST (SET_SRC (x),
7117 	     gen_rtx_fmt_e (extend_op, GET_MODE (src), SUBREG_REG (src)));
7118 
7119       src = SET_SRC (x);
7120     }
7121 
7122   /* If we don't have a conditional move, SET_SRC is an IF_THEN_ELSE, and we
7123      are comparing an item known to be 0 or -1 against 0, use a logical
7124      operation instead. Check for one of the arms being an IOR of the other
7125      arm with some value.  We compute three terms to be IOR'ed together.  In
7126      practice, at most two will be nonzero.  Then we do the IOR's.  */
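  /* Illustrative: if A is known to be 0 or -1, (if_then_else (ne A 0) X Y)
     can be computed as (ior (and A X) (and (not A) Y)).  */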
7127 
7128   if (GET_CODE (dest) != PC
7129       && GET_CODE (src) == IF_THEN_ELSE
7130       && is_int_mode (GET_MODE (src), &int_mode)
7131       && (GET_CODE (XEXP (src, 0)) == EQ || GET_CODE (XEXP (src, 0)) == NE)
7132       && XEXP (XEXP (src, 0), 1) == const0_rtx
7133       && int_mode == GET_MODE (XEXP (XEXP (src, 0), 0))
7134       && (!HAVE_conditional_move
7135 	  || ! can_conditionally_move_p (int_mode))
7136       && (num_sign_bit_copies (XEXP (XEXP (src, 0), 0), int_mode)
7137 	  == GET_MODE_PRECISION (int_mode))
7138       && ! side_effects_p (src))
7139     {
7140       rtx true_rtx = (GET_CODE (XEXP (src, 0)) == NE
7141 		      ? XEXP (src, 1) : XEXP (src, 2));
7142       rtx false_rtx = (GET_CODE (XEXP (src, 0)) == NE
7143 		   ? XEXP (src, 2) : XEXP (src, 1));
7144       rtx term1 = const0_rtx, term2, term3;
7145 
7146       if (GET_CODE (true_rtx) == IOR
7147 	  && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
7148 	term1 = false_rtx, true_rtx = XEXP (true_rtx, 1), false_rtx = const0_rtx;
7149       else if (GET_CODE (true_rtx) == IOR
7150 	       && rtx_equal_p (XEXP (true_rtx, 1), false_rtx))
7151 	term1 = false_rtx, true_rtx = XEXP (true_rtx, 0), false_rtx = const0_rtx;
7152       else if (GET_CODE (false_rtx) == IOR
7153 	       && rtx_equal_p (XEXP (false_rtx, 0), true_rtx))
7154 	term1 = true_rtx, false_rtx = XEXP (false_rtx, 1), true_rtx = const0_rtx;
7155       else if (GET_CODE (false_rtx) == IOR
7156 	       && rtx_equal_p (XEXP (false_rtx, 1), true_rtx))
7157 	term1 = true_rtx, false_rtx = XEXP (false_rtx, 0), true_rtx = const0_rtx;
7158 
7159       term2 = simplify_gen_binary (AND, int_mode,
7160 				   XEXP (XEXP (src, 0), 0), true_rtx);
7161       term3 = simplify_gen_binary (AND, int_mode,
7162 				   simplify_gen_unary (NOT, int_mode,
7163 						       XEXP (XEXP (src, 0), 0),
7164 						       int_mode),
7165 				   false_rtx);
7166 
7167       SUBST (SET_SRC (x),
7168 	     simplify_gen_binary (IOR, int_mode,
7169 				  simplify_gen_binary (IOR, int_mode,
7170 						       term1, term2),
7171 				  term3));
7172 
7173       src = SET_SRC (x);
7174     }
7175 
7176   /* If either SRC or DEST is a CLOBBER of (const_int 0), make this
7177      whole thing fail.  */
7178   if (GET_CODE (src) == CLOBBER && XEXP (src, 0) == const0_rtx)
7179     return src;
7180   else if (GET_CODE (dest) == CLOBBER && XEXP (dest, 0) == const0_rtx)
7181     return dest;
7182   else
7183     /* Convert this into a field assignment operation, if possible.  */
7184     return make_field_assignment (x);
7185 }
7186 
7187 /* Simplify X, an AND, IOR, or XOR operation, and return the simplified
7188    result.  */
7189 
7190 static rtx
7191 simplify_logical (rtx x)
7192 {
7193   rtx op0 = XEXP (x, 0);
7194   rtx op1 = XEXP (x, 1);
7195   scalar_int_mode mode;
7196 
7197   switch (GET_CODE (x))
7198     {
7199     case AND:
7200       /* We can call simplify_and_const_int only if we don't lose
7201 	 any (sign) bits when converting INTVAL (op1) to
7202 	 "unsigned HOST_WIDE_INT".  */
7203       if (is_a <scalar_int_mode> (GET_MODE (x), &mode)
7204 	  && CONST_INT_P (op1)
7205 	  && (HWI_COMPUTABLE_MODE_P (mode)
7206 	      || INTVAL (op1) > 0))
7207 	{
7208 	  x = simplify_and_const_int (x, mode, op0, INTVAL (op1));
7209 	  if (GET_CODE (x) != AND)
7210 	    return x;
7211 
7212 	  op0 = XEXP (x, 0);
7213 	  op1 = XEXP (x, 1);
7214 	}
7215 
7216       /* If we have any of (and (ior A B) C) or (and (xor A B) C),
7217 	 apply the distributive law and then the inverse distributive
7218 	 law to see if things simplify.  */
7219       if (GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
7220 	{
7221 	  rtx result = distribute_and_simplify_rtx (x, 0);
7222 	  if (result)
7223 	    return result;
7224 	}
7225       if (GET_CODE (op1) == IOR || GET_CODE (op1) == XOR)
7226 	{
7227 	  rtx result = distribute_and_simplify_rtx (x, 1);
7228 	  if (result)
7229 	    return result;
7230 	}
7231       break;
7232 
7233     case IOR:
7234       /* If we have (ior (and A B) C), apply the distributive law and then
7235 	 the inverse distributive law to see if things simplify.  */
7236 
7237       if (GET_CODE (op0) == AND)
7238 	{
7239 	  rtx result = distribute_and_simplify_rtx (x, 0);
7240 	  if (result)
7241 	    return result;
7242 	}
7243 
7244       if (GET_CODE (op1) == AND)
7245 	{
7246 	  rtx result = distribute_and_simplify_rtx (x, 1);
7247 	  if (result)
7248 	    return result;
7249 	}
7250       break;
7251 
7252     default:
7253       gcc_unreachable ();
7254     }
7255 
7256   return x;
7257 }
7258 
7259 /* We consider ZERO_EXTRACT, SIGN_EXTRACT, and SIGN_EXTEND as "compound
7260    operations" because they can be replaced with two more basic operations.
7261    ZERO_EXTEND is also considered "compound" because it can be replaced with
7262    an AND operation, which is simpler, though only one operation.
7263 
7264    The function expand_compound_operation is called with an rtx expression
7265    and will convert it to the appropriate shifts and AND operations,
7266    simplifying at each stage.
7267 
7268    The function make_compound_operation is called to convert an expression
7269    consisting of shifts and ANDs into the equivalent compound expression.
7270    It is the inverse of this function, loosely speaking.  */
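/* Illustrative: (zero_extend:SI (reg:QI R)) typically expands to
   (and:SI (subreg:SI (reg:QI R) 0) (const_int 255)), while
   (sign_extend:SI (reg:QI R)) expands to
   (ashiftrt:SI (ashift:SI (subreg:SI (reg:QI R) 0) 24) 24).  */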
7271 
7272 static rtx
7273 expand_compound_operation (rtx x)
7274 {
7275   unsigned HOST_WIDE_INT pos = 0, len;
7276   int unsignedp = 0;
7277   unsigned int modewidth;
7278   rtx tem;
7279   scalar_int_mode inner_mode;
7280 
7281   switch (GET_CODE (x))
7282     {
7283     case ZERO_EXTEND:
7284       unsignedp = 1;
7285       /* FALLTHRU */
7286     case SIGN_EXTEND:
7287       /* We can't necessarily use a const_int for a multiword mode;
7288 	 it depends on implicitly extending the value.
7289 	 Since we don't know the right way to extend it,
7290 	 we can't tell whether the implicit way is right.
7291 
7292 	 Even for a mode that is no wider than a const_int,
7293 	 we can't win, because we need to sign extend one of its bits through
7294 	 the rest of it, and we don't know which bit.  */
7295       if (CONST_INT_P (XEXP (x, 0)))
7296 	return x;
7297 
7298       /* Reject modes that aren't scalar integers because turning vector
7299 	 or complex modes into shifts causes problems.  */
7300       if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
7301 	return x;
7302 
7303       /* Return if (subreg:MODE FROM 0) is not a safe replacement for
7304 	 (zero_extend:MODE FROM) or (sign_extend:MODE FROM).  It is for any MEM
7305 	 because (SUBREG (MEM...)) is guaranteed to cause the MEM to be
7306 	 reloaded.  If not for that, MEMs would very rarely be safe.
7307 
7308 	 Reject modes bigger than a word, because we might not be able
7309 	 to reference a two-register group starting with an arbitrary register
7310 	 (and currently gen_lowpart might crash for a SUBREG).  */
7311 
7312       if (GET_MODE_SIZE (inner_mode) > UNITS_PER_WORD)
7313 	return x;
7314 
7315       len = GET_MODE_PRECISION (inner_mode);
7316       /* If the inner object has VOIDmode (the only way this can happen
7317 	 is if it is an ASM_OPERANDS), we can't do anything since we don't
7318 	 know how much masking to do.  */
7319       if (len == 0)
7320 	return x;
7321 
7322       break;
7323 
7324     case ZERO_EXTRACT:
7325       unsignedp = 1;
7326 
7327       /* fall through */
7328 
7329     case SIGN_EXTRACT:
7330       /* If the operand is a CLOBBER, just return it.  */
7331       if (GET_CODE (XEXP (x, 0)) == CLOBBER)
7332 	return XEXP (x, 0);
7333 
7334       if (!CONST_INT_P (XEXP (x, 1))
7335 	  || !CONST_INT_P (XEXP (x, 2)))
7336 	return x;
7337 
7338       /* Reject modes that aren't scalar integers because turning vector
7339 	 or complex modes into shifts causes problems.  */
7340       if (!is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
7341 	return x;
7342 
7343       len = INTVAL (XEXP (x, 1));
7344       pos = INTVAL (XEXP (x, 2));
7345 
7346       /* This should stay within the object being extracted, fail otherwise.  */
7347       if (len + pos > GET_MODE_PRECISION (inner_mode))
7348 	return x;
7349 
7350       if (BITS_BIG_ENDIAN)
7351 	pos = GET_MODE_PRECISION (inner_mode) - len - pos;
7352 
7353       break;
7354 
7355     default:
7356       return x;
7357     }
7358 
7359   /* We've rejected non-scalar operations by now.  */
7360   scalar_int_mode mode = as_a <scalar_int_mode> (GET_MODE (x));
7361 
7362   /* Convert sign extension to zero extension, if we know that the high
7363      bit is not set, as this is easier to optimize.  It will be converted
7364      back to a cheaper alternative in make_extraction.  */
7365   if (GET_CODE (x) == SIGN_EXTEND
7366       && HWI_COMPUTABLE_MODE_P (mode)
7367       && ((nonzero_bits (XEXP (x, 0), inner_mode)
7368 	   & ~(((unsigned HOST_WIDE_INT) GET_MODE_MASK (inner_mode)) >> 1))
7369 	  == 0))
7370     {
7371       rtx temp = gen_rtx_ZERO_EXTEND (mode, XEXP (x, 0));
7372       rtx temp2 = expand_compound_operation (temp);
7373 
7374       /* Make sure this is a profitable operation.  */
7375       if (set_src_cost (x, mode, optimize_this_for_speed_p)
7376           > set_src_cost (temp2, mode, optimize_this_for_speed_p))
7377        return temp2;
7378       else if (set_src_cost (x, mode, optimize_this_for_speed_p)
7379                > set_src_cost (temp, mode, optimize_this_for_speed_p))
7380        return temp;
7381       else
7382        return x;
7383     }
7384 
7385   /* We can optimize some special cases of ZERO_EXTEND.  */
7386   if (GET_CODE (x) == ZERO_EXTEND)
7387     {
7388       /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI if we
7389 	 know that the last value didn't have any inappropriate bits
7390 	 set.  */
7391       if (GET_CODE (XEXP (x, 0)) == TRUNCATE
7392 	  && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
7393 	  && HWI_COMPUTABLE_MODE_P (mode)
7394 	  && (nonzero_bits (XEXP (XEXP (x, 0), 0), mode)
7395 	      & ~GET_MODE_MASK (inner_mode)) == 0)
7396 	return XEXP (XEXP (x, 0), 0);
7397 
7398       /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)).  */
7399       if (GET_CODE (XEXP (x, 0)) == SUBREG
7400 	  && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
7401 	  && subreg_lowpart_p (XEXP (x, 0))
7402 	  && HWI_COMPUTABLE_MODE_P (mode)
7403 	  && (nonzero_bits (SUBREG_REG (XEXP (x, 0)), mode)
7404 	      & ~GET_MODE_MASK (inner_mode)) == 0)
7405 	return SUBREG_REG (XEXP (x, 0));
7406 
7407       /* (zero_extend:DI (truncate:SI foo:DI)) is just foo:DI when foo
7408 	 is a comparison and STORE_FLAG_VALUE permits.  This is like
7409 	 the first case, but it works even when MODE is larger
7410 	 than HOST_WIDE_INT.  */
7411       if (GET_CODE (XEXP (x, 0)) == TRUNCATE
7412 	  && GET_MODE (XEXP (XEXP (x, 0), 0)) == mode
7413 	  && COMPARISON_P (XEXP (XEXP (x, 0), 0))
7414 	  && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
7415 	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
7416 	return XEXP (XEXP (x, 0), 0);
7417 
7418       /* Likewise for (zero_extend:DI (subreg:SI foo:DI 0)).  */
7419       if (GET_CODE (XEXP (x, 0)) == SUBREG
7420 	  && GET_MODE (SUBREG_REG (XEXP (x, 0))) == mode
7421 	  && subreg_lowpart_p (XEXP (x, 0))
7422 	  && COMPARISON_P (SUBREG_REG (XEXP (x, 0)))
7423 	  && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
7424 	  && (STORE_FLAG_VALUE & ~GET_MODE_MASK (inner_mode)) == 0)
7425 	return SUBREG_REG (XEXP (x, 0));
7426 
7427     }
7428 
7429   /* If we reach here, we want to return a pair of shifts.  The inner
7430      shift is a left shift of BITSIZE - POS - LEN bits.  The outer
7431      shift is a right shift of BITSIZE - LEN bits.  It is arithmetic or
7432      logical depending on the value of UNSIGNEDP.
7433 
7434      If this was a ZERO_EXTEND or ZERO_EXTRACT, this pair of shifts will be
7435      converted into an AND of a shift.
7436 
7437      We must check for the case where the left shift would have a negative
7438      count.  This can happen in a case like (x >> 31) & 255 on machines
7439      that can't shift by a constant.  On those machines, we would first
7440      combine the shift with the AND to produce a variable-position
7441      extraction.  Then the constant of 31 would be substituted in
7442      to produce such a position.  */
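  /* As an added illustration (not part of the original comment): for a
     sign extraction of LEN == 8 bits starting at POS == 4 in a 32-bit
     mode, the code below would produce

	 (ashiftrt:SI (ashift:SI X (const_int 20)) (const_int 24))

     i.e. an inner left shift of 32 - 4 - 8 == 20 bits followed by an
     arithmetic right shift of 32 - 8 == 24 bits; the unsigned case would
     use LSHIFTRT for the outer shift instead.  */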
7443 
7444   modewidth = GET_MODE_PRECISION (mode);
7445   if (modewidth >= pos + len)
7446     {
7447       tem = gen_lowpart (mode, XEXP (x, 0));
7448       if (!tem || GET_CODE (tem) == CLOBBER)
7449 	return x;
7450       tem = simplify_shift_const (NULL_RTX, ASHIFT, mode,
7451 				  tem, modewidth - pos - len);
7452       tem = simplify_shift_const (NULL_RTX, unsignedp ? LSHIFTRT : ASHIFTRT,
7453 				  mode, tem, modewidth - len);
7454     }
7455   else if (unsignedp && len < HOST_BITS_PER_WIDE_INT)
7456     tem = simplify_and_const_int (NULL_RTX, mode,
7457 				  simplify_shift_const (NULL_RTX, LSHIFTRT,
7458 							mode, XEXP (x, 0),
7459 							pos),
7460 				  (HOST_WIDE_INT_1U << len) - 1);
7461   else
7462     /* Any other cases we can't handle.  */
7463     return x;
7464 
7465   /* If we couldn't do this for some reason, return the original
7466      expression.  */
7467   if (GET_CODE (tem) == CLOBBER)
7468     return x;
7469 
7470   return tem;
7471 }
7472 
7473 /* X is a SET which contains an assignment of one object into
7474    a part of another (such as a bit-field assignment, STRICT_LOW_PART,
7475    or certain SUBREGS). If possible, convert it into a series of
7476    logical operations.
7477 
7478    We half-heartedly support variable positions, but do not at all
7479    support variable lengths.  */
7480 
7481 static const_rtx
7482 expand_field_assignment (const_rtx x)
7483 {
7484   rtx inner;
7485   rtx pos;			/* Always counts from low bit.  */
7486   int len, inner_len;
7487   rtx mask, cleared, masked;
7488   scalar_int_mode compute_mode;
7489 
7490   /* Loop until we find something we can't simplify.  */
7491   while (1)
7492     {
7493       if (GET_CODE (SET_DEST (x)) == STRICT_LOW_PART
7494 	  && GET_CODE (XEXP (SET_DEST (x), 0)) == SUBREG)
7495 	{
7496 	  rtx x0 = XEXP (SET_DEST (x), 0);
7497 	  if (!GET_MODE_PRECISION (GET_MODE (x0)).is_constant (&len))
7498 	    break;
7499 	  inner = SUBREG_REG (XEXP (SET_DEST (x), 0));
7500 	  pos = gen_int_mode (subreg_lsb (XEXP (SET_DEST (x), 0)),
7501 			      MAX_MODE_INT);
7502 	}
7503       else if (GET_CODE (SET_DEST (x)) == ZERO_EXTRACT
7504 	       && CONST_INT_P (XEXP (SET_DEST (x), 1)))
7505 	{
7506 	  inner = XEXP (SET_DEST (x), 0);
7507 	  if (!GET_MODE_PRECISION (GET_MODE (inner)).is_constant (&inner_len))
7508 	    break;
7509 
7510 	  len = INTVAL (XEXP (SET_DEST (x), 1));
7511 	  pos = XEXP (SET_DEST (x), 2);
7512 
7513 	  /* A constant position should stay within the width of INNER.  */
7514 	  if (CONST_INT_P (pos) && INTVAL (pos) + len > inner_len)
7515 	    break;
7516 
7517 	  if (BITS_BIG_ENDIAN)
7518 	    {
7519 	      if (CONST_INT_P (pos))
7520 		pos = GEN_INT (inner_len - len - INTVAL (pos));
7521 	      else if (GET_CODE (pos) == MINUS
7522 		       && CONST_INT_P (XEXP (pos, 1))
7523 		       && INTVAL (XEXP (pos, 1)) == inner_len - len)
7524 		/* If position is ADJUST - X, new position is X.  */
7525 		pos = XEXP (pos, 0);
7526 	      else
7527 		pos = simplify_gen_binary (MINUS, GET_MODE (pos),
7528 					   gen_int_mode (inner_len - len,
7529 							 GET_MODE (pos)),
7530 					   pos);
7531 	    }
7532 	}
7533 
7534       /* If the destination is a subreg that overwrites the whole of the inner
7535 	 register, we can move the subreg to the source.  */
7536       else if (GET_CODE (SET_DEST (x)) == SUBREG
7537 	       /* We need SUBREGs to compute nonzero_bits properly.  */
7538 	       && nonzero_sign_valid
7539 	       && !read_modify_subreg_p (SET_DEST (x)))
7540 	{
7541 	  x = gen_rtx_SET (SUBREG_REG (SET_DEST (x)),
7542 			   gen_lowpart
7543 			   (GET_MODE (SUBREG_REG (SET_DEST (x))),
7544 			    SET_SRC (x)));
7545 	  continue;
7546 	}
7547       else
7548 	break;
7549 
7550       while (GET_CODE (inner) == SUBREG && subreg_lowpart_p (inner))
7551 	inner = SUBREG_REG (inner);
7552 
7553       /* Don't attempt bitwise arithmetic on non-scalar integer modes.  */
7554       if (!is_a <scalar_int_mode> (GET_MODE (inner), &compute_mode))
7555 	{
7556 	  /* Don't do anything for vector or complex integral types.  */
7557 	  if (! FLOAT_MODE_P (GET_MODE (inner)))
7558 	    break;
7559 
7560 	  /* Try to find an integral mode to pun with.  */
7561 	  if (!int_mode_for_size (GET_MODE_BITSIZE (GET_MODE (inner)), 0)
7562 	      .exists (&compute_mode))
7563 	    break;
7564 
7565 	  inner = gen_lowpart (compute_mode, inner);
7566 	}
7567 
7568       /* Compute a mask of LEN bits, if we can do this on the host machine.  */
7569       if (len >= HOST_BITS_PER_WIDE_INT)
7570 	break;
7571 
7572       /* Don't try to compute in too wide unsupported modes.  */
7573       if (!targetm.scalar_mode_supported_p (compute_mode))
7574 	break;
7575 
7576       /* Now compute the equivalent expression.  Make a copy of INNER
7577 	 for the SET_DEST in case it is a MEM into which we will substitute;
7578 	 we don't want shared RTL in that case.  */
7579       mask = gen_int_mode ((HOST_WIDE_INT_1U << len) - 1,
7580 			   compute_mode);
7581       cleared = simplify_gen_binary (AND, compute_mode,
7582 				     simplify_gen_unary (NOT, compute_mode,
7583 				       simplify_gen_binary (ASHIFT,
7584 							    compute_mode,
7585 							    mask, pos),
7586 				       compute_mode),
7587 				     inner);
7588       masked = simplify_gen_binary (ASHIFT, compute_mode,
7589 				    simplify_gen_binary (
7590 				      AND, compute_mode,
7591 				      gen_lowpart (compute_mode, SET_SRC (x)),
7592 				      mask),
7593 				    pos);
7594 
7595       x = gen_rtx_SET (copy_rtx (inner),
7596 		       simplify_gen_binary (IOR, compute_mode,
7597 					    cleared, masked));
7598     }
7599 
7600   return x;
7601 }
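/* A minimal sketch, added here for illustration only, of the bit-field
   store that expand_field_assignment builds out of AND, NOT, ASHIFT and
   IOR, written as plain C on host integers (assuming LEN < 32):

     unsigned int
     store_field (unsigned int inner, unsigned int src,
		  unsigned int pos, unsigned int len)
     {
       unsigned int mask = (1u << len) - 1;
       unsigned int cleared = inner & ~(mask << pos);
       unsigned int masked = (src & mask) << pos;
       return cleared | masked;
     }

   The RTL built above follows the same three steps: clear the field in
   INNER, mask and shift SET_SRC into position, then IOR the two.  */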
7602 
7603 /* Return an RTX for a reference to LEN bits of INNER.  If POS_RTX is nonzero,
7604    it is an RTX that represents the (variable) starting position; otherwise,
7605    POS is the (constant) starting bit position.  Both are counted from the LSB.
7606 
7607    UNSIGNEDP is nonzero for an unsigned reference and zero for a signed one.
7608 
7609    IN_DEST is nonzero if this is a reference in the destination of a SET.
7610    This is used when a ZERO_ or SIGN_EXTRACT isn't needed.  If nonzero,
7611    a STRICT_LOW_PART will be used, if zero, ZERO_EXTEND or SIGN_EXTEND will
7612    be used.
7613 
7614    IN_COMPARE is nonzero if we are in a COMPARE.  This means that a
7615    ZERO_EXTRACT should be built even for bits starting at bit 0.
7616 
7617    MODE is the desired mode of the result (if IN_DEST == 0).
7618 
7619    The result is an RTX for the extraction or NULL_RTX if the target
7620    can't handle it.  */
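/* For illustration (an added example, not original text): on a typical
   target a call such as

     make_extraction (SImode, reg, 8, NULL_RTX, 16, 1, 0, 0)

   asks for bits 8 through 23 of REG as an unsigned SImode value and,
   when no cheaper form applies, yields roughly

     (zero_extract:SI (reg) (const_int 16) (const_int 8))

   while passing UNSIGNEDP == 0 would request a SIGN_EXTRACT instead.  */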
7621 
7622 static rtx
7623 make_extraction (machine_mode mode, rtx inner, HOST_WIDE_INT pos,
7624 		 rtx pos_rtx, unsigned HOST_WIDE_INT len, int unsignedp,
7625 		 int in_dest, int in_compare)
7626 {
7627   /* This mode describes the size of the storage area
7628      to fetch the overall value from.  Within that, we
7629      ignore the POS lowest bits, etc.  */
7630   machine_mode is_mode = GET_MODE (inner);
7631   machine_mode inner_mode;
7632   scalar_int_mode wanted_inner_mode;
7633   scalar_int_mode wanted_inner_reg_mode = word_mode;
7634   scalar_int_mode pos_mode = word_mode;
7635   machine_mode extraction_mode = word_mode;
7636   rtx new_rtx = 0;
7637   rtx orig_pos_rtx = pos_rtx;
7638   HOST_WIDE_INT orig_pos;
7639 
7640   if (pos_rtx && CONST_INT_P (pos_rtx))
7641     pos = INTVAL (pos_rtx), pos_rtx = 0;
7642 
7643   if (GET_CODE (inner) == SUBREG
7644       && subreg_lowpart_p (inner)
7645       && (paradoxical_subreg_p (inner)
7646 	  /* If trying or potentially trying to extract
7647 	     bits outside of is_mode, don't look through
7648 	     non-paradoxical SUBREGs.  See PR82192.  */
7649 	  || (pos_rtx == NULL_RTX
7650 	      && known_le (pos + len, GET_MODE_PRECISION (is_mode)))))
7651     {
7652       /* If going from (subreg:SI (mem:QI ...)) to (mem:QI ...),
7653 	 consider just the QI as the memory to extract from.
7654 	 The subreg adds or removes high bits; its mode is
7655 	 irrelevant to the meaning of this extraction,
7656 	 since POS and LEN count from the lsb.  */
7657       if (MEM_P (SUBREG_REG (inner)))
7658 	is_mode = GET_MODE (SUBREG_REG (inner));
7659       inner = SUBREG_REG (inner);
7660     }
7661   else if (GET_CODE (inner) == ASHIFT
7662 	   && CONST_INT_P (XEXP (inner, 1))
7663 	   && pos_rtx == 0 && pos == 0
7664 	   && len > UINTVAL (XEXP (inner, 1)))
7665     {
7666       /* We're extracting the least significant bits of an rtx
7667 	 (ashift X (const_int C)), where LEN > C.  Extract the
7668 	 least significant (LEN - C) bits of X, giving an rtx
7669 	 whose mode is MODE, then shift it left C times.  */
7670       new_rtx = make_extraction (mode, XEXP (inner, 0),
7671 			     0, 0, len - INTVAL (XEXP (inner, 1)),
7672 			     unsignedp, in_dest, in_compare);
7673       if (new_rtx != 0)
7674 	return gen_rtx_ASHIFT (mode, new_rtx, XEXP (inner, 1));
7675     }
7676   else if (GET_CODE (inner) == TRUNCATE
7677 	   /* If trying or potentially trying to extract
7678 	      bits outside of is_mode, don't look through
7679 	      TRUNCATE.  See PR82192.  */
7680 	   && pos_rtx == NULL_RTX
7681 	   && known_le (pos + len, GET_MODE_PRECISION (is_mode)))
7682     inner = XEXP (inner, 0);
7683 
7684   inner_mode = GET_MODE (inner);
7685 
7686   /* See if this can be done without an extraction.  We never can if the
7687      width of the field is not the same as that of some integer mode. For
7688      registers, we can only avoid the extraction if the position is at the
7689      low-order bit and this is either not in the destination or we have the
7690      appropriate STRICT_LOW_PART operation available.
7691 
7692      For MEM, we can avoid an extract if the field starts on an appropriate
7693      boundary and we can change the mode of the memory reference.  */
7694 
7695   scalar_int_mode tmode;
7696   if (int_mode_for_size (len, 1).exists (&tmode)
7697       && ((pos_rtx == 0 && (pos % BITS_PER_WORD) == 0
7698 	   && !MEM_P (inner)
7699 	   && (pos == 0 || REG_P (inner))
7700 	   && (inner_mode == tmode
7701 	       || !REG_P (inner)
7702 	       || TRULY_NOOP_TRUNCATION_MODES_P (tmode, inner_mode)
7703 	       || reg_truncated_to_mode (tmode, inner))
7704 	   && (! in_dest
7705 	       || (REG_P (inner)
7706 		   && have_insn_for (STRICT_LOW_PART, tmode))))
7707 	  || (MEM_P (inner) && pos_rtx == 0
7708 	      && (pos
7709 		  % (STRICT_ALIGNMENT ? GET_MODE_ALIGNMENT (tmode)
7710 		     : BITS_PER_UNIT)) == 0
7711 	      /* We can't do this if we are widening INNER_MODE (it
7712 		 may not be aligned, for one thing).  */
7713 	      && !paradoxical_subreg_p (tmode, inner_mode)
7714 	      && known_le (pos + len, GET_MODE_PRECISION (is_mode))
7715 	      && (inner_mode == tmode
7716 		  || (! mode_dependent_address_p (XEXP (inner, 0),
7717 						  MEM_ADDR_SPACE (inner))
7718 		      && ! MEM_VOLATILE_P (inner))))))
7719     {
7720       /* If INNER is a MEM, make a new MEM that encompasses just the desired
7721 	 field.  If the original and current mode are the same, we need not
7722 	 adjust the offset.  Otherwise, we do so if bytes are big endian.
7723 
7724 	 If INNER is not a MEM, get a piece consisting of just the field
7725 	 of interest (in this case POS % BITS_PER_WORD must be 0).  */
7726 
7727       if (MEM_P (inner))
7728 	{
7729 	  poly_int64 offset;
7730 
7731 	  /* POS counts from lsb, but make OFFSET count in memory order.  */
7732 	  if (BYTES_BIG_ENDIAN)
7733 	    offset = bits_to_bytes_round_down (GET_MODE_PRECISION (is_mode)
7734 					       - len - pos);
7735 	  else
7736 	    offset = pos / BITS_PER_UNIT;
7737 
7738 	  new_rtx = adjust_address_nv (inner, tmode, offset);
7739 	}
7740       else if (REG_P (inner))
7741 	{
7742 	  if (tmode != inner_mode)
7743 	    {
7744 	      /* We can't call gen_lowpart in a DEST since we
7745 		 always want a SUBREG (see below) and it would sometimes
7746 		 return a new hard register.  */
7747 	      if (pos || in_dest)
7748 		{
7749 		  poly_uint64 offset
7750 		    = subreg_offset_from_lsb (tmode, inner_mode, pos);
7751 
7752 		  /* Avoid creating invalid subregs, for example when
7753 		     simplifying (x>>32)&255.  */
7754 		  if (!validate_subreg (tmode, inner_mode, inner, offset))
7755 		    return NULL_RTX;
7756 
7757 		  new_rtx = gen_rtx_SUBREG (tmode, inner, offset);
7758 		}
7759 	      else
7760 		new_rtx = gen_lowpart (tmode, inner);
7761 	    }
7762 	  else
7763 	    new_rtx = inner;
7764 	}
7765       else
7766 	new_rtx = force_to_mode (inner, tmode,
7767 				 len >= HOST_BITS_PER_WIDE_INT
7768 				 ? HOST_WIDE_INT_M1U
7769 				 : (HOST_WIDE_INT_1U << len) - 1, 0);
7770 
7771       /* If this extraction is going into the destination of a SET,
7772 	 make a STRICT_LOW_PART unless we made a MEM.  */
7773 
7774       if (in_dest)
7775 	return (MEM_P (new_rtx) ? new_rtx
7776 		: (GET_CODE (new_rtx) != SUBREG
7777 		   ? gen_rtx_CLOBBER (tmode, const0_rtx)
7778 		   : gen_rtx_STRICT_LOW_PART (VOIDmode, new_rtx)));
7779 
7780       if (mode == tmode)
7781 	return new_rtx;
7782 
7783       if (CONST_SCALAR_INT_P (new_rtx))
7784 	return simplify_unary_operation (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7785 					 mode, new_rtx, tmode);
7786 
7787       /* If we know that no extraneous bits are set, and that the high
7788 	 bit is not set, convert the extraction to the cheaper of
7789 	 sign and zero extension, that are equivalent in these cases.  */
7790       if (flag_expensive_optimizations
7791 	  && (HWI_COMPUTABLE_MODE_P (tmode)
7792 	      && ((nonzero_bits (new_rtx, tmode)
7793 		   & ~(((unsigned HOST_WIDE_INT)GET_MODE_MASK (tmode)) >> 1))
7794 		  == 0)))
7795 	{
7796 	  rtx temp = gen_rtx_ZERO_EXTEND (mode, new_rtx);
7797 	  rtx temp1 = gen_rtx_SIGN_EXTEND (mode, new_rtx);
7798 
7799 	  /* Prefer ZERO_EXTENSION, since it gives more information to
7800 	     backends.  */
7801 	  if (set_src_cost (temp, mode, optimize_this_for_speed_p)
7802 	      <= set_src_cost (temp1, mode, optimize_this_for_speed_p))
7803 	    return temp;
7804 	  return temp1;
7805 	}
7806 
7807       /* Otherwise, sign- or zero-extend unless we already are in the
7808 	 proper mode.  */
7809 
7810       return (gen_rtx_fmt_e (unsignedp ? ZERO_EXTEND : SIGN_EXTEND,
7811 			     mode, new_rtx));
7812     }
7813 
7814   /* Unless this is a COMPARE or we have a funny memory reference,
7815      don't do anything with zero-extending field extracts starting at
7816      the low-order bit since they are simple AND operations.  */
7817   if (pos_rtx == 0 && pos == 0 && ! in_dest
7818       && ! in_compare && unsignedp)
7819     return 0;
7820 
7821   /* If INNER is a MEM, reject this if we would be spanning bytes or
7822      if the position is not a constant and the length is not 1.  In all
7823      other cases, we would only be going outside our object in cases when
7824      an original shift would have been undefined.  */
7825   if (MEM_P (inner)
7826       && ((pos_rtx == 0 && maybe_gt (pos + len, GET_MODE_PRECISION (is_mode)))
7827 	  || (pos_rtx != 0 && len != 1)))
7828     return 0;
7829 
7830   enum extraction_pattern pattern = (in_dest ? EP_insv
7831 				     : unsignedp ? EP_extzv : EP_extv);
7832 
7833   /* If INNER is not from memory, we want it to have the mode of a register
7834      extraction pattern's structure operand, or word_mode if there is no
7835      such pattern.  The same applies to extraction_mode and pos_mode
7836      and their respective operands.
7837 
7838      For memory, assume that the desired extraction_mode and pos_mode
7839      are the same as for a register operation, since at present we don't
7840      have named patterns for aligned memory structures.  */
7841   struct extraction_insn insn;
7842   unsigned int inner_size;
7843   if (GET_MODE_BITSIZE (inner_mode).is_constant (&inner_size)
7844       && get_best_reg_extraction_insn (&insn, pattern, inner_size, mode))
7845     {
7846       wanted_inner_reg_mode = insn.struct_mode.require ();
7847       pos_mode = insn.pos_mode;
7848       extraction_mode = insn.field_mode;
7849     }
7850 
7851   /* Never narrow an object, since that might not be safe.  */
7852 
7853   if (mode != VOIDmode
7854       && partial_subreg_p (extraction_mode, mode))
7855     extraction_mode = mode;
7856 
7857   /* Punt if len is too large for extraction_mode.  */
7858   if (maybe_gt (len, GET_MODE_PRECISION (extraction_mode)))
7859     return NULL_RTX;
7860 
7861   if (!MEM_P (inner))
7862     wanted_inner_mode = wanted_inner_reg_mode;
7863   else
7864     {
7865       /* Be careful not to go beyond the extracted object and maintain the
7866 	 natural alignment of the memory.  */
7867       wanted_inner_mode = smallest_int_mode_for_size (len);
7868       while (pos % GET_MODE_BITSIZE (wanted_inner_mode) + len
7869 	     > GET_MODE_BITSIZE (wanted_inner_mode))
7870 	wanted_inner_mode = GET_MODE_WIDER_MODE (wanted_inner_mode).require ();
7871     }
7872 
7873   orig_pos = pos;
7874 
7875   if (BITS_BIG_ENDIAN)
7876     {
7877       /* POS is passed as if BITS_BIG_ENDIAN == 0, so we need to convert it to
7878 	 BITS_BIG_ENDIAN style.  If position is constant, compute new
7879 	 position.  Otherwise, build subtraction.
7880 	 Note that POS is relative to the mode of the original argument.
7881 	 If it's a MEM we need to recompute POS relative to that.
7882 	 However, if we're extracting from (or inserting into) a register,
7883 	 we want to recompute POS relative to wanted_inner_mode.  */
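      /* For example (an added note): with a 32-bit WANTED_INNER_MODE,
	 LEN == 8 and a little-endian POS of 4, the big-endian bit
	 position computed below is 32 - 8 - 4 == 20.  */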
7884       int width;
7885       if (!MEM_P (inner))
7886 	width = GET_MODE_BITSIZE (wanted_inner_mode);
7887       else if (!GET_MODE_BITSIZE (is_mode).is_constant (&width))
7888 	return NULL_RTX;
7889 
7890       if (pos_rtx == 0)
7891 	pos = width - len - pos;
7892       else
7893 	pos_rtx
7894 	  = gen_rtx_MINUS (GET_MODE (pos_rtx),
7895 			   gen_int_mode (width - len, GET_MODE (pos_rtx)),
7896 			   pos_rtx);
7897       /* POS may be less than 0 now, but we check for that below.
7898 	 Note that it can only be less than 0 if !MEM_P (inner).  */
7899     }
7900 
7901   /* If INNER has a wider mode, and this is a constant extraction, try to
7902      make it smaller and adjust the byte to point to the byte containing
7903      the value.  */
7904   if (wanted_inner_mode != VOIDmode
7905       && inner_mode != wanted_inner_mode
7906       && ! pos_rtx
7907       && partial_subreg_p (wanted_inner_mode, is_mode)
7908       && MEM_P (inner)
7909       && ! mode_dependent_address_p (XEXP (inner, 0), MEM_ADDR_SPACE (inner))
7910       && ! MEM_VOLATILE_P (inner))
7911     {
7912       poly_int64 offset = 0;
7913 
7914       /* The computations below will be correct if the machine is big
7915 	 endian in both bits and bytes or little endian in bits and bytes.
7916 	 If it is mixed, we must adjust.  */
7917 
7918       /* If bytes are big endian and we had a paradoxical SUBREG, we must
7919 	 adjust OFFSET to compensate.  */
7920       if (BYTES_BIG_ENDIAN
7921 	  && paradoxical_subreg_p (is_mode, inner_mode))
7922 	offset -= GET_MODE_SIZE (is_mode) - GET_MODE_SIZE (inner_mode);
7923 
7924       /* We can now move to the desired byte.  */
7925       offset += (pos / GET_MODE_BITSIZE (wanted_inner_mode))
7926 		* GET_MODE_SIZE (wanted_inner_mode);
7927       pos %= GET_MODE_BITSIZE (wanted_inner_mode);
7928 
7929       if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN
7930 	  && is_mode != wanted_inner_mode)
7931 	offset = (GET_MODE_SIZE (is_mode)
7932 		  - GET_MODE_SIZE (wanted_inner_mode) - offset);
7933 
7934       inner = adjust_address_nv (inner, wanted_inner_mode, offset);
7935     }
7936 
7937   /* If INNER is not memory, get it into the proper mode.  If we are changing
7938      its mode, POS must be a constant and smaller than the size of the new
7939      mode.  */
7940   else if (!MEM_P (inner))
7941     {
7942       /* On the LHS, don't create paradoxical subregs implicitly truncating
7943 	 the register unless TARGET_TRULY_NOOP_TRUNCATION.  */
7944       if (in_dest
7945 	  && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (inner),
7946 					     wanted_inner_mode))
7947 	return NULL_RTX;
7948 
7949       if (GET_MODE (inner) != wanted_inner_mode
7950 	  && (pos_rtx != 0
7951 	      || orig_pos + len > GET_MODE_BITSIZE (wanted_inner_mode)))
7952 	return NULL_RTX;
7953 
7954       if (orig_pos < 0)
7955 	return NULL_RTX;
7956 
7957       inner = force_to_mode (inner, wanted_inner_mode,
7958 			     pos_rtx
7959 			     || len + orig_pos >= HOST_BITS_PER_WIDE_INT
7960 			     ? HOST_WIDE_INT_M1U
7961 			     : (((HOST_WIDE_INT_1U << len) - 1)
7962 				<< orig_pos),
7963 			     0);
7964     }
7965 
7966   /* Adjust mode of POS_RTX, if needed.  If we want a wider mode, we
7967      have to zero extend.  Otherwise, we can just use a SUBREG.
7968 
7969      We dealt with constant rtxes earlier, so pos_rtx cannot
7970      have VOIDmode at this point.  */
7971   if (pos_rtx != 0
7972       && (GET_MODE_SIZE (pos_mode)
7973 	  > GET_MODE_SIZE (as_a <scalar_int_mode> (GET_MODE (pos_rtx)))))
7974     {
7975       rtx temp = simplify_gen_unary (ZERO_EXTEND, pos_mode, pos_rtx,
7976 				     GET_MODE (pos_rtx));
7977 
7978       /* If we know that no extraneous bits are set, and that the high
7979 	 bit is not set, convert extraction to cheaper one - either
7980 	 SIGN_EXTENSION or ZERO_EXTENSION, that are equivalent in these
7981 	 cases.  */
7982       if (flag_expensive_optimizations
7983 	  && (HWI_COMPUTABLE_MODE_P (GET_MODE (pos_rtx))
7984 	      && ((nonzero_bits (pos_rtx, GET_MODE (pos_rtx))
7985 		   & ~(((unsigned HOST_WIDE_INT)
7986 			GET_MODE_MASK (GET_MODE (pos_rtx)))
7987 		       >> 1))
7988 		  == 0)))
7989 	{
7990 	  rtx temp1 = simplify_gen_unary (SIGN_EXTEND, pos_mode, pos_rtx,
7991 					  GET_MODE (pos_rtx));
7992 
7993 	  /* Prefer ZERO_EXTENSION, since it gives more information to
7994 	     backends.  */
7995 	  if (set_src_cost (temp1, pos_mode, optimize_this_for_speed_p)
7996 	      < set_src_cost (temp, pos_mode, optimize_this_for_speed_p))
7997 	    temp = temp1;
7998 	}
7999       pos_rtx = temp;
8000     }
8001 
8002   /* Make POS_RTX unless we already have it and it is correct.  If we don't
8003      have a POS_RTX but we do have an ORIG_POS_RTX, the latter must
8004      be a CONST_INT.  */
8005   if (pos_rtx == 0 && orig_pos_rtx != 0 && INTVAL (orig_pos_rtx) == pos)
8006     pos_rtx = orig_pos_rtx;
8007 
8008   else if (pos_rtx == 0)
8009     pos_rtx = GEN_INT (pos);
8010 
8011   /* Make the required operation.  See if we can use existing rtx.  */
8012   new_rtx = gen_rtx_fmt_eee (unsignedp ? ZERO_EXTRACT : SIGN_EXTRACT,
8013 			 extraction_mode, inner, GEN_INT (len), pos_rtx);
8014   if (! in_dest)
8015     new_rtx = gen_lowpart (mode, new_rtx);
8016 
8017   return new_rtx;
8018 }
8019 
8020 /* See if X (of mode MODE) contains an ASHIFT of COUNT or more bits that
8021    can be commuted with any other operations in X.  Return X without
8022    that shift if so.  */
8023 
8024 static rtx
8025 extract_left_shift (scalar_int_mode mode, rtx x, int count)
8026 {
8027   enum rtx_code code = GET_CODE (x);
8028   rtx tem;
8029 
8030   switch (code)
8031     {
8032     case ASHIFT:
8033       /* This is the shift itself.  If it is wide enough, we will return
8034 	 either the value being shifted if the shift count is equal to
8035 	 COUNT or a shift for the difference.  */
8036       if (CONST_INT_P (XEXP (x, 1))
8037 	  && INTVAL (XEXP (x, 1)) >= count)
8038 	return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (x, 0),
8039 				     INTVAL (XEXP (x, 1)) - count);
8040       break;
8041 
8042     case NEG:  case NOT:
8043       if ((tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
8044 	return simplify_gen_unary (code, mode, tem, mode);
8045 
8046       break;
8047 
8048     case PLUS:  case IOR:  case XOR:  case AND:
8049       /* If we can safely shift this constant and we find the inner shift,
8050 	 make a new operation.  */
8051       if (CONST_INT_P (XEXP (x, 1))
8052 	  && (UINTVAL (XEXP (x, 1))
8053 	      & (((HOST_WIDE_INT_1U << count)) - 1)) == 0
8054 	  && (tem = extract_left_shift (mode, XEXP (x, 0), count)) != 0)
8055 	{
8056 	  HOST_WIDE_INT val = INTVAL (XEXP (x, 1)) >> count;
8057 	  return simplify_gen_binary (code, mode, tem,
8058 				      gen_int_mode (val, mode));
8059 	}
8060       break;
8061 
8062     default:
8063       break;
8064     }
8065 
8066   return 0;
8067 }
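/* An added example of the transformation above (not in the original
   sources): with COUNT == 2,

     (plus:SI (ashift:SI X (const_int 2)) (const_int 20))

   has its inner shift located, the constant 20 is checked to have its
   two low bits clear, and the result is (plus:SI X (const_int 5)),
   i.e. the original value with one left shift by 2 factored out.  */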
8068 
8069 /* Subroutine of make_compound_operation.  *X_PTR is the rtx at the current
8070    level of the expression and MODE is its mode.  IN_CODE is as for
8071    make_compound_operation.  *NEXT_CODE_PTR is the value of IN_CODE
8072    that should be used when recursing on operands of *X_PTR.
8073 
8074    There are two possible actions:
8075 
8076    - Return null.  This tells the caller to recurse on *X_PTR with IN_CODE
8077      equal to *NEXT_CODE_PTR, after which *X_PTR holds the final value.
8078 
8079    - Return a new rtx, which the caller returns directly.  */
8080 
8081 static rtx
8082 make_compound_operation_int (scalar_int_mode mode, rtx *x_ptr,
8083 			     enum rtx_code in_code,
8084 			     enum rtx_code *next_code_ptr)
8085 {
8086   rtx x = *x_ptr;
8087   enum rtx_code next_code = *next_code_ptr;
8088   enum rtx_code code = GET_CODE (x);
8089   int mode_width = GET_MODE_PRECISION (mode);
8090   rtx rhs, lhs;
8091   rtx new_rtx = 0;
8092   int i;
8093   rtx tem;
8094   scalar_int_mode inner_mode;
8095   bool equality_comparison = false;
8096 
8097   if (in_code == EQ)
8098     {
8099       equality_comparison = true;
8100       in_code = COMPARE;
8101     }
8102 
8103   /* Process depending on the code of this operation.  If NEW is set
8104      nonzero, it will be returned.  */
8105 
8106   switch (code)
8107     {
8108     case ASHIFT:
8109       /* Convert shifts by constants into multiplications if inside
8110 	 an address.  */
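      /* For example (added note): inside a MEM address,
	 (ashift:SI (reg:SI R) (const_int 2)) becomes
	 (mult:SI (reg:SI R) (const_int 4)), the canonical form for
	 scaled addresses.  */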
8111       if (in_code == MEM && CONST_INT_P (XEXP (x, 1))
8112 	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
8113 	  && INTVAL (XEXP (x, 1)) >= 0)
8114 	{
8115 	  HOST_WIDE_INT count = INTVAL (XEXP (x, 1));
8116 	  HOST_WIDE_INT multval = HOST_WIDE_INT_1 << count;
8117 
8118 	  new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8119 	  if (GET_CODE (new_rtx) == NEG)
8120 	    {
8121 	      new_rtx = XEXP (new_rtx, 0);
8122 	      multval = -multval;
8123 	    }
8124 	  multval = trunc_int_for_mode (multval, mode);
8125 	  new_rtx = gen_rtx_MULT (mode, new_rtx, gen_int_mode (multval, mode));
8126 	}
8127       break;
8128 
8129     case PLUS:
8130       lhs = XEXP (x, 0);
8131       rhs = XEXP (x, 1);
8132       lhs = make_compound_operation (lhs, next_code);
8133       rhs = make_compound_operation (rhs, next_code);
8134       if (GET_CODE (lhs) == MULT && GET_CODE (XEXP (lhs, 0)) == NEG)
8135 	{
8136 	  tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (lhs, 0), 0),
8137 				     XEXP (lhs, 1));
8138 	  new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
8139 	}
8140       else if (GET_CODE (lhs) == MULT
8141 	       && (CONST_INT_P (XEXP (lhs, 1)) && INTVAL (XEXP (lhs, 1)) < 0))
8142 	{
8143 	  tem = simplify_gen_binary (MULT, mode, XEXP (lhs, 0),
8144 				     simplify_gen_unary (NEG, mode,
8145 							 XEXP (lhs, 1),
8146 							 mode));
8147 	  new_rtx = simplify_gen_binary (MINUS, mode, rhs, tem);
8148 	}
8149       else
8150 	{
8151 	  SUBST (XEXP (x, 0), lhs);
8152 	  SUBST (XEXP (x, 1), rhs);
8153 	}
8154       maybe_swap_commutative_operands (x);
8155       return x;
8156 
8157     case MINUS:
8158       lhs = XEXP (x, 0);
8159       rhs = XEXP (x, 1);
8160       lhs = make_compound_operation (lhs, next_code);
8161       rhs = make_compound_operation (rhs, next_code);
8162       if (GET_CODE (rhs) == MULT && GET_CODE (XEXP (rhs, 0)) == NEG)
8163 	{
8164 	  tem = simplify_gen_binary (MULT, mode, XEXP (XEXP (rhs, 0), 0),
8165 				     XEXP (rhs, 1));
8166 	  return simplify_gen_binary (PLUS, mode, tem, lhs);
8167 	}
8168       else if (GET_CODE (rhs) == MULT
8169 	       && (CONST_INT_P (XEXP (rhs, 1)) && INTVAL (XEXP (rhs, 1)) < 0))
8170 	{
8171 	  tem = simplify_gen_binary (MULT, mode, XEXP (rhs, 0),
8172 				     simplify_gen_unary (NEG, mode,
8173 							 XEXP (rhs, 1),
8174 							 mode));
8175 	  return simplify_gen_binary (PLUS, mode, tem, lhs);
8176 	}
8177       else
8178 	{
8179 	  SUBST (XEXP (x, 0), lhs);
8180 	  SUBST (XEXP (x, 1), rhs);
8181 	  return x;
8182 	}
8183 
8184     case AND:
8185       /* If the second operand is not a constant, we can't do anything
8186 	 with it.  */
8187       if (!CONST_INT_P (XEXP (x, 1)))
8188 	break;
8189 
8190       /* If the constant is a power of two minus one and the first operand
8191 	 is a logical right shift, make an extraction.  */
8192       if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8193 	  && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8194 	{
8195 	  new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
8196 	  new_rtx = make_extraction (mode, new_rtx, 0, XEXP (XEXP (x, 0), 1),
8197 				     i, 1, 0, in_code == COMPARE);
8198 	}
8199 
8200       /* Same as previous, but for (subreg (lshiftrt ...)) in first op.  */
8201       else if (GET_CODE (XEXP (x, 0)) == SUBREG
8202 	       && subreg_lowpart_p (XEXP (x, 0))
8203 	       && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (XEXP (x, 0))),
8204 					  &inner_mode)
8205 	       && GET_CODE (SUBREG_REG (XEXP (x, 0))) == LSHIFTRT
8206 	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8207 	{
8208 	  rtx inner_x0 = SUBREG_REG (XEXP (x, 0));
8209 	  new_rtx = make_compound_operation (XEXP (inner_x0, 0), next_code);
8210 	  new_rtx = make_extraction (inner_mode, new_rtx, 0,
8211 				     XEXP (inner_x0, 1),
8212 				     i, 1, 0, in_code == COMPARE);
8213 
8214 	  /* If we narrowed the mode when dropping the subreg, then we lose.  */
8215 	  if (GET_MODE_SIZE (inner_mode) < GET_MODE_SIZE (mode))
8216 	    new_rtx = NULL;
8217 
8218 	  /* If that didn't give anything, see if the AND simplifies on
8219 	     its own.  */
8220 	  if (!new_rtx && i >= 0)
8221 	    {
8222 	      new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8223 	      new_rtx = make_extraction (mode, new_rtx, 0, NULL_RTX, i, 1,
8224 					 0, in_code == COMPARE);
8225 	    }
8226 	}
8227       /* Same as previous, but for (xor/ior (lshiftrt...) (lshiftrt...)).  */
8228       else if ((GET_CODE (XEXP (x, 0)) == XOR
8229 		|| GET_CODE (XEXP (x, 0)) == IOR)
8230 	       && GET_CODE (XEXP (XEXP (x, 0), 0)) == LSHIFTRT
8231 	       && GET_CODE (XEXP (XEXP (x, 0), 1)) == LSHIFTRT
8232 	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8233 	{
8234 	  /* Apply the distributive law, and then try to make extractions.  */
8235 	  new_rtx = gen_rtx_fmt_ee (GET_CODE (XEXP (x, 0)), mode,
8236 				    gen_rtx_AND (mode, XEXP (XEXP (x, 0), 0),
8237 						 XEXP (x, 1)),
8238 				    gen_rtx_AND (mode, XEXP (XEXP (x, 0), 1),
8239 						 XEXP (x, 1)));
8240 	  new_rtx = make_compound_operation (new_rtx, in_code);
8241 	}
8242 
8243       /* If we have (and (rotate X C) M) and C is larger than the number
8244 	 of bits in M, this is an extraction.  */
8245 
8246       else if (GET_CODE (XEXP (x, 0)) == ROTATE
8247 	       && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8248 	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0
8249 	       && i <= INTVAL (XEXP (XEXP (x, 0), 1)))
8250 	{
8251 	  new_rtx = make_compound_operation (XEXP (XEXP (x, 0), 0), next_code);
8252 	  new_rtx = make_extraction (mode, new_rtx,
8253 				     (GET_MODE_PRECISION (mode)
8254 				      - INTVAL (XEXP (XEXP (x, 0), 1))),
8255 				     NULL_RTX, i, 1, 0, in_code == COMPARE);
8256 	}
8257 
8258       /* On machines without logical shifts, if the operand of the AND is
8259 	 a logical shift and our mask turns off all the propagated sign
8260 	 bits, we can replace the logical shift with an arithmetic shift.  */
8261       else if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8262 	       && !have_insn_for (LSHIFTRT, mode)
8263 	       && have_insn_for (ASHIFTRT, mode)
8264 	       && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8265 	       && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8266 	       && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8267 	       && mode_width <= HOST_BITS_PER_WIDE_INT)
8268 	{
8269 	  unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
8270 
8271 	  mask >>= INTVAL (XEXP (XEXP (x, 0), 1));
8272 	  if ((INTVAL (XEXP (x, 1)) & ~mask) == 0)
8273 	    SUBST (XEXP (x, 0),
8274 		   gen_rtx_ASHIFTRT (mode,
8275 				     make_compound_operation (XEXP (XEXP (x,
8276 									  0),
8277 								    0),
8278 							      next_code),
8279 				     XEXP (XEXP (x, 0), 1)));
8280 	}
8281 
8282       /* If the constant is one less than a power of two, this might be
8283 	 representable by an extraction even if no shift is present.
8284 	 If it doesn't end up being a ZERO_EXTEND, we will ignore it unless
8285 	 we are in a COMPARE.  */
8286       else if ((i = exact_log2 (UINTVAL (XEXP (x, 1)) + 1)) >= 0)
8287 	new_rtx = make_extraction (mode,
8288 				   make_compound_operation (XEXP (x, 0),
8289 							    next_code),
8290 				   0, NULL_RTX, i, 1, 0, in_code == COMPARE);
8291 
8292       /* If we are in a comparison and this is an AND with a power of two,
8293 	 convert this into the appropriate bit extract.  */
8294       else if (in_code == COMPARE
8295 	       && (i = exact_log2 (UINTVAL (XEXP (x, 1)))) >= 0
8296 	       && (equality_comparison || i < GET_MODE_PRECISION (mode) - 1))
8297 	new_rtx = make_extraction (mode,
8298 				   make_compound_operation (XEXP (x, 0),
8299 							    next_code),
8300 				   i, NULL_RTX, 1, 1, 0, 1);
8301 
8302       /* If one operand is a paradoxical subreg of a register or memory and
8303 	 the constant (limited to the smaller mode) has only zero bits where
8304 	 the subexpression has known zero bits, this can be expressed as
8305 	 a zero_extend.  */
8306       else if (GET_CODE (XEXP (x, 0)) == SUBREG)
8307 	{
8308 	  rtx sub;
8309 
8310 	  sub = XEXP (XEXP (x, 0), 0);
8311 	  machine_mode sub_mode = GET_MODE (sub);
8312 	  int sub_width;
8313 	  if ((REG_P (sub) || MEM_P (sub))
8314 	      && GET_MODE_PRECISION (sub_mode).is_constant (&sub_width)
8315 	      && sub_width < mode_width)
8316 	    {
8317 	      unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (sub_mode);
8318 	      unsigned HOST_WIDE_INT mask;
8319 
8320 	      /* The original AND constant with all the known zero bits set.  */
8321 	      mask = UINTVAL (XEXP (x, 1)) | (~nonzero_bits (sub, sub_mode));
8322 	      if ((mask & mode_mask) == mode_mask)
8323 		{
8324 		  new_rtx = make_compound_operation (sub, next_code);
8325 		  new_rtx = make_extraction (mode, new_rtx, 0, 0, sub_width,
8326 					     1, 0, in_code == COMPARE);
8327 		}
8328 	    }
8329 	}
8330 
8331       break;
8332 
8333     case LSHIFTRT:
8334       /* If the sign bit is known to be zero, replace this with an
8335 	 arithmetic shift.  */
8336       if (have_insn_for (ASHIFTRT, mode)
8337 	  && ! have_insn_for (LSHIFTRT, mode)
8338 	  && mode_width <= HOST_BITS_PER_WIDE_INT
8339 	  && (nonzero_bits (XEXP (x, 0), mode) & (1 << (mode_width - 1))) == 0)
8340 	{
8341 	  new_rtx = gen_rtx_ASHIFTRT (mode,
8342 				      make_compound_operation (XEXP (x, 0),
8343 							       next_code),
8344 				      XEXP (x, 1));
8345 	  break;
8346 	}
8347 
8348       /* fall through */
8349 
8350     case ASHIFTRT:
8351       lhs = XEXP (x, 0);
8352       rhs = XEXP (x, 1);
8353 
8354       /* If we have (ashiftrt (ashift foo C1) C2) with C2 >= C1,
8355 	 this is a SIGN_EXTRACT.  */
8356       if (CONST_INT_P (rhs)
8357 	  && GET_CODE (lhs) == ASHIFT
8358 	  && CONST_INT_P (XEXP (lhs, 1))
8359 	  && INTVAL (rhs) >= INTVAL (XEXP (lhs, 1))
8360 	  && INTVAL (XEXP (lhs, 1)) >= 0
8361 	  && INTVAL (rhs) < mode_width)
8362 	{
8363 	  new_rtx = make_compound_operation (XEXP (lhs, 0), next_code);
8364 	  new_rtx = make_extraction (mode, new_rtx,
8365 				     INTVAL (rhs) - INTVAL (XEXP (lhs, 1)),
8366 				     NULL_RTX, mode_width - INTVAL (rhs),
8367 				     code == LSHIFTRT, 0, in_code == COMPARE);
8368 	  break;
8369 	}
8370 
8371       /* See if we have operations between an ASHIFTRT and an ASHIFT.
8372 	 If so, try to merge the shifts into a SIGN_EXTEND.  We could
8373 	 also do this for some cases of SIGN_EXTRACT, but it doesn't
8374 	 seem worth the effort; the case checked for occurs on Alpha.  */
8375 
8376       if (!OBJECT_P (lhs)
8377 	  && ! (GET_CODE (lhs) == SUBREG
8378 		&& (OBJECT_P (SUBREG_REG (lhs))))
8379 	  && CONST_INT_P (rhs)
8380 	  && INTVAL (rhs) >= 0
8381 	  && INTVAL (rhs) < HOST_BITS_PER_WIDE_INT
8382 	  && INTVAL (rhs) < mode_width
8383 	  && (new_rtx = extract_left_shift (mode, lhs, INTVAL (rhs))) != 0)
8384 	new_rtx = make_extraction (mode, make_compound_operation (new_rtx,
8385 								  next_code),
8386 				   0, NULL_RTX, mode_width - INTVAL (rhs),
8387 				   code == LSHIFTRT, 0, in_code == COMPARE);
8388 
8389       break;
8390 
8391     case SUBREG:
8392       /* Call ourselves recursively on the inner expression.  If we are
8393 	 narrowing the object and it has a different RTL code from
8394 	 what it originally did, do this SUBREG as a force_to_mode.  */
8395       {
8396 	rtx inner = SUBREG_REG (x), simplified;
8397 	enum rtx_code subreg_code = in_code;
8398 
8399 	/* If the SUBREG acts as a mask on a logical right shift,
8400 	   make an extraction.  */
8401 	if (GET_CODE (inner) == LSHIFTRT
8402 	    && is_a <scalar_int_mode> (GET_MODE (inner), &inner_mode)
8403 	    && GET_MODE_SIZE (mode) < GET_MODE_SIZE (inner_mode)
8404 	    && CONST_INT_P (XEXP (inner, 1))
8405 	    && UINTVAL (XEXP (inner, 1)) < GET_MODE_PRECISION (inner_mode)
8406 	    && subreg_lowpart_p (x))
8407 	  {
8408 	    new_rtx = make_compound_operation (XEXP (inner, 0), next_code);
8409 	    int width = GET_MODE_PRECISION (inner_mode)
8410 			- INTVAL (XEXP (inner, 1));
8411 	    if (width > mode_width)
8412 	      width = mode_width;
8413 	    new_rtx = make_extraction (mode, new_rtx, 0, XEXP (inner, 1),
8414 				       width, 1, 0, in_code == COMPARE);
8415 	    break;
8416 	  }
8417 
8418 	/* If in_code is COMPARE, it isn't always safe to pass it through
8419 	   to the recursive make_compound_operation call.  */
8420 	if (subreg_code == COMPARE
8421 	    && (!subreg_lowpart_p (x)
8422 		|| GET_CODE (inner) == SUBREG
8423 		/* (subreg:SI (and:DI (reg:DI) (const_int 0x800000000)) 0)
8424 		   is (const_int 0), rather than
8425 		   (subreg:SI (lshiftrt:DI (reg:DI) (const_int 35)) 0).
8426 		   Similarly (subreg:QI (and:SI (reg:SI) (const_int 0x80)) 0)
8427 		   for non-equality comparisons against 0 is not equivalent
8428 		   to (subreg:QI (lshiftrt:SI (reg:SI) (const_int 7)) 0).  */
8429 		|| (GET_CODE (inner) == AND
8430 		    && CONST_INT_P (XEXP (inner, 1))
8431 		    && partial_subreg_p (x)
8432 		    && exact_log2 (UINTVAL (XEXP (inner, 1)))
8433 		       >= GET_MODE_BITSIZE (mode) - 1)))
8434 	  subreg_code = SET;
8435 
8436 	tem = make_compound_operation (inner, subreg_code);
8437 
8438 	simplified
8439 	  = simplify_subreg (mode, tem, GET_MODE (inner), SUBREG_BYTE (x));
8440 	if (simplified)
8441 	  tem = simplified;
8442 
8443 	if (GET_CODE (tem) != GET_CODE (inner)
8444 	    && partial_subreg_p (x)
8445 	    && subreg_lowpart_p (x))
8446 	  {
8447 	    rtx newer
8448 	      = force_to_mode (tem, mode, HOST_WIDE_INT_M1U, 0);
8449 
8450 	    /* If we have something other than a SUBREG, we might have
8451 	       done an expansion, so rerun ourselves.  */
8452 	    if (GET_CODE (newer) != SUBREG)
8453 	      newer = make_compound_operation (newer, in_code);
8454 
8455 	    /* force_to_mode can expand compounds.  If it just re-expanded
8456 	       the compound, use gen_lowpart to convert to the desired
8457 	       mode.  */
8458 	    if (rtx_equal_p (newer, x)
8459 		/* Likewise if it re-expanded the compound only partially.
8460 		   This happens for SUBREG of ZERO_EXTRACT if they extract
8461 		   the same number of bits.  */
8462 		|| (GET_CODE (newer) == SUBREG
8463 		    && (GET_CODE (SUBREG_REG (newer)) == LSHIFTRT
8464 			|| GET_CODE (SUBREG_REG (newer)) == ASHIFTRT)
8465 		    && GET_CODE (inner) == AND
8466 		    && rtx_equal_p (SUBREG_REG (newer), XEXP (inner, 0))))
8467 	      return gen_lowpart (GET_MODE (x), tem);
8468 
8469 	    return newer;
8470 	  }
8471 
8472 	if (simplified)
8473 	  return tem;
8474       }
8475       break;
8476 
8477     default:
8478       break;
8479     }
8480 
8481   if (new_rtx)
8482     *x_ptr = gen_lowpart (mode, new_rtx);
8483   *next_code_ptr = next_code;
8484   return NULL_RTX;
8485 }
8486 
8487 /* Look at the expression rooted at X.  Look for expressions
8488    equivalent to ZERO_EXTRACT, SIGN_EXTRACT, ZERO_EXTEND, SIGN_EXTEND.
8489    Form these expressions.
8490 
8491    Return the new rtx, usually just X.
8492 
8493    Also, for machines like the VAX that don't have logical shift insns,
8494    try to convert logical to arithmetic shift operations in cases where
8495    they are equivalent.  This undoes the canonicalizations to logical
8496    shifts done elsewhere.
8497 
8498    We try, as much as possible, to re-use rtl expressions to save memory.
8499 
8500    IN_CODE says what kind of expression we are processing.  Normally, it is
8501    SET.  In a memory address it is MEM.  When processing the arguments of
8502    a comparison or a COMPARE against zero, it is COMPARE, or EQ if more
8503    precisely it is an equality comparison against zero.  */
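/* A typical transformation, shown here as an added example: from the
   canonical shift-and-mask form

     (and:SI (lshiftrt:SI (reg:SI R) (const_int 8)) (const_int 255))

   this function can rebuild the compound rtx

     (zero_extract:SI (reg:SI R) (const_int 8) (const_int 8))

   so that later passes and the machine description see the byte
   extraction directly.  */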
8504 
8505 rtx
8506 make_compound_operation (rtx x, enum rtx_code in_code)
8507 {
8508   enum rtx_code code = GET_CODE (x);
8509   const char *fmt;
8510   int i, j;
8511   enum rtx_code next_code;
8512   rtx new_rtx, tem;
8513 
8514   /* Select the code to be used in recursive calls.  Once we are inside an
8515      address, we stay there.  If we have a comparison, set to COMPARE,
8516      but once inside, go back to our default of SET.  */
8517 
8518   next_code = (code == MEM ? MEM
8519 	       : ((code == COMPARE || COMPARISON_P (x))
8520 		  && XEXP (x, 1) == const0_rtx) ? COMPARE
8521 	       : in_code == COMPARE || in_code == EQ ? SET : in_code);
8522 
8523   scalar_int_mode mode;
8524   if (is_a <scalar_int_mode> (GET_MODE (x), &mode))
8525     {
8526       rtx new_rtx = make_compound_operation_int (mode, &x, in_code,
8527 						 &next_code);
8528       if (new_rtx)
8529 	return new_rtx;
8530       code = GET_CODE (x);
8531     }
8532 
8533   /* Now recursively process each operand of this operation.  We need to
8534      handle ZERO_EXTEND specially so that we don't lose track of the
8535      inner mode.  */
8536   if (code == ZERO_EXTEND)
8537     {
8538       new_rtx = make_compound_operation (XEXP (x, 0), next_code);
8539       tem = simplify_const_unary_operation (ZERO_EXTEND, GET_MODE (x),
8540 					    new_rtx, GET_MODE (XEXP (x, 0)));
8541       if (tem)
8542 	return tem;
8543       SUBST (XEXP (x, 0), new_rtx);
8544       return x;
8545     }
8546 
8547   fmt = GET_RTX_FORMAT (code);
8548   for (i = 0; i < GET_RTX_LENGTH (code); i++)
8549     if (fmt[i] == 'e')
8550       {
8551 	new_rtx = make_compound_operation (XEXP (x, i), next_code);
8552 	SUBST (XEXP (x, i), new_rtx);
8553       }
8554     else if (fmt[i] == 'E')
8555       for (j = 0; j < XVECLEN (x, i); j++)
8556 	{
8557 	  new_rtx = make_compound_operation (XVECEXP (x, i, j), next_code);
8558 	  SUBST (XVECEXP (x, i, j), new_rtx);
8559 	}
8560 
8561   maybe_swap_commutative_operands (x);
8562   return x;
8563 }
8564 
8565 /* Given M, see if it is a value that would select a field of bits
8566    within an item, but not the entire word.  Return -1 if not.
8567    Otherwise, return the starting position of the field, where 0 is the
8568    low-order bit.
8569 
8570    *PLEN is set to the length of the field.  */
8571 
8572 static int
8573 get_pos_from_mask (unsigned HOST_WIDE_INT m, unsigned HOST_WIDE_INT *plen)
8574 {
8575   /* Get the bit number of the first 1 bit from the right, -1 if none.  */
8576   int pos = m ? ctz_hwi (m) : -1;
8577   int len = 0;
8578 
8579   if (pos >= 0)
8580     /* Now shift off the low-order zero bits and see if we have a
8581        power of two minus 1.  */
8582     len = exact_log2 ((m >> pos) + 1);
8583 
8584   if (len <= 0)
8585     pos = -1;
8586 
8587   *plen = len;
8588   return pos;
8589 }
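/* Added illustration: for M == 0x1e (binary 11110) this returns 1 and
   sets *PLEN to 4, since the mask selects bits 1 through 4; for
   M == 0x14 (binary 10100) it returns -1 because the 1 bits are not
   contiguous.  */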
8590 
8591 /* If X refers to a register that equals REG in value, replace these
8592    references with REG.  */
8593 static rtx
8594 canon_reg_for_combine (rtx x, rtx reg)
8595 {
8596   rtx op0, op1, op2;
8597   const char *fmt;
8598   int i;
8599   bool copied;
8600 
8601   enum rtx_code code = GET_CODE (x);
8602   switch (GET_RTX_CLASS (code))
8603     {
8604     case RTX_UNARY:
8605       op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8606       if (op0 != XEXP (x, 0))
8607 	return simplify_gen_unary (GET_CODE (x), GET_MODE (x), op0,
8608 				   GET_MODE (reg));
8609       break;
8610 
8611     case RTX_BIN_ARITH:
8612     case RTX_COMM_ARITH:
8613       op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8614       op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8615       if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8616 	return simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
8617       break;
8618 
8619     case RTX_COMPARE:
8620     case RTX_COMM_COMPARE:
8621       op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8622       op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8623       if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
8624 	return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
8625 					GET_MODE (op0), op0, op1);
8626       break;
8627 
8628     case RTX_TERNARY:
8629     case RTX_BITFIELD_OPS:
8630       op0 = canon_reg_for_combine (XEXP (x, 0), reg);
8631       op1 = canon_reg_for_combine (XEXP (x, 1), reg);
8632       op2 = canon_reg_for_combine (XEXP (x, 2), reg);
8633       if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1) || op2 != XEXP (x, 2))
8634 	return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
8635 				     GET_MODE (op0), op0, op1, op2);
8636       /* FALLTHRU */
8637 
8638     case RTX_OBJ:
8639       if (REG_P (x))
8640 	{
8641 	  if (rtx_equal_p (get_last_value (reg), x)
8642 	      || rtx_equal_p (reg, get_last_value (x)))
8643 	    return reg;
8644 	  else
8645 	    break;
8646 	}
8647 
8648       /* fall through */
8649 
8650     default:
8651       fmt = GET_RTX_FORMAT (code);
8652       copied = false;
8653       for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
8654 	if (fmt[i] == 'e')
8655 	  {
8656 	    rtx op = canon_reg_for_combine (XEXP (x, i), reg);
8657 	    if (op != XEXP (x, i))
8658 	      {
8659 		if (!copied)
8660 		  {
8661 		    copied = true;
8662 		    x = copy_rtx (x);
8663 		  }
8664 		XEXP (x, i) = op;
8665 	      }
8666 	  }
8667 	else if (fmt[i] == 'E')
8668 	  {
8669 	    int j;
8670 	    for (j = 0; j < XVECLEN (x, i); j++)
8671 	      {
8672 		rtx op = canon_reg_for_combine (XVECEXP (x, i, j), reg);
8673 		if (op != XVECEXP (x, i, j))
8674 		  {
8675 		    if (!copied)
8676 		      {
8677 			copied = true;
8678 			x = copy_rtx (x);
8679 		      }
8680 		    XVECEXP (x, i, j) = op;
8681 		  }
8682 	      }
8683 	  }
8684 
8685       break;
8686     }
8687 
8688   return x;
8689 }
8690 
8691 /* Return X converted to MODE.  If the value is already truncated to
8692    MODE we can just return a subreg even though in the general case we
8693    would need an explicit truncation.  */
8694 
8695 static rtx
8696 gen_lowpart_or_truncate (machine_mode mode, rtx x)
8697 {
8698   if (!CONST_INT_P (x)
8699       && partial_subreg_p (mode, GET_MODE (x))
8700       && !TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (x))
8701       && !(REG_P (x) && reg_truncated_to_mode (mode, x)))
8702     {
8703       /* Bit-cast X into an integer mode.  */
8704       if (!SCALAR_INT_MODE_P (GET_MODE (x)))
8705 	x = gen_lowpart (int_mode_for_mode (GET_MODE (x)).require (), x);
8706       x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode).require (),
8707 			      x, GET_MODE (x));
8708     }
8709 
8710   return gen_lowpart (mode, x);
8711 }
8712 
8713 /* See if X can be simplified knowing that we will only refer to it in
8714    MODE and will only refer to those bits that are nonzero in MASK.
8715    If other bits are being computed or if masking operations are done
8716    that select a superset of the bits in MASK, they can sometimes be
8717    ignored.
8718 
8719    Return a possibly simplified expression, but always convert X to
8720    MODE.  If X is a CONST_INT, AND the CONST_INT with MASK.
8721 
8722    If JUST_SELECT is nonzero, don't optimize by noticing that bits in MASK
8723    are all off in X.  This is used when X will be complemented, by either
8724    NOT, NEG, or XOR.  */
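/* A small worked example, added for illustration: with MODE == SImode
   and MASK == 0xff, a value such as

     (and:SI (reg:SI R) (const_int 0x3ff))

   can be simplified to just (reg:SI R), because only the low eight bits
   will ever be looked at and the wider AND constant keeps all of them;
   a CONST_INT argument would simply be ANDed with 0xff.  */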
8725 
8726 static rtx
8727 force_to_mode (rtx x, machine_mode mode, unsigned HOST_WIDE_INT mask,
8728 	       int just_select)
8729 {
8730   enum rtx_code code = GET_CODE (x);
8731   int next_select = just_select || code == XOR || code == NOT || code == NEG;
8732   machine_mode op_mode;
8733   unsigned HOST_WIDE_INT nonzero;
8734 
8735   /* If this is a CALL or ASM_OPERANDS, don't do anything.  Some of the
8736      code below will do the wrong thing since the mode of such an
8737      expression is VOIDmode.
8738 
8739      Also do nothing if X is a CLOBBER; this can happen if X was
8740      the return value from a call to gen_lowpart.  */
8741   if (code == CALL || code == ASM_OPERANDS || code == CLOBBER)
8742     return x;
8743 
8744   /* We want to perform the operation in its present mode unless we know
8745      that the operation is valid in MODE, in which case we do the operation
8746      in MODE.  */
8747   op_mode = ((GET_MODE_CLASS (mode) == GET_MODE_CLASS (GET_MODE (x))
8748 	      && have_insn_for (code, mode))
8749 	     ? mode : GET_MODE (x));
8750 
8751   /* It is not valid to do a right-shift in a narrower mode
8752      than the one it came in with.  */
8753   if ((code == LSHIFTRT || code == ASHIFTRT)
8754       && partial_subreg_p (mode, GET_MODE (x)))
8755     op_mode = GET_MODE (x);
8756 
8757   /* Truncate MASK to fit OP_MODE.  */
8758   if (op_mode)
8759     mask &= GET_MODE_MASK (op_mode);
8760 
8761   /* Determine what bits of X are guaranteed to be (non)zero.  */
8762   nonzero = nonzero_bits (x, mode);
8763 
8764   /* If none of the bits in X are needed, return a zero.  */
8765   if (!just_select && (nonzero & mask) == 0 && !side_effects_p (x))
8766     x = const0_rtx;
8767 
8768   /* If X is a CONST_INT, return a new one.  Do this here since the
8769      test below will fail.  */
8770   if (CONST_INT_P (x))
8771     {
8772       if (SCALAR_INT_MODE_P (mode))
8773 	return gen_int_mode (INTVAL (x) & mask, mode);
8774       else
8775 	{
8776 	  x = GEN_INT (INTVAL (x) & mask);
8777 	  return gen_lowpart_common (mode, x);
8778 	}
8779     }
8780 
8781   /* If X is narrower than MODE and we want all the bits in X's mode, just
8782      get X in the proper mode.  */
8783   if (paradoxical_subreg_p (mode, GET_MODE (x))
8784       && (GET_MODE_MASK (GET_MODE (x)) & ~mask) == 0)
8785     return gen_lowpart (mode, x);
8786 
8787   /* We can ignore the effect of a SUBREG if it narrows the mode or
8788      if the constant masks to zero all the bits the mode doesn't have.  */
8789   if (GET_CODE (x) == SUBREG
8790       && subreg_lowpart_p (x)
8791       && (partial_subreg_p (x)
8792 	  || (mask
8793 	      & GET_MODE_MASK (GET_MODE (x))
8794 	      & ~GET_MODE_MASK (GET_MODE (SUBREG_REG (x)))) == 0))
8795     return force_to_mode (SUBREG_REG (x), mode, mask, next_select);
8796 
8797   scalar_int_mode int_mode, xmode;
8798   if (is_a <scalar_int_mode> (mode, &int_mode)
8799       && is_a <scalar_int_mode> (GET_MODE (x), &xmode))
8800     /* OP_MODE is either MODE or XMODE, so it must be a scalar
8801        integer too.  */
8802     return force_int_to_mode (x, int_mode, xmode,
8803 			      as_a <scalar_int_mode> (op_mode),
8804 			      mask, just_select);
8805 
8806   return gen_lowpart_or_truncate (mode, x);
8807 }
8808 
8809 /* Subroutine of force_to_mode that handles cases in which both X and
8810    the result are scalar integers.  MODE is the mode of the result,
8811    XMODE is the mode of X, and OP_MODE says which of MODE or XMODE
8812    is preferred for simplified versions of X.  The other arguments
8813    are as for force_to_mode.  */
8814 
8815 static rtx
8816 force_int_to_mode (rtx x, scalar_int_mode mode, scalar_int_mode xmode,
8817 		   scalar_int_mode op_mode, unsigned HOST_WIDE_INT mask,
8818 		   int just_select)
8819 {
8820   enum rtx_code code = GET_CODE (x);
8821   int next_select = just_select || code == XOR || code == NOT || code == NEG;
8822   unsigned HOST_WIDE_INT fuller_mask;
8823   rtx op0, op1, temp;
8824   poly_int64 const_op0;
8825 
8826   /* When we have an arithmetic operation, or a shift whose count we
8827      do not know, we need to assume that all bits up to the highest-order
8828      bit in MASK will be needed.  This is how we form such a mask.  */
8829   if (mask & (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1)))
8830     fuller_mask = HOST_WIDE_INT_M1U;
8831   else
8832     fuller_mask = ((HOST_WIDE_INT_1U << (floor_log2 (mask) + 1))
8833 		   - 1);
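
  /* As an illustration with arbitrary values: a MASK of 0x24 has its
     highest set bit at position 5, so FULLER_MASK becomes 0x3f -- every
     bit at or below that position is assumed needed, since carries from
     the lower bits can reach the bits we actually care about.  */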
8834 
8835   switch (code)
8836     {
8837     case CLOBBER:
8838       /* If X is a (clobber (const_int)), return it since we know we are
8839 	 generating something that won't match.  */
8840       return x;
8841 
8842     case SIGN_EXTEND:
8843     case ZERO_EXTEND:
8844     case ZERO_EXTRACT:
8845     case SIGN_EXTRACT:
8846       x = expand_compound_operation (x);
8847       if (GET_CODE (x) != code)
8848 	return force_to_mode (x, mode, mask, next_select);
8849       break;
8850 
8851     case TRUNCATE:
8852       /* Similarly for a truncate.  */
8853       return force_to_mode (XEXP (x, 0), mode, mask, next_select);
8854 
8855     case AND:
8856       /* If this is an AND with a constant, convert it into an AND
8857 	 whose constant is the AND of that constant with MASK.  If it
8858 	 remains an AND of MASK, delete it since it is redundant.  */
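
      /* For instance (constants chosen only for illustration): forcing
	 (and Y (const_int 0xf0)) to a MASK of 0x3c yields
	 (and Y (const_int 0x30)); with a MASK of 0xf0 the new constant
	 equals the mask itself and the AND disappears, leaving just Y.  */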
8859 
8860       if (CONST_INT_P (XEXP (x, 1)))
8861 	{
8862 	  x = simplify_and_const_int (x, op_mode, XEXP (x, 0),
8863 				      mask & INTVAL (XEXP (x, 1)));
8864 	  xmode = op_mode;
8865 
8866 	  /* If X is still an AND, see if it is an AND with a mask that
8867 	     is just some low-order bits.  If so, and it is MASK, we don't
8868 	     need it.  */
8869 
8870 	  if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8871 	      && (INTVAL (XEXP (x, 1)) & GET_MODE_MASK (xmode)) == mask)
8872 	    x = XEXP (x, 0);
8873 
8874 	  /* If it remains an AND, try making another AND with the bits
8875 	     in the mode mask that aren't in MASK turned on.  If the
8876 	     constant in the AND is wide enough, this might make a
8877 	     cheaper constant.  */
8878 
8879 	  if (GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1))
8880 	      && GET_MODE_MASK (xmode) != mask
8881 	      && HWI_COMPUTABLE_MODE_P (xmode))
8882 	    {
8883 	      unsigned HOST_WIDE_INT cval
8884 		= UINTVAL (XEXP (x, 1)) | (GET_MODE_MASK (xmode) & ~mask);
8885 	      rtx y;
8886 
8887 	      y = simplify_gen_binary (AND, xmode, XEXP (x, 0),
8888 				       gen_int_mode (cval, xmode));
8889 	      if (set_src_cost (y, xmode, optimize_this_for_speed_p)
8890 		  < set_src_cost (x, xmode, optimize_this_for_speed_p))
8891 		x = y;
8892 	    }
8893 
8894 	  break;
8895 	}
8896 
8897       goto binop;
8898 
8899     case PLUS:
8900       /* In (and (plus FOO C1) M), if M is a mask that just turns off
8901 	 low-order bits (as in an alignment operation) and FOO is already
8902 	 aligned to that boundary, mask C1 to that boundary as well.
8903 	 This may eliminate that PLUS and, later, the AND.  */
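
      /* Illustration with invented numbers: if M is ~7, FOO is known to
	 have its low three bits clear and C1 is 11, then
	 (and (plus FOO 11) ~7) equals (and (plus FOO 8) ~7), because the
	 low bits of C1 cannot carry into the bits that survive M.  */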
8904 
8905       {
8906 	unsigned int width = GET_MODE_PRECISION (mode);
8907 	unsigned HOST_WIDE_INT smask = mask;
8908 
8909 	/* If MODE is narrower than HOST_WIDE_INT and mask is a negative
8910 	   number, sign extend it.  */
8911 
8912 	if (width < HOST_BITS_PER_WIDE_INT
8913 	    && (smask & (HOST_WIDE_INT_1U << (width - 1))) != 0)
8914 	  smask |= HOST_WIDE_INT_M1U << width;
8915 
8916 	if (CONST_INT_P (XEXP (x, 1))
8917 	    && pow2p_hwi (- smask)
8918 	    && (nonzero_bits (XEXP (x, 0), mode) & ~smask) == 0
8919 	    && (INTVAL (XEXP (x, 1)) & ~smask) != 0)
8920 	  return force_to_mode (plus_constant (xmode, XEXP (x, 0),
8921 					       (INTVAL (XEXP (x, 1)) & smask)),
8922 				mode, smask, next_select);
8923       }
8924 
8925       /* fall through */
8926 
8927     case MULT:
8928       /* Substituting into the operands of a widening MULT is not likely to
8929 	 create RTL matching a machine insn.  */
8930       if (code == MULT
8931 	  && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
8932 	      || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
8933 	  && (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
8934 	      || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
8935 	  && REG_P (XEXP (XEXP (x, 0), 0))
8936 	  && REG_P (XEXP (XEXP (x, 1), 0)))
8937 	return gen_lowpart_or_truncate (mode, x);
8938 
8939       /* For PLUS, MINUS and MULT, we need any bits less significant than the
8940 	 most significant bit in MASK since carries from those bits will
8941 	 affect the bits we are interested in.  */
8942       mask = fuller_mask;
8943       goto binop;
8944 
8945     case MINUS:
8946       /* If X is (minus C Y) where C's least set bit is larger than any bit
8947 	 in the mask, then we may replace with (neg Y).  */
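      /* E.g. (constants purely illustrative): with MASK == 7,
	 (minus (const_int 16) Y) can become (neg Y), since 16 contributes
	 nothing to the low three bits: (16 - Y) == -Y (mod 8).  */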
8948       if (poly_int_rtx_p (XEXP (x, 0), &const_op0)
8949 	  && known_alignment (poly_uint64 (const_op0)) > mask)
8950 	{
8951 	  x = simplify_gen_unary (NEG, xmode, XEXP (x, 1), xmode);
8952 	  return force_to_mode (x, mode, mask, next_select);
8953 	}
8954 
8955       /* Similarly, if C contains every bit in the fuller_mask, then we may
8956 	 replace with (not Y).  */
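      /* E.g. with MASK == 0xf (so FULLER_MASK == 0xf) and C == 0xff:
	 (0xff - Y) is congruent to ~Y modulo 16, so (minus C Y) can
	 become (not Y) as far as the masked bits are concerned.  */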
8957       if (CONST_INT_P (XEXP (x, 0))
8958 	  && ((UINTVAL (XEXP (x, 0)) | fuller_mask) == UINTVAL (XEXP (x, 0))))
8959 	{
8960 	  x = simplify_gen_unary (NOT, xmode, XEXP (x, 1), xmode);
8961 	  return force_to_mode (x, mode, mask, next_select);
8962 	}
8963 
8964       mask = fuller_mask;
8965       goto binop;
8966 
8967     case IOR:
8968     case XOR:
8969       /* If X is (ior (lshiftrt FOO C1) C2), try to commute the IOR and
8970 	 LSHIFTRT so we end up with an (and (lshiftrt (ior ...) ...) ...)
8971 	 operation which may be a bitfield extraction.  Ensure that the
8972 	 constant we form is not wider than the mode of X.  */
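
      /* Sketch with illustrative constants, assuming MASK covers the low
	 bits: (ior (lshiftrt FOO 8) (const_int 0x1f)) becomes
	 (lshiftrt (ior FOO (const_int 0x1f00)) 8), which under the outer
	 AND may then look like a single bit-field extraction.  */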
8973 
8974       if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
8975 	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8976 	  && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
8977 	  && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT
8978 	  && CONST_INT_P (XEXP (x, 1))
8979 	  && ((INTVAL (XEXP (XEXP (x, 0), 1))
8980 	       + floor_log2 (INTVAL (XEXP (x, 1))))
8981 	      < GET_MODE_PRECISION (xmode))
8982 	  && (UINTVAL (XEXP (x, 1))
8983 	      & ~nonzero_bits (XEXP (x, 0), xmode)) == 0)
8984 	{
8985 	  temp = gen_int_mode ((INTVAL (XEXP (x, 1)) & mask)
8986 			       << INTVAL (XEXP (XEXP (x, 0), 1)),
8987 			       xmode);
8988 	  temp = simplify_gen_binary (GET_CODE (x), xmode,
8989 				      XEXP (XEXP (x, 0), 0), temp);
8990 	  x = simplify_gen_binary (LSHIFTRT, xmode, temp,
8991 				   XEXP (XEXP (x, 0), 1));
8992 	  return force_to_mode (x, mode, mask, next_select);
8993 	}
8994 
8995     binop:
8996       /* For most binary operations, just propagate into the operation and
8997 	 change the mode if we have an operation of that mode.  */
8998 
8999       op0 = force_to_mode (XEXP (x, 0), mode, mask, next_select);
9000       op1 = force_to_mode (XEXP (x, 1), mode, mask, next_select);
9001 
9002       /* If we ended up truncating both operands, truncate the result of the
9003 	 operation instead.  */
9004       if (GET_CODE (op0) == TRUNCATE
9005 	  && GET_CODE (op1) == TRUNCATE)
9006 	{
9007 	  op0 = XEXP (op0, 0);
9008 	  op1 = XEXP (op1, 0);
9009 	}
9010 
9011       op0 = gen_lowpart_or_truncate (op_mode, op0);
9012       op1 = gen_lowpart_or_truncate (op_mode, op1);
9013 
9014       if (op_mode != xmode || op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
9015 	{
9016 	  x = simplify_gen_binary (code, op_mode, op0, op1);
9017 	  xmode = op_mode;
9018 	}
9019       break;
9020 
9021     case ASHIFT:
9022       /* For left shifts, do the same, but just for the first operand.
9023 	 However, we cannot do anything with shifts where we cannot
9024 	 guarantee that the counts are smaller than the size of the mode
9025 	 because such a count will have a different meaning in a
9026 	 wider mode.  */
9027 
9028       if (! (CONST_INT_P (XEXP (x, 1))
9029 	     && INTVAL (XEXP (x, 1)) >= 0
9030 	     && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (mode))
9031 	  && ! (GET_MODE (XEXP (x, 1)) != VOIDmode
9032 		&& (nonzero_bits (XEXP (x, 1), GET_MODE (XEXP (x, 1)))
9033 		    < (unsigned HOST_WIDE_INT) GET_MODE_PRECISION (mode))))
9034 	break;
9035 
9036       /* If the shift count is a constant and we can do arithmetic in
9037 	 the mode of the shift, refine which bits we need.  Otherwise, use the
9038 	 conservative form of the mask.  */
9039       if (CONST_INT_P (XEXP (x, 1))
9040 	  && INTVAL (XEXP (x, 1)) >= 0
9041 	  && INTVAL (XEXP (x, 1)) < GET_MODE_PRECISION (op_mode)
9042 	  && HWI_COMPUTABLE_MODE_P (op_mode))
9043 	mask >>= INTVAL (XEXP (x, 1));
9044       else
9045 	mask = fuller_mask;
9046 
9047       op0 = gen_lowpart_or_truncate (op_mode,
9048 				     force_to_mode (XEXP (x, 0), mode,
9049 						    mask, next_select));
9050 
9051       if (op_mode != xmode || op0 != XEXP (x, 0))
9052 	{
9053 	  x = simplify_gen_binary (code, op_mode, op0, XEXP (x, 1));
9054 	  xmode = op_mode;
9055 	}
9056       break;
9057 
9058     case LSHIFTRT:
9059       /* Here we can only do something if the shift count is a constant,
9060 	 this shift constant is valid for the host, and we can do arithmetic
9061 	 in OP_MODE.  */
9062 
9063       if (CONST_INT_P (XEXP (x, 1))
9064 	  && INTVAL (XEXP (x, 1)) >= 0
9065 	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
9066 	  && HWI_COMPUTABLE_MODE_P (op_mode))
9067 	{
9068 	  rtx inner = XEXP (x, 0);
9069 	  unsigned HOST_WIDE_INT inner_mask;
9070 
9071 	  /* Select the mask of the bits we need for the shift operand.  */
9072 	  inner_mask = mask << INTVAL (XEXP (x, 1));
9073 
9074 	  /* We can only change the mode of the shift if we can do arithmetic
9075 	     in the mode of the shift and INNER_MASK is no wider than the
9076 	     width of X's mode.  */
9077 	  if ((inner_mask & ~GET_MODE_MASK (xmode)) != 0)
9078 	    op_mode = xmode;
9079 
9080 	  inner = force_to_mode (inner, op_mode, inner_mask, next_select);
9081 
9082 	  if (xmode != op_mode || inner != XEXP (x, 0))
9083 	    {
9084 	      x = simplify_gen_binary (LSHIFTRT, op_mode, inner, XEXP (x, 1));
9085 	      xmode = op_mode;
9086 	    }
9087 	}
9088 
9089       /* If we have (and (lshiftrt FOO C1) C2) where the combination of the
9090 	 shift and AND produces only copies of the sign bit (C2 is one less
9091 	 than a power of two), we can do this with just a shift.  */
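
      /* For example (numbers invented for illustration): in a 32-bit mode
	 where FOO is known to have 24 sign-bit copies,
	 (and (lshiftrt FOO 10) 7) selects three bits that are all copies
	 of the sign bit, so it is equivalent to (lshiftrt FOO 29).  */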
9092 
9093       if (GET_CODE (x) == LSHIFTRT
9094 	  && CONST_INT_P (XEXP (x, 1))
9095 	  /* The shift puts one of the sign bit copies in the least significant
9096 	     bit.  */
9097 	  && ((INTVAL (XEXP (x, 1))
9098 	       + num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0))))
9099 	      >= GET_MODE_PRECISION (xmode))
9100 	  && pow2p_hwi (mask + 1)
9101 	  /* Number of bits left after the shift must be at least as many
9102 	     as the mask needs.  */
9103 	  && ((INTVAL (XEXP (x, 1)) + exact_log2 (mask + 1))
9104 	      <= GET_MODE_PRECISION (xmode))
9105 	  /* Must be at least as many sign bit copies as the mask needs.  */
9106 	  && ((int) num_sign_bit_copies (XEXP (x, 0), GET_MODE (XEXP (x, 0)))
9107 	      >= exact_log2 (mask + 1)))
9108 	{
9109 	  int nbits = GET_MODE_PRECISION (xmode) - exact_log2 (mask + 1);
9110 	  x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0),
9111 				   gen_int_shift_amount (xmode, nbits));
9112 	}
9113       goto shiftrt;
9114 
9115     case ASHIFTRT:
9116       /* If we are just looking for the sign bit, we don't need this shift at
9117 	 all, even if it has a variable count.  */
9118       if (val_signbit_p (xmode, mask))
9119 	return force_to_mode (XEXP (x, 0), mode, mask, next_select);
9120 
9121       /* If this is a shift by a constant, get a mask that contains those bits
9122 	 that are not copies of the sign bit.  We then have two cases:  If
9123 	 MASK only includes those bits, this can be a logical shift, which may
9124 	 allow simplifications.  If MASK is a single-bit field not within
9125 	 those bits, we are requesting a copy of the sign bit and hence can
9126 	 shift the sign bit to the appropriate location.  */
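
      /* Illustration: in a 32-bit mode, (ashiftrt X 8) and (lshiftrt X 8)
	 differ only in the top eight bits, so under a MASK of 0xff the
	 arithmetic shift can be treated as a logical one.  */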
9127 
9128       if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) >= 0
9129 	  && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
9130 	{
9131 	  unsigned HOST_WIDE_INT nonzero;
9132 	  int i;
9133 
9134 	  /* If the considered data is wider than HOST_WIDE_INT, we can't
9135 	     represent a mask for all its bits in a single scalar.
9136 	     But we only care about the lower bits, so calculate these.  */
9137 
9138 	  if (GET_MODE_PRECISION (xmode) > HOST_BITS_PER_WIDE_INT)
9139 	    {
9140 	      nonzero = HOST_WIDE_INT_M1U;
9141 
9142 	      /* GET_MODE_PRECISION (GET_MODE (x)) - INTVAL (XEXP (x, 1))
9143 		 is the number of bits a full-width mask would have set.
9144 		 We need only shift if these are fewer than nonzero can
9145 		 hold.  If not, we must keep all bits set in nonzero.  */
9146 
9147 	      if (GET_MODE_PRECISION (xmode) - INTVAL (XEXP (x, 1))
9148 		  < HOST_BITS_PER_WIDE_INT)
9149 		nonzero >>= INTVAL (XEXP (x, 1))
9150 			    + HOST_BITS_PER_WIDE_INT
9151 			    - GET_MODE_PRECISION (xmode);
9152 	    }
9153 	  else
9154 	    {
9155 	      nonzero = GET_MODE_MASK (xmode);
9156 	      nonzero >>= INTVAL (XEXP (x, 1));
9157 	    }
9158 
9159 	  if ((mask & ~nonzero) == 0)
9160 	    {
9161 	      x = simplify_shift_const (NULL_RTX, LSHIFTRT, xmode,
9162 					XEXP (x, 0), INTVAL (XEXP (x, 1)));
9163 	      if (GET_CODE (x) != ASHIFTRT)
9164 		return force_to_mode (x, mode, mask, next_select);
9165 	    }
9166 
9167 	  else if ((i = exact_log2 (mask)) >= 0)
9168 	    {
9169 	      x = simplify_shift_const
9170 		  (NULL_RTX, LSHIFTRT, xmode, XEXP (x, 0),
9171 		   GET_MODE_PRECISION (xmode) - 1 - i);
9172 
9173 	      if (GET_CODE (x) != ASHIFTRT)
9174 		return force_to_mode (x, mode, mask, next_select);
9175 	    }
9176 	}
9177 
9178       /* If MASK is 1, convert this to an LSHIFTRT.  This can be done
9179 	 even if the shift count isn't a constant.  */
9180       if (mask == 1)
9181 	x = simplify_gen_binary (LSHIFTRT, xmode, XEXP (x, 0), XEXP (x, 1));
9182 
9183     shiftrt:
9184 
9185       /* If this is a zero- or sign-extension operation that just affects bits
9186 	 we don't care about, remove it.  Be sure the call above returned
9187 	 something that is still a shift.  */
9188 
9189       if ((GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ASHIFTRT)
9190 	  && CONST_INT_P (XEXP (x, 1))
9191 	  && INTVAL (XEXP (x, 1)) >= 0
9192 	  && (INTVAL (XEXP (x, 1))
9193 	      <= GET_MODE_PRECISION (xmode) - (floor_log2 (mask) + 1))
9194 	  && GET_CODE (XEXP (x, 0)) == ASHIFT
9195 	  && XEXP (XEXP (x, 0), 1) == XEXP (x, 1))
9196 	return force_to_mode (XEXP (XEXP (x, 0), 0), mode, mask,
9197 			      next_select);
9198 
9199       break;
9200 
9201     case ROTATE:
9202     case ROTATERT:
9203       /* If the shift count is constant and we can do computations
9204 	 in the mode of X, compute where the bits we care about are.
9205 	 Otherwise, we can't do anything.  Don't change the mode of
9206 	 the shift or propagate MODE into the shift, though.  */
9207       if (CONST_INT_P (XEXP (x, 1))
9208 	  && INTVAL (XEXP (x, 1)) >= 0)
9209 	{
9210 	  temp = simplify_binary_operation (code == ROTATE ? ROTATERT : ROTATE,
9211 					    xmode, gen_int_mode (mask, xmode),
9212 					    XEXP (x, 1));
9213 	  if (temp && CONST_INT_P (temp))
9214 	    x = simplify_gen_binary (code, xmode,
9215 				     force_to_mode (XEXP (x, 0), xmode,
9216 						    INTVAL (temp), next_select),
9217 				     XEXP (x, 1));
9218 	}
9219       break;
9220 
9221     case NEG:
9222       /* If we just want the low-order bit, the NEG isn't needed since it
9223 	 won't change the low-order bit.  */
9224       if (mask == 1)
9225 	return force_to_mode (XEXP (x, 0), mode, mask, just_select);
9226 
9227       /* We need any bits less significant than the most significant bit in
9228 	 MASK since carries from those bits will affect the bits we are
9229 	 interested in.  */
9230       mask = fuller_mask;
9231       goto unop;
9232 
9233     case NOT:
9234       /* (not FOO) is (xor FOO CONST), so if FOO is an LSHIFTRT, we can do the
9235 	 same as the XOR case above.  Ensure that the constant we form is not
9236 	 wider than the mode of X.  */
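
      /* Sketch with illustrative constants: under a MASK of 0xff,
	 (not (lshiftrt FOO 8)) can be rewritten as
	 (lshiftrt (xor FOO (const_int 0xff00)) 8), since flipping bits
	 8..15 of FOO before the shift flips bits 0..7 afterwards.  */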
9237 
9238       if (GET_CODE (XEXP (x, 0)) == LSHIFTRT
9239 	  && CONST_INT_P (XEXP (XEXP (x, 0), 1))
9240 	  && INTVAL (XEXP (XEXP (x, 0), 1)) >= 0
9241 	  && (INTVAL (XEXP (XEXP (x, 0), 1)) + floor_log2 (mask)
9242 	      < GET_MODE_PRECISION (xmode))
9243 	  && INTVAL (XEXP (XEXP (x, 0), 1)) < HOST_BITS_PER_WIDE_INT)
9244 	{
9245 	  temp = gen_int_mode (mask << INTVAL (XEXP (XEXP (x, 0), 1)), xmode);
9246 	  temp = simplify_gen_binary (XOR, xmode, XEXP (XEXP (x, 0), 0), temp);
9247 	  x = simplify_gen_binary (LSHIFTRT, xmode,
9248 				   temp, XEXP (XEXP (x, 0), 1));
9249 
9250 	  return force_to_mode (x, mode, mask, next_select);
9251 	}
9252 
9253       /* (and (not FOO) CONST) is (not (or FOO (not CONST))), so we must
9254 	 use the full mask inside the NOT.  */
9255       mask = fuller_mask;
9256 
9257     unop:
9258       op0 = gen_lowpart_or_truncate (op_mode,
9259 				     force_to_mode (XEXP (x, 0), mode, mask,
9260 						    next_select));
9261       if (op_mode != xmode || op0 != XEXP (x, 0))
9262 	{
9263 	  x = simplify_gen_unary (code, op_mode, op0, op_mode);
9264 	  xmode = op_mode;
9265 	}
9266       break;
9267 
9268     case NE:
9269       /* (and (ne FOO 0) CONST) can be (and FOO CONST) if CONST is included
9270 	 in STORE_FLAG_VALUE and FOO has a single bit that might be nonzero,
9271 	 which is equal to STORE_FLAG_VALUE.  */
9272       if ((mask & ~STORE_FLAG_VALUE) == 0
9273 	  && XEXP (x, 1) == const0_rtx
9274 	  && GET_MODE (XEXP (x, 0)) == mode
9275 	  && pow2p_hwi (nonzero_bits (XEXP (x, 0), mode))
9276 	  && (nonzero_bits (XEXP (x, 0), mode)
9277 	      == (unsigned HOST_WIDE_INT) STORE_FLAG_VALUE))
9278 	return force_to_mode (XEXP (x, 0), mode, mask, next_select);
9279 
9280       break;
9281 
9282     case IF_THEN_ELSE:
9283       /* We have no way of knowing if the IF_THEN_ELSE can itself be
9284 	 written in a narrower mode.  We play it safe and do not do so.  */
9285 
9286       op0 = gen_lowpart_or_truncate (xmode,
9287 				     force_to_mode (XEXP (x, 1), mode,
9288 						    mask, next_select));
9289       op1 = gen_lowpart_or_truncate (xmode,
9290 				     force_to_mode (XEXP (x, 2), mode,
9291 						    mask, next_select));
9292       if (op0 != XEXP (x, 1) || op1 != XEXP (x, 2))
9293 	x = simplify_gen_ternary (IF_THEN_ELSE, xmode,
9294 				  GET_MODE (XEXP (x, 0)), XEXP (x, 0),
9295 				  op0, op1);
9296       break;
9297 
9298     default:
9299       break;
9300     }
9301 
9302   /* Ensure we return a value of the proper mode.  */
9303   return gen_lowpart_or_truncate (mode, x);
9304 }
9305 
9306 /* Return nonzero if X is an expression that has one of two values depending on
9307    whether some other value is zero or nonzero.  In that case, we return the
9308    value that is being tested, *PTRUE is set to the value X has when the rtx
9309    being returned is nonzero, and *PFALSE is set to the other alternative.
9310 
9311    If we return zero, we set *PTRUE and *PFALSE to X.  */
9312 
9313 static rtx
9314 if_then_else_cond (rtx x, rtx *ptrue, rtx *pfalse)
9315 {
9316   machine_mode mode = GET_MODE (x);
9317   enum rtx_code code = GET_CODE (x);
9318   rtx cond0, cond1, true0, true1, false0, false1;
9319   unsigned HOST_WIDE_INT nz;
9320   scalar_int_mode int_mode;
9321 
9322   /* If we are comparing a value against zero, we are done.  */
9323   if ((code == NE || code == EQ)
9324       && XEXP (x, 1) == const0_rtx)
9325     {
9326       *ptrue = (code == NE) ? const_true_rtx : const0_rtx;
9327       *pfalse = (code == NE) ? const0_rtx : const_true_rtx;
9328       return XEXP (x, 0);
9329     }
9330 
9331   /* If this is a unary operation whose operand has one of two values, apply
9332      our opcode to compute those values.  */
9333   else if (UNARY_P (x)
9334 	   && (cond0 = if_then_else_cond (XEXP (x, 0), &true0, &false0)) != 0)
9335     {
9336       *ptrue = simplify_gen_unary (code, mode, true0, GET_MODE (XEXP (x, 0)));
9337       *pfalse = simplify_gen_unary (code, mode, false0,
9338 				    GET_MODE (XEXP (x, 0)));
9339       return cond0;
9340     }
9341 
9342   /* If this is a COMPARE, do nothing, since the IF_THEN_ELSE we would
9343      make can't possibly match and would suppress other optimizations.  */
9344   else if (code == COMPARE)
9345     ;
9346 
9347   /* If this is a binary operation, see if either side has only one of two
9348      values.  If either one does or if both do and they are conditional on
9349      the same value, compute the new true and false values.  */
9350   else if (BINARY_P (x))
9351     {
9352       rtx op0 = XEXP (x, 0);
9353       rtx op1 = XEXP (x, 1);
9354       cond0 = if_then_else_cond (op0, &true0, &false0);
9355       cond1 = if_then_else_cond (op1, &true1, &false1);
9356 
9357       if ((cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1))
9358 	  && (REG_P (op0) || REG_P (op1)))
9359 	{
9360 	  /* Try to enable a simplification by undoing work done by
9361 	     if_then_else_cond if it converted a REG into something more
9362 	     complex.  */
9363 	  if (REG_P (op0))
9364 	    {
9365 	      cond0 = 0;
9366 	      true0 = false0 = op0;
9367 	    }
9368 	  else
9369 	    {
9370 	      cond1 = 0;
9371 	      true1 = false1 = op1;
9372 	    }
9373 	}
9374 
9375       if ((cond0 != 0 || cond1 != 0)
9376 	  && ! (cond0 != 0 && cond1 != 0 && !rtx_equal_p (cond0, cond1)))
9377 	{
9378 	  /* If if_then_else_cond returned zero, then true/false are the
9379 	     same rtl.  We must copy one of them to prevent invalid rtl
9380 	     sharing.  */
9381 	  if (cond0 == 0)
9382 	    true0 = copy_rtx (true0);
9383 	  else if (cond1 == 0)
9384 	    true1 = copy_rtx (true1);
9385 
9386 	  if (COMPARISON_P (x))
9387 	    {
9388 	      *ptrue = simplify_gen_relational (code, mode, VOIDmode,
9389 						true0, true1);
9390 	      *pfalse = simplify_gen_relational (code, mode, VOIDmode,
9391 						 false0, false1);
9392 	     }
9393 	  else
9394 	    {
9395 	      *ptrue = simplify_gen_binary (code, mode, true0, true1);
9396 	      *pfalse = simplify_gen_binary (code, mode, false0, false1);
9397 	    }
9398 
9399 	  return cond0 ? cond0 : cond1;
9400 	}
9401 
9402       /* See if we have PLUS, IOR, XOR, MINUS or UMAX, where one of the
9403 	 operands is zero when the other is nonzero, and vice-versa,
9404 	 and STORE_FLAG_VALUE is 1 or -1.  */
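
      /* For instance, with STORE_FLAG_VALUE == 1,
	 (plus (mult (eq A B) X) (mult (ne A B) Y)) evaluates to X when
	 A == B and to Y otherwise, so the condition (eq A B) is returned
	 with X and Y as the true and false alternatives.  */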
9405 
9406       if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
9407 	  && (code == PLUS || code == IOR || code == XOR || code == MINUS
9408 	      || code == UMAX)
9409 	  && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
9410 	{
9411 	  rtx op0 = XEXP (XEXP (x, 0), 1);
9412 	  rtx op1 = XEXP (XEXP (x, 1), 1);
9413 
9414 	  cond0 = XEXP (XEXP (x, 0), 0);
9415 	  cond1 = XEXP (XEXP (x, 1), 0);
9416 
9417 	  if (COMPARISON_P (cond0)
9418 	      && COMPARISON_P (cond1)
9419 	      && SCALAR_INT_MODE_P (mode)
9420 	      && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
9421 		   && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
9422 		   && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
9423 		  || ((swap_condition (GET_CODE (cond0))
9424 		       == reversed_comparison_code (cond1, NULL))
9425 		      && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
9426 		      && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
9427 	      && ! side_effects_p (x))
9428 	    {
9429 	      *ptrue = simplify_gen_binary (MULT, mode, op0, const_true_rtx);
9430 	      *pfalse = simplify_gen_binary (MULT, mode,
9431 					     (code == MINUS
9432 					      ? simplify_gen_unary (NEG, mode,
9433 								    op1, mode)
9434 					      : op1),
9435 					      const_true_rtx);
9436 	      return cond0;
9437 	    }
9438 	}
9439 
9440       /* Similarly for MULT, AND and UMIN, except that for these the result
9441 	 is always zero.  */
9442       if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
9443 	  && (code == MULT || code == AND || code == UMIN)
9444 	  && GET_CODE (XEXP (x, 0)) == MULT && GET_CODE (XEXP (x, 1)) == MULT)
9445 	{
9446 	  cond0 = XEXP (XEXP (x, 0), 0);
9447 	  cond1 = XEXP (XEXP (x, 1), 0);
9448 
9449 	  if (COMPARISON_P (cond0)
9450 	      && COMPARISON_P (cond1)
9451 	      && ((GET_CODE (cond0) == reversed_comparison_code (cond1, NULL)
9452 		   && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 0))
9453 		   && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 1)))
9454 		  || ((swap_condition (GET_CODE (cond0))
9455 		       == reversed_comparison_code (cond1, NULL))
9456 		      && rtx_equal_p (XEXP (cond0, 0), XEXP (cond1, 1))
9457 		      && rtx_equal_p (XEXP (cond0, 1), XEXP (cond1, 0))))
9458 	      && ! side_effects_p (x))
9459 	    {
9460 	      *ptrue = *pfalse = const0_rtx;
9461 	      return cond0;
9462 	    }
9463 	}
9464     }
9465 
9466   else if (code == IF_THEN_ELSE)
9467     {
9468       /* If we have IF_THEN_ELSE already, extract the condition and
9469 	 canonicalize it if it is NE or EQ.  */
9470       cond0 = XEXP (x, 0);
9471       *ptrue = XEXP (x, 1), *pfalse = XEXP (x, 2);
9472       if (GET_CODE (cond0) == NE && XEXP (cond0, 1) == const0_rtx)
9473 	return XEXP (cond0, 0);
9474       else if (GET_CODE (cond0) == EQ && XEXP (cond0, 1) == const0_rtx)
9475 	{
9476 	  *ptrue = XEXP (x, 2), *pfalse = XEXP (x, 1);
9477 	  return XEXP (cond0, 0);
9478 	}
9479       else
9480 	return cond0;
9481     }
9482 
9483   /* If X is a SUBREG, we can narrow both the true and false values
9484      of the inner expression, if there is a condition.  */
9485   else if (code == SUBREG
9486 	   && (cond0 = if_then_else_cond (SUBREG_REG (x), &true0,
9487 					  &false0)) != 0)
9488     {
9489       true0 = simplify_gen_subreg (mode, true0,
9490 				   GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
9491       false0 = simplify_gen_subreg (mode, false0,
9492 				    GET_MODE (SUBREG_REG (x)), SUBREG_BYTE (x));
9493       if (true0 && false0)
9494 	{
9495 	  *ptrue = true0;
9496 	  *pfalse = false0;
9497 	  return cond0;
9498 	}
9499     }
9500 
9501   /* If X is a constant, this isn't special and will cause confusion
9502      if we treat it as such.  Likewise if it is equivalent to a constant.  */
9503   else if (CONSTANT_P (x)
9504 	   || ((cond0 = get_last_value (x)) != 0 && CONSTANT_P (cond0)))
9505     ;
9506 
9507   /* If we're in BImode, canonicalize on 0 and STORE_FLAG_VALUE, as that
9508      will be least confusing to the rest of the compiler.  */
9509   else if (mode == BImode)
9510     {
9511       *ptrue = GEN_INT (STORE_FLAG_VALUE), *pfalse = const0_rtx;
9512       return x;
9513     }
9514 
9515   /* If X is known to be either 0 or -1, those are the true and
9516      false values when testing X.  */
9517   else if (x == constm1_rtx || x == const0_rtx
9518 	   || (is_a <scalar_int_mode> (mode, &int_mode)
9519 	       && (num_sign_bit_copies (x, int_mode)
9520 		   == GET_MODE_PRECISION (int_mode))))
9521     {
9522       *ptrue = constm1_rtx, *pfalse = const0_rtx;
9523       return x;
9524     }
9525 
9526   /* Likewise for 0 or a single bit.  */
9527   else if (HWI_COMPUTABLE_MODE_P (mode)
9528 	   && pow2p_hwi (nz = nonzero_bits (x, mode)))
9529     {
9530       *ptrue = gen_int_mode (nz, mode), *pfalse = const0_rtx;
9531       return x;
9532     }
9533 
9534   /* Otherwise fail; show no condition with true and false values the same.  */
9535   *ptrue = *pfalse = x;
9536   return 0;
9537 }
9538 
9539 /* Return the value of expression X given the fact that condition COND
9540    is known to be true when applied to REG as its first operand and VAL
9541    as its second.  X is known to not be shared and so can be modified in
9542    place.
9543 
9544    We only handle the simplest cases, and specifically those cases that
9545    arise with IF_THEN_ELSE expressions.  */
9546 
9547 static rtx
9548 known_cond (rtx x, enum rtx_code cond, rtx reg, rtx val)
9549 {
9550   enum rtx_code code = GET_CODE (x);
9551   const char *fmt;
9552   int i, j;
9553 
9554   if (side_effects_p (x))
9555     return x;
9556 
9557   /* If either operand of the condition is a floating point value,
9558      then we have to avoid collapsing an EQ comparison.  */
9559   if (cond == EQ
9560       && rtx_equal_p (x, reg)
9561       && ! FLOAT_MODE_P (GET_MODE (x))
9562       && ! FLOAT_MODE_P (GET_MODE (val)))
9563     return val;
9564 
9565   if (cond == UNEQ && rtx_equal_p (x, reg))
9566     return val;
9567 
9568   /* If X is (abs REG) and we know something about REG's relationship
9569      with zero, we may be able to simplify this.  */
9570 
9571   if (code == ABS && rtx_equal_p (XEXP (x, 0), reg) && val == const0_rtx)
9572     switch (cond)
9573       {
9574       case GE:  case GT:  case EQ:
9575 	return XEXP (x, 0);
9576       case LT:  case LE:
9577 	return simplify_gen_unary (NEG, GET_MODE (XEXP (x, 0)),
9578 				   XEXP (x, 0),
9579 				   GET_MODE (XEXP (x, 0)));
9580       default:
9581 	break;
9582       }
9583 
9584   /* The only other cases we handle are MIN, MAX, and comparisons if the
9585      operands are the same as REG and VAL.  */
9586 
9587   else if (COMPARISON_P (x) || COMMUTATIVE_ARITH_P (x))
9588     {
9589       if (rtx_equal_p (XEXP (x, 0), val))
9590         {
9591 	  std::swap (val, reg);
9592 	  cond = swap_condition (cond);
9593         }
9594 
9595       if (rtx_equal_p (XEXP (x, 0), reg) && rtx_equal_p (XEXP (x, 1), val))
9596 	{
9597 	  if (COMPARISON_P (x))
9598 	    {
9599 	      if (comparison_dominates_p (cond, code))
9600 		return VECTOR_MODE_P (GET_MODE (x)) ? x : const_true_rtx;
9601 
9602 	      code = reversed_comparison_code (x, NULL);
9603 	      if (code != UNKNOWN
9604 		  && comparison_dominates_p (cond, code))
9605 		return CONST0_RTX (GET_MODE (x));
9606 	      else
9607 		return x;
9608 	    }
9609 	  else if (code == SMAX || code == SMIN
9610 		   || code == UMIN || code == UMAX)
9611 	    {
9612 	      int unsignedp = (code == UMIN || code == UMAX);
9613 
9614 	      /* Do not reverse the condition when it is NE or EQ.
9615 		 This is because we cannot conclude anything about
9616 		 the value of 'SMAX (x, y)' when x is not equal to y,
9617 		 but we can when x equals y.  */
9618 	      if ((code == SMAX || code == UMAX)
9619 		  && ! (cond == EQ || cond == NE))
9620 		cond = reverse_condition (cond);
9621 
9622 	      switch (cond)
9623 		{
9624 		case GE:   case GT:
9625 		  return unsignedp ? x : XEXP (x, 1);
9626 		case LE:   case LT:
9627 		  return unsignedp ? x : XEXP (x, 0);
9628 		case GEU:  case GTU:
9629 		  return unsignedp ? XEXP (x, 1) : x;
9630 		case LEU:  case LTU:
9631 		  return unsignedp ? XEXP (x, 0) : x;
9632 		default:
9633 		  break;
9634 		}
9635 	    }
9636 	}
9637     }
9638   else if (code == SUBREG)
9639     {
9640       machine_mode inner_mode = GET_MODE (SUBREG_REG (x));
9641       rtx new_rtx, r = known_cond (SUBREG_REG (x), cond, reg, val);
9642 
9643       if (SUBREG_REG (x) != r)
9644 	{
9645 	  /* We must simplify subreg here, before we lose track of the
9646 	     original inner_mode.  */
9647 	  new_rtx = simplify_subreg (GET_MODE (x), r,
9648 				     inner_mode, SUBREG_BYTE (x));
9649 	  if (new_rtx)
9650 	    return new_rtx;
9651 	  else
9652 	    SUBST (SUBREG_REG (x), r);
9653 	}
9654 
9655       return x;
9656     }
9657   /* We don't have to handle SIGN_EXTEND here, because even in the
9658      case of replacing something with a modeless CONST_INT, a
9659      CONST_INT is already (supposed to be) a valid sign extension for
9660      its narrower mode, which implies it's already properly
9661      sign-extended for the wider mode.  Now, for ZERO_EXTEND, the
9662      story is different.  */
9663   else if (code == ZERO_EXTEND)
9664     {
9665       machine_mode inner_mode = GET_MODE (XEXP (x, 0));
9666       rtx new_rtx, r = known_cond (XEXP (x, 0), cond, reg, val);
9667 
9668       if (XEXP (x, 0) != r)
9669 	{
9670 	  /* We must simplify the zero_extend here, before we lose
9671 	     track of the original inner_mode.  */
9672 	  new_rtx = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
9673 					      r, inner_mode);
9674 	  if (new_rtx)
9675 	    return new_rtx;
9676 	  else
9677 	    SUBST (XEXP (x, 0), r);
9678 	}
9679 
9680       return x;
9681     }
9682 
9683   fmt = GET_RTX_FORMAT (code);
9684   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
9685     {
9686       if (fmt[i] == 'e')
9687 	SUBST (XEXP (x, i), known_cond (XEXP (x, i), cond, reg, val));
9688       else if (fmt[i] == 'E')
9689 	for (j = XVECLEN (x, i) - 1; j >= 0; j--)
9690 	  SUBST (XVECEXP (x, i, j), known_cond (XVECEXP (x, i, j),
9691 						cond, reg, val));
9692     }
9693 
9694   return x;
9695 }
9696 
9697 /* See if X and Y are equal for the purposes of seeing if we can rewrite an
9698    assignment as a field assignment.  */
9699 
9700 static int
9701 rtx_equal_for_field_assignment_p (rtx x, rtx y, bool widen_x)
9702 {
9703   if (widen_x && GET_MODE (x) != GET_MODE (y))
9704     {
9705       if (paradoxical_subreg_p (GET_MODE (x), GET_MODE (y)))
9706 	return 0;
9707       if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
9708 	return 0;
9709       x = adjust_address_nv (x, GET_MODE (y),
9710 			     byte_lowpart_offset (GET_MODE (y),
9711 						  GET_MODE (x)));
9712     }
9713 
9714   if (x == y || rtx_equal_p (x, y))
9715     return 1;
9716 
9717   if (x == 0 || y == 0 || GET_MODE (x) != GET_MODE (y))
9718     return 0;
9719 
9720   /* Check for a paradoxical SUBREG of a MEM compared with the MEM.
9721      Note that all SUBREGs of MEM are paradoxical; otherwise they
9722      would have been rewritten.  */
9723   if (MEM_P (x) && GET_CODE (y) == SUBREG
9724       && MEM_P (SUBREG_REG (y))
9725       && rtx_equal_p (SUBREG_REG (y),
9726 		      gen_lowpart (GET_MODE (SUBREG_REG (y)), x)))
9727     return 1;
9728 
9729   if (MEM_P (y) && GET_CODE (x) == SUBREG
9730       && MEM_P (SUBREG_REG (x))
9731       && rtx_equal_p (SUBREG_REG (x),
9732 		      gen_lowpart (GET_MODE (SUBREG_REG (x)), y)))
9733     return 1;
9734 
9735   /* We used to see if get_last_value of X and Y were the same but that's
9736      not correct.  In one direction, we'll cause the assignment to have
9737      the wrong destination; in the other, we'll import a register into this
9738      insn that might already have been dead.  So fail if none of the
9739      above cases are true.  */
9740   return 0;
9741 }
9742 
9743 /* See if X, a SET operation, can be rewritten as a bit-field assignment.
9744    Return that assignment if so.
9745 
9746    We only handle the most common cases.  */
9747 
9748 static rtx
9749 make_field_assignment (rtx x)
9750 {
9751   rtx dest = SET_DEST (x);
9752   rtx src = SET_SRC (x);
9753   rtx assign;
9754   rtx rhs, lhs;
9755   HOST_WIDE_INT c1;
9756   HOST_WIDE_INT pos;
9757   unsigned HOST_WIDE_INT len;
9758   rtx other;
9759 
9760   /* All the rules in this function are specific to scalar integers.  */
9761   scalar_int_mode mode;
9762   if (!is_a <scalar_int_mode> (GET_MODE (dest), &mode))
9763     return x;
9764 
9765   /* If SRC was (and (not (ashift (const_int 1) POS)) DEST), this is
9766      a clear of a one-bit field.  We will have changed it to
9767      (and (rotate (const_int -2) POS) DEST), so check for that.  Also check
9768      for a SUBREG.  */
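
  /* E.g. (rotate (const_int -2) POS) is a mask with only bit POS clear,
     so (set DEST (and (rotate -2 POS) DEST)) clears that single bit and
     can be rewritten as storing zero into a one-bit field of DEST at
     position POS.  */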
9769 
9770   if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == ROTATE
9771       && CONST_INT_P (XEXP (XEXP (src, 0), 0))
9772       && INTVAL (XEXP (XEXP (src, 0), 0)) == -2
9773       && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9774     {
9775       assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9776 				1, 1, 1, 0);
9777       if (assign != 0)
9778 	return gen_rtx_SET (assign, const0_rtx);
9779       return x;
9780     }
9781 
9782   if (GET_CODE (src) == AND && GET_CODE (XEXP (src, 0)) == SUBREG
9783       && subreg_lowpart_p (XEXP (src, 0))
9784       && partial_subreg_p (XEXP (src, 0))
9785       && GET_CODE (SUBREG_REG (XEXP (src, 0))) == ROTATE
9786       && CONST_INT_P (XEXP (SUBREG_REG (XEXP (src, 0)), 0))
9787       && INTVAL (XEXP (SUBREG_REG (XEXP (src, 0)), 0)) == -2
9788       && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9789     {
9790       assign = make_extraction (VOIDmode, dest, 0,
9791 				XEXP (SUBREG_REG (XEXP (src, 0)), 1),
9792 				1, 1, 1, 0);
9793       if (assign != 0)
9794 	return gen_rtx_SET (assign, const0_rtx);
9795       return x;
9796     }
9797 
9798   /* If SRC is (ior (ashift (const_int 1) POS) DEST), this is a set of a
9799      one-bit field.  */
9800   if (GET_CODE (src) == IOR && GET_CODE (XEXP (src, 0)) == ASHIFT
9801       && XEXP (XEXP (src, 0), 0) == const1_rtx
9802       && rtx_equal_for_field_assignment_p (dest, XEXP (src, 1)))
9803     {
9804       assign = make_extraction (VOIDmode, dest, 0, XEXP (XEXP (src, 0), 1),
9805 				1, 1, 1, 0);
9806       if (assign != 0)
9807 	return gen_rtx_SET (assign, const1_rtx);
9808       return x;
9809     }
9810 
9811   /* If DEST is already a field assignment, i.e. ZERO_EXTRACT, and the
9812      SRC is an AND with all bits of that field set, then we can discard
9813      the AND.  */
9814   if (GET_CODE (dest) == ZERO_EXTRACT
9815       && CONST_INT_P (XEXP (dest, 1))
9816       && GET_CODE (src) == AND
9817       && CONST_INT_P (XEXP (src, 1)))
9818     {
9819       HOST_WIDE_INT width = INTVAL (XEXP (dest, 1));
9820       unsigned HOST_WIDE_INT and_mask = INTVAL (XEXP (src, 1));
9821       unsigned HOST_WIDE_INT ze_mask;
9822 
9823       if (width >= HOST_BITS_PER_WIDE_INT)
9824 	ze_mask = -1;
9825       else
9826 	ze_mask = ((unsigned HOST_WIDE_INT)1 << width) - 1;
9827 
9828       /* Complete overlap.  We can remove the source AND.  */
9829       if ((and_mask & ze_mask) == ze_mask)
9830 	return gen_rtx_SET (dest, XEXP (src, 0));
9831 
9832       /* Partial overlap.  We can reduce the source AND.  */
9833       if ((and_mask & ze_mask) != and_mask)
9834 	{
9835 	  src = gen_rtx_AND (mode, XEXP (src, 0),
9836 			     gen_int_mode (and_mask & ze_mask, mode));
9837 	  return gen_rtx_SET (dest, src);
9838 	}
9839     }
9840 
9841   /* The other case we handle is assignments into a constant-position
9842      field.  They look like (ior/xor (and DEST C1) OTHER).  If C1 represents
9843      a mask that has all one bits except for a group of zero bits and
9844      OTHER is known to have zeros where C1 has ones, this is such an
9845      assignment.  Compute the position and length from C1.  Shift OTHER
9846      to the appropriate position, force it to the required mode, and
9847      make the extraction.  Check for the AND in both operands.  */
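
  /* Worked example with invented constants: in SImode, with
     C1 == 0xffffff0f and OTHER known to be nonzero only in bits 4..7,
     (ior (and DEST 0xffffff0f) OTHER) stores a four-bit field at
     position 4; POS == 4 and LEN == 4 below, and OTHER is shifted
     right by 4 to form the new source.  */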
9848 
9849   /* One or more SUBREGs might obscure the constant-position field
9850      assignment.  The first one we are likely to encounter is an outer
9851      narrowing SUBREG, which we can just strip for the purposes of
9852      identifying the constant-field assignment.  */
9853   scalar_int_mode src_mode = mode;
9854   if (GET_CODE (src) == SUBREG
9855       && subreg_lowpart_p (src)
9856       && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (src)), &src_mode))
9857     src = SUBREG_REG (src);
9858 
9859   if (GET_CODE (src) != IOR && GET_CODE (src) != XOR)
9860     return x;
9861 
9862   rhs = expand_compound_operation (XEXP (src, 0));
9863   lhs = expand_compound_operation (XEXP (src, 1));
9864 
9865   if (GET_CODE (rhs) == AND
9866       && CONST_INT_P (XEXP (rhs, 1))
9867       && rtx_equal_for_field_assignment_p (XEXP (rhs, 0), dest))
9868     c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9869   /* The second SUBREG that might get in the way is a paradoxical
9870      SUBREG around the first operand of the AND.  We want to
9871      pretend the operand is as wide as the destination here.   We
9872      do this by adjusting the MEM to wider mode for the sole
9873      purpose of the call to rtx_equal_for_field_assignment_p.   Also
9874      note this trick only works for MEMs.  */
9875   else if (GET_CODE (rhs) == AND
9876 	   && paradoxical_subreg_p (XEXP (rhs, 0))
9877 	   && MEM_P (SUBREG_REG (XEXP (rhs, 0)))
9878 	   && CONST_INT_P (XEXP (rhs, 1))
9879 	   && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (rhs, 0)),
9880 						dest, true))
9881     c1 = INTVAL (XEXP (rhs, 1)), other = lhs;
9882   else if (GET_CODE (lhs) == AND
9883 	   && CONST_INT_P (XEXP (lhs, 1))
9884 	   && rtx_equal_for_field_assignment_p (XEXP (lhs, 0), dest))
9885     c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9886   /* The second SUBREG that might get in the way is a paradoxical
9887      SUBREG around the first operand of the AND.  We want to
9888      pretend the operand is as wide as the destination here.   We
9889      do this by adjusting the MEM to wider mode for the sole
9890      purpose of the call to rtx_equal_for_field_assignment_p.   Also
9891      note this trick only works for MEMs.  */
9892   else if (GET_CODE (lhs) == AND
9893 	   && paradoxical_subreg_p (XEXP (lhs, 0))
9894 	   && MEM_P (SUBREG_REG (XEXP (lhs, 0)))
9895 	   && CONST_INT_P (XEXP (lhs, 1))
9896 	   && rtx_equal_for_field_assignment_p (SUBREG_REG (XEXP (lhs, 0)),
9897 						dest, true))
9898     c1 = INTVAL (XEXP (lhs, 1)), other = rhs;
9899   else
9900     return x;
9901 
9902   pos = get_pos_from_mask ((~c1) & GET_MODE_MASK (mode), &len);
9903   if (pos < 0
9904       || pos + len > GET_MODE_PRECISION (mode)
9905       || GET_MODE_PRECISION (mode) > HOST_BITS_PER_WIDE_INT
9906       || (c1 & nonzero_bits (other, mode)) != 0)
9907     return x;
9908 
9909   assign = make_extraction (VOIDmode, dest, pos, NULL_RTX, len, 1, 1, 0);
9910   if (assign == 0)
9911     return x;
9912 
9913   /* The mode to use for the source is the mode of the assignment, or of
9914      what is inside a possible STRICT_LOW_PART.  */
9915   machine_mode new_mode = (GET_CODE (assign) == STRICT_LOW_PART
9916 			   ? GET_MODE (XEXP (assign, 0)) : GET_MODE (assign));
9917 
9918   /* Shift OTHER right POS places and make it the source, restricting it
9919      to the proper length and mode.  */
9920 
9921   src = canon_reg_for_combine (simplify_shift_const (NULL_RTX, LSHIFTRT,
9922 						     src_mode, other, pos),
9923 			       dest);
9924   src = force_to_mode (src, new_mode,
9925 		       len >= HOST_BITS_PER_WIDE_INT
9926 		       ? HOST_WIDE_INT_M1U
9927 		       : (HOST_WIDE_INT_1U << len) - 1,
9928 		       0);
9929 
9930   /* If SRC is masked by an AND that does not make a difference in
9931      the value being stored, strip it.  */
9932   if (GET_CODE (assign) == ZERO_EXTRACT
9933       && CONST_INT_P (XEXP (assign, 1))
9934       && INTVAL (XEXP (assign, 1)) < HOST_BITS_PER_WIDE_INT
9935       && GET_CODE (src) == AND
9936       && CONST_INT_P (XEXP (src, 1))
9937       && UINTVAL (XEXP (src, 1))
9938 	 == (HOST_WIDE_INT_1U << INTVAL (XEXP (assign, 1))) - 1)
9939     src = XEXP (src, 0);
9940 
9941   return gen_rtx_SET (assign, src);
9942 }
9943 
9944 /* See if X is of the form (+ (* a c) (* b c)) and convert to (* (+ a b) c)
9945    if so.  */
9946 
9947 static rtx
9948 apply_distributive_law (rtx x)
9949 {
9950   enum rtx_code code = GET_CODE (x);
9951   enum rtx_code inner_code;
9952   rtx lhs, rhs, other;
9953   rtx tem;
9954 
9955   /* Distributivity is not true for floating point as it can change the
9956      value.  So we don't do it unless -funsafe-math-optimizations.  */
9957   if (FLOAT_MODE_P (GET_MODE (x))
9958       && ! flag_unsafe_math_optimizations)
9959     return x;
9960 
9961   /* The outer operation can only be one of the following:  */
9962   if (code != IOR && code != AND && code != XOR
9963       && code != PLUS && code != MINUS)
9964     return x;
9965 
9966   lhs = XEXP (x, 0);
9967   rhs = XEXP (x, 1);
9968 
9969   /* If either operand is a primitive we can't do anything, so get out
9970      fast.  */
9971   if (OBJECT_P (lhs) || OBJECT_P (rhs))
9972     return x;
9973 
9974   lhs = expand_compound_operation (lhs);
9975   rhs = expand_compound_operation (rhs);
9976   inner_code = GET_CODE (lhs);
9977   if (inner_code != GET_CODE (rhs))
9978     return x;
9979 
9980   /* See if the inner and outer operations distribute.  */
9981   switch (inner_code)
9982     {
9983     case LSHIFTRT:
9984     case ASHIFTRT:
9985     case AND:
9986     case IOR:
9987       /* These all distribute except over PLUS.  */
9988       if (code == PLUS || code == MINUS)
9989 	return x;
9990       break;
9991 
9992     case MULT:
9993       if (code != PLUS && code != MINUS)
9994 	return x;
9995       break;
9996 
9997     case ASHIFT:
9998       /* This is also a multiply, so it distributes over everything.  */
9999       break;
10000 
10001     /* This used to handle SUBREG, but this turned out to be counter-
10002        productive, since (subreg (op ...)) usually is not handled by
10003        insn patterns, and this "optimization" therefore transformed
10004        recognizable patterns into unrecognizable ones.  Therefore the
10005        SUBREG case was removed from here.
10006 
10007        It is possible that distributing SUBREG over arithmetic operations
10008        leads to an intermediate result that can then be optimized further,
10009        e.g. by moving the outer SUBREG to the other side of a SET as done
10010        in simplify_set.  This seems to have been the original intent of
10011        handling SUBREGs here.
10012 
10013        However, with current GCC this does not appear to actually happen,
10014        at least on major platforms.  If some case is found where removing
10015        the SUBREG case here prevents follow-on optimizations, distributing
10016        SUBREGs ought to be re-added at that place, e.g. in simplify_set.  */
10017 
10018     default:
10019       return x;
10020     }
10021 
10022   /* Set LHS and RHS to the inner operands (A and B in the example
10023      above) and set OTHER to the common operand (C in the example).
10024      There is only one way to do this unless the inner operation is
10025      commutative.  */
10026   if (COMMUTATIVE_ARITH_P (lhs)
10027       && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 0)))
10028     other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 1);
10029   else if (COMMUTATIVE_ARITH_P (lhs)
10030 	   && rtx_equal_p (XEXP (lhs, 0), XEXP (rhs, 1)))
10031     other = XEXP (lhs, 0), lhs = XEXP (lhs, 1), rhs = XEXP (rhs, 0);
10032   else if (COMMUTATIVE_ARITH_P (lhs)
10033 	   && rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 0)))
10034     other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 1);
10035   else if (rtx_equal_p (XEXP (lhs, 1), XEXP (rhs, 1)))
10036     other = XEXP (lhs, 1), lhs = XEXP (lhs, 0), rhs = XEXP (rhs, 0);
10037   else
10038     return x;
10039 
10040   /* Form the new inner operation, seeing if it simplifies first.  */
10041   tem = simplify_gen_binary (code, GET_MODE (x), lhs, rhs);
10042 
10043   /* There is one exception to the general way of distributing:
10044      (a | c) ^ (b | c) -> (a ^ b) & ~c  */
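  /* (Checking the identity bit by bit: wherever c is 1 both sides are 0;
     wherever c is 0 both sides reduce to a ^ b.)  */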
10045   if (code == XOR && inner_code == IOR)
10046     {
10047       inner_code = AND;
10048       other = simplify_gen_unary (NOT, GET_MODE (x), other, GET_MODE (x));
10049     }
10050 
10051   /* We may be able to continue distributing the result, so call
10052      ourselves recursively on the inner operation before forming the
10053      outer operation, which we return.  */
10054   return simplify_gen_binary (inner_code, GET_MODE (x),
10055 			      apply_distributive_law (tem), other);
10056 }
10057 
10058 /* See if X is of the form (* (+ A B) C), and if so convert to
10059    (+ (* A C) (* B C)) and try to simplify.
10060 
10061    Most of the time, this results in no change.  However, if some of
10062    the operands are the same or inverses of each other, simplifications
10063    will result.
10064 
10065    For example, (and (ior A B) (not B)) can occur as the result of
10066    expanding a bit field assignment.  When we apply the distributive
10067    law to this, we get (ior (and (A (not B))) (and (B (not B)))),
10068    which then simplifies to (and (A (not B))).
10069 
10070    Note that no checks happen on the validity of applying the inverse
10071    distributive law.  Checking here would be pointless, since the few
10072    places where this routine is called can do it themselves.
10073 
10074    N is the index of the term that is decomposed (the arithmetic operation,
10075    i.e. (+ A B) in the first example above).  !N is the index of the term that
10076    is distributed, i.e. of C in the first example above.  */
10077 static rtx
10078 distribute_and_simplify_rtx (rtx x, int n)
10079 {
10080   machine_mode mode;
10081   enum rtx_code outer_code, inner_code;
10082   rtx decomposed, distributed, inner_op0, inner_op1, new_op0, new_op1, tmp;
10083 
10084   /* Distributivity is not true for floating point as it can change the
10085      value.  So we don't do it unless -funsafe-math-optimizations.  */
10086   if (FLOAT_MODE_P (GET_MODE (x))
10087       && ! flag_unsafe_math_optimizations)
10088     return NULL_RTX;
10089 
10090   decomposed = XEXP (x, n);
10091   if (!ARITHMETIC_P (decomposed))
10092     return NULL_RTX;
10093 
10094   mode = GET_MODE (x);
10095   outer_code = GET_CODE (x);
10096   distributed = XEXP (x, !n);
10097 
10098   inner_code = GET_CODE (decomposed);
10099   inner_op0 = XEXP (decomposed, 0);
10100   inner_op1 = XEXP (decomposed, 1);
10101 
10102   /* Special case (and (xor B C) (not A)), which is equivalent to
10103      (xor (ior A B) (ior A C))  */
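  /* (Per-bit check of the identity: when A is 1 both sides are 0; when A
     is 0 both sides reduce to B xor C.)  */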
10104   if (outer_code == AND && inner_code == XOR && GET_CODE (distributed) == NOT)
10105     {
10106       distributed = XEXP (distributed, 0);
10107       outer_code = IOR;
10108     }
10109 
10110   if (n == 0)
10111     {
10112       /* Distribute the second term.  */
10113       new_op0 = simplify_gen_binary (outer_code, mode, inner_op0, distributed);
10114       new_op1 = simplify_gen_binary (outer_code, mode, inner_op1, distributed);
10115     }
10116   else
10117     {
10118       /* Distribute the first term.  */
10119       new_op0 = simplify_gen_binary (outer_code, mode, distributed, inner_op0);
10120       new_op1 = simplify_gen_binary (outer_code, mode, distributed, inner_op1);
10121     }
10122 
10123   tmp = apply_distributive_law (simplify_gen_binary (inner_code, mode,
10124 						     new_op0, new_op1));
10125   if (GET_CODE (tmp) != outer_code
10126       && (set_src_cost (tmp, mode, optimize_this_for_speed_p)
10127 	  < set_src_cost (x, mode, optimize_this_for_speed_p)))
10128     return tmp;
10129 
10130   return NULL_RTX;
10131 }
10132 
10133 /* Simplify a logical `and' of VAROP with the constant CONSTOP, to be done
10134    in MODE.  Return an equivalent form, if different from (and VAROP
10135    (const_int CONSTOP)).  Otherwise, return NULL_RTX.  */
10136 
10137 static rtx
10138 simplify_and_const_int_1 (scalar_int_mode mode, rtx varop,
10139 			  unsigned HOST_WIDE_INT constop)
10140 {
10141   unsigned HOST_WIDE_INT nonzero;
10142   unsigned HOST_WIDE_INT orig_constop;
10143   rtx orig_varop;
10144   int i;
10145 
10146   orig_varop = varop;
10147   orig_constop = constop;
10148   if (GET_CODE (varop) == CLOBBER)
10149     return NULL_RTX;
10150 
10151   /* Simplify VAROP knowing that we will be only looking at some of the
10152      bits in it.
10153 
10154      Note by passing in CONSTOP, we guarantee that the bits not set in
10155      CONSTOP are not significant and will never be examined.  We must
10156      ensure that is the case by explicitly masking out those bits
10157      before returning.  */
10158   varop = force_to_mode (varop, mode, constop, 0);
10159 
10160   /* If VAROP is a CLOBBER, we will fail so return it.  */
10161   if (GET_CODE (varop) == CLOBBER)
10162     return varop;
10163 
10164   /* If VAROP is a CONST_INT, then we need to apply the mask in CONSTOP
10165      to VAROP and return the new constant.  */
10166   if (CONST_INT_P (varop))
10167     return gen_int_mode (INTVAL (varop) & constop, mode);
10168 
10169   /* See what bits may be nonzero in VAROP.  Unlike the general case of
10170      a call to nonzero_bits, here we don't care about bits outside
10171      MODE.  */
10172 
10173   nonzero = nonzero_bits (varop, mode) & GET_MODE_MASK (mode);
10174 
10175   /* Turn off all bits in the constant that are known to already be zero.
10176      Thus, if the AND isn't needed at all, we will have CONSTOP == NONZERO_BITS
10177      which is tested below.  */
10178 
10179   constop &= nonzero;
10180 
10181   /* If we don't have any bits left, return zero.  */
10182   if (constop == 0)
10183     return const0_rtx;
10184 
10185   /* If VAROP is a NEG of something known to be zero or 1 and CONSTOP is
10186      a power of two, we can replace this with an ASHIFT.  */
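  /* E.g. if X is known to be 0 or 1, (neg X) is 0 or all ones, so
     (and (neg X) (const_int 8)) is 0 or 8 -- exactly (ashift X 3).  */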
10187   if (GET_CODE (varop) == NEG && nonzero_bits (XEXP (varop, 0), mode) == 1
10188       && (i = exact_log2 (constop)) >= 0)
10189     return simplify_shift_const (NULL_RTX, ASHIFT, mode, XEXP (varop, 0), i);
10190 
10191   /* If VAROP is an IOR or XOR, apply the AND to both branches of the IOR
10192      or XOR, then try to apply the distributive law.  This may eliminate
10193      operations if either branch can be simplified because of the AND.
10194      It may also make some cases more complex, but those cases probably
10195      won't match a pattern either with or without this.  */
10196 
10197   if (GET_CODE (varop) == IOR || GET_CODE (varop) == XOR)
10198     {
10199       scalar_int_mode varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10200       return
10201 	gen_lowpart
10202 	  (mode,
10203 	   apply_distributive_law
10204 	   (simplify_gen_binary (GET_CODE (varop), varop_mode,
10205 				 simplify_and_const_int (NULL_RTX, varop_mode,
10206 							 XEXP (varop, 0),
10207 							 constop),
10208 				 simplify_and_const_int (NULL_RTX, varop_mode,
10209 							 XEXP (varop, 1),
10210 							 constop))));
10211     }
10212 
10213   /* If VAROP is PLUS, and the constant is a mask of low bits, distribute
10214      the AND and see if one of the operands simplifies to zero.  If so, we
10215      may eliminate it.  */
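
  /* For instance, with CONSTOP == 7 and XEXP (VAROP, 0) known to be a
     multiple of 8, the first operand masks to zero and
     (and (plus A B) 7) simplifies to (and B 7), since only the low three
     bits of the sum matter and A contributes nothing to them.  */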
10216 
10217   if (GET_CODE (varop) == PLUS
10218       && pow2p_hwi (constop + 1))
10219     {
10220       rtx o0, o1;
10221 
10222       o0 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 0), constop);
10223       o1 = simplify_and_const_int (NULL_RTX, mode, XEXP (varop, 1), constop);
10224       if (o0 == const0_rtx)
10225 	return o1;
10226       if (o1 == const0_rtx)
10227 	return o0;
10228     }
10229 
10230   /* Make a SUBREG if necessary.  If we can't make it, fail.  */
10231   varop = gen_lowpart (mode, varop);
10232   if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
10233     return NULL_RTX;
10234 
10235   /* If we are only masking insignificant bits, return VAROP.  */
10236   if (constop == nonzero)
10237     return varop;
10238 
10239   if (varop == orig_varop && constop == orig_constop)
10240     return NULL_RTX;
10241 
10242   /* Otherwise, return an AND.  */
10243   return simplify_gen_binary (AND, mode, varop, gen_int_mode (constop, mode));
10244 }
10245 
10246 
10247 /* We have X, a logical `and' of VAROP with the constant CONSTOP, to be done
10248    in MODE.
10249 
10250    Return an equivalent form, if different from X.  Otherwise, return X.  If
10251    X is zero, we are to always construct the equivalent form.  */
10252 
10253 static rtx
10254 simplify_and_const_int (rtx x, scalar_int_mode mode, rtx varop,
10255 			unsigned HOST_WIDE_INT constop)
10256 {
10257   rtx tem = simplify_and_const_int_1 (mode, varop, constop);
10258   if (tem)
10259     return tem;
10260 
10261   if (!x)
10262     x = simplify_gen_binary (AND, GET_MODE (varop), varop,
10263 			     gen_int_mode (constop, mode));
10264   if (GET_MODE (x) != mode)
10265     x = gen_lowpart (mode, x);
10266   return x;
10267 }
10268 
10269 /* Given a REG X of mode XMODE, compute which bits in X can be nonzero.
10270    We don't care about bits outside of those defined in MODE.
10271    We DO care about all the bits in MODE, even if XMODE is smaller than MODE.
10272 
10273    For most X this is simply GET_MODE_MASK (MODE), but if X is
10274    a shift, AND, or zero_extract, we can do better.  */
10275 
10276 static rtx
10277 reg_nonzero_bits_for_combine (const_rtx x, scalar_int_mode xmode,
10278 			      scalar_int_mode mode,
10279 			      unsigned HOST_WIDE_INT *nonzero)
10280 {
10281   rtx tem;
10282   reg_stat_type *rsp;
10283 
10284   /* If X is a register whose nonzero bits value is current, use it.
10285      Otherwise, if X is a register whose value we can find, use that
10286      value.  Otherwise, use the previously-computed global nonzero bits
10287      for this register.  */
10288 
10289   rsp = &reg_stat[REGNO (x)];
10290   if (rsp->last_set_value != 0
10291       && (rsp->last_set_mode == mode
10292 	  || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10293 	      && GET_MODE_CLASS (rsp->last_set_mode) == MODE_INT
10294 	      && GET_MODE_CLASS (mode) == MODE_INT))
10295       && ((rsp->last_set_label >= label_tick_ebb_start
10296 	   && rsp->last_set_label < label_tick)
10297 	  || (rsp->last_set_label == label_tick
10298               && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
10299 	  || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10300 	      && REGNO (x) < reg_n_sets_max
10301 	      && REG_N_SETS (REGNO (x)) == 1
10302 	      && !REGNO_REG_SET_P
10303 		  (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
10304 		   REGNO (x)))))
10305     {
10306       /* Note that, even if the precision of last_set_mode is lower than that
10307 	 of mode, record_value_for_reg invoked nonzero_bits on the register
10308 	 with nonzero_bits_mode (because last_set_mode is necessarily integral
10309 	 and HWI_COMPUTABLE_MODE_P in this case) so bits in nonzero_bits_mode
10310 	 are all valid, hence in mode too since nonzero_bits_mode is defined
10311 	 to the largest HWI_COMPUTABLE_MODE_P mode.  */
10312       *nonzero &= rsp->last_set_nonzero_bits;
10313       return NULL;
10314     }
10315 
10316   tem = get_last_value (x);
10317   if (tem)
10318     {
10319       if (SHORT_IMMEDIATES_SIGN_EXTEND)
10320 	tem = sign_extend_short_imm (tem, xmode, GET_MODE_PRECISION (mode));
10321 
10322       return tem;
10323     }
10324 
10325   if (nonzero_sign_valid && rsp->nonzero_bits)
10326     {
10327       unsigned HOST_WIDE_INT mask = rsp->nonzero_bits;
10328 
10329       if (GET_MODE_PRECISION (xmode) < GET_MODE_PRECISION (mode))
10330 	/* We don't know anything about the upper bits.  */
10331 	mask |= GET_MODE_MASK (mode) ^ GET_MODE_MASK (xmode);
10332 
10333       *nonzero &= mask;
10334     }
10335 
10336   return NULL;
10337 }
10338 
10339 /* Given a reg X of mode XMODE, return the number of bits at the high-order
10340    end of X that are known to be equal to the sign bit.  X will be used
10341    in mode MODE; the returned value will always be between 1 and the
10342    number of bits in MODE.  */
10343 
10344 static rtx
10345 reg_num_sign_bit_copies_for_combine (const_rtx x, scalar_int_mode xmode,
10346 				     scalar_int_mode mode,
10347 				     unsigned int *result)
10348 {
10349   rtx tem;
10350   reg_stat_type *rsp;
10351 
10352   rsp = &reg_stat[REGNO (x)];
10353   if (rsp->last_set_value != 0
10354       && rsp->last_set_mode == mode
10355       && ((rsp->last_set_label >= label_tick_ebb_start
10356 	   && rsp->last_set_label < label_tick)
10357 	  || (rsp->last_set_label == label_tick
10358               && DF_INSN_LUID (rsp->last_set) < subst_low_luid)
10359 	  || (REGNO (x) >= FIRST_PSEUDO_REGISTER
10360 	      && REGNO (x) < reg_n_sets_max
10361 	      && REG_N_SETS (REGNO (x)) == 1
10362 	      && !REGNO_REG_SET_P
10363 		  (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
10364 		   REGNO (x)))))
10365     {
10366       *result = rsp->last_set_sign_bit_copies;
10367       return NULL;
10368     }
10369 
10370   tem = get_last_value (x);
10371   if (tem != 0)
10372     return tem;
10373 
10374   if (nonzero_sign_valid && rsp->sign_bit_copies != 0
10375       && GET_MODE_PRECISION (xmode) == GET_MODE_PRECISION (mode))
10376     *result = rsp->sign_bit_copies;
10377 
10378   return NULL;
10379 }
10380 
10381 /* Return the number of "extended" bits there are in X, when interpreted
10382    as a quantity in MODE whose signedness is indicated by UNSIGNEDP.  For
10383    unsigned quantities, this is the number of high-order zero bits.
10384    For signed quantities, this is the number of copies of the sign bit
10385    minus 1.  In both cases, this function returns the number of "spare"
10386    bits.  For example, if two quantities for which this function returns
10387    at least 1 are added, the addition is known not to overflow.
10388 
10389    This function will always return 0 unless called during combine, which
10390    implies that it must be called from a define_split.  */
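/* For example, if X is used as an unsigned SImode quantity and its nonzero
   bits are known to be 0xff, the result is 31 - floor_log2 (0xff) = 24,
   i.e. there are 24 "spare" high-order zero bits.  */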
10391 
10392 unsigned int
10393 extended_count (const_rtx x, machine_mode mode, int unsignedp)
10394 {
10395   if (nonzero_sign_valid == 0)
10396     return 0;
10397 
10398   scalar_int_mode int_mode;
10399   return (unsignedp
10400 	  ? (is_a <scalar_int_mode> (mode, &int_mode)
10401 	     && HWI_COMPUTABLE_MODE_P (int_mode)
10402 	     ? (unsigned int) (GET_MODE_PRECISION (int_mode) - 1
10403 			       - floor_log2 (nonzero_bits (x, int_mode)))
10404 	     : 0)
10405 	  : num_sign_bit_copies (x, mode) - 1);
10406 }
10407 
10408 /* This function is called from `simplify_shift_const' to merge two
10409    outer operations.  Specifically, we have already found that we need
10410    to perform operation *POP0 with constant *PCONST0 at the outermost
10411    position.  We would now like to also perform OP1 with constant CONST1
10412    (with *POP0 being done last).
10413 
10414    Return 1 if we can do the operation and update *POP0 and *PCONST0 with
10415    the resulting operation.  *PCOMP_P is set to 1 if we would need to
10416    complement the innermost operand, otherwise it is unchanged.
10417 
10418    MODE is the mode in which the operation will be done.  No bits outside
10419    the width of this mode matter.  It is assumed that the width of this mode
10420    is smaller than or equal to HOST_BITS_PER_WIDE_INT.
10421 
10422    If *POP0 or OP1 is UNKNOWN, it means no operation is required.  Only NEG, PLUS,
10423    IOR, XOR, and AND are supported.  We may set *POP0 to SET if the proper
10424    result is simply *PCONST0.
10425 
10426    If the resulting operation cannot be expressed as one operation, we
10427    return 0 and do not change *POP0, *PCONST0, and *PCOMP_P.  */
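/* For example, if *POP0 is IOR with *PCONST0 == C0 and OP1 is also IOR with
   CONST1 == C1, then (ior (ior X C1) C0) == (ior X (C0 | C1)), so the two
   constants are simply merged.  If *POP0 is XOR and OP1 is AND with the same
   constant C, then (xor (and X C) C) == (and (not X) C), so *POP0 becomes
   AND and *PCOMP_P is set.  */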
10428 
10429 static int
10430 merge_outer_ops (enum rtx_code *pop0, HOST_WIDE_INT *pconst0, enum rtx_code op1, HOST_WIDE_INT const1, machine_mode mode, int *pcomp_p)
10431 {
10432   enum rtx_code op0 = *pop0;
10433   HOST_WIDE_INT const0 = *pconst0;
10434 
10435   const0 &= GET_MODE_MASK (mode);
10436   const1 &= GET_MODE_MASK (mode);
10437 
10438   /* If OP0 is an AND, clear unimportant bits in CONST1.  */
10439   if (op0 == AND)
10440     const1 &= const0;
10441 
10442   /* If OP0 or OP1 is UNKNOWN, this is easy.  Similarly if they are the same or
10443      if OP0 is SET.  */
10444 
10445   if (op1 == UNKNOWN || op0 == SET)
10446     return 1;
10447 
10448   else if (op0 == UNKNOWN)
10449     op0 = op1, const0 = const1;
10450 
10451   else if (op0 == op1)
10452     {
10453       switch (op0)
10454 	{
10455 	case AND:
10456 	  const0 &= const1;
10457 	  break;
10458 	case IOR:
10459 	  const0 |= const1;
10460 	  break;
10461 	case XOR:
10462 	  const0 ^= const1;
10463 	  break;
10464 	case PLUS:
10465 	  const0 += const1;
10466 	  break;
10467 	case NEG:
10468 	  op0 = UNKNOWN;
10469 	  break;
10470 	default:
10471 	  break;
10472 	}
10473     }
10474 
10475   /* Otherwise, if either is a PLUS or NEG, we can't do anything.  */
10476   else if (op0 == PLUS || op1 == PLUS || op0 == NEG || op1 == NEG)
10477     return 0;
10478 
10479   /* If the two constants aren't the same, we can't do anything.  The
10480      remaining six cases can all be done.  */
10481   else if (const0 != const1)
10482     return 0;
10483 
10484   else
10485     switch (op0)
10486       {
10487       case IOR:
10488 	if (op1 == AND)
10489 	  /* (a & b) | b == b */
10490 	  op0 = SET;
10491 	else /* op1 == XOR */
10492 	  /* (a ^ b) | b == a | b */
10493 	  {;}
10494 	break;
10495 
10496       case XOR:
10497 	if (op1 == AND)
10498 	  /* (a & b) ^ b == (~a) & b */
10499 	  op0 = AND, *pcomp_p = 1;
10500 	else /* op1 == IOR */
10501 	  /* (a | b) ^ b == a & ~b */
10502 	  op0 = AND, const0 = ~const0;
10503 	break;
10504 
10505       case AND:
10506 	if (op1 == IOR)
10507 	  /* (a | b) & b == b */
10508 	  op0 = SET;
10509 	else /* op1 == XOR */
10510 	  /* (a ^ b) & b == (~a) & b */
10511 	  *pcomp_p = 1;
10512 	break;
10513       default:
10514 	break;
10515       }
10516 
10517   /* Check for NO-OP cases.  */
10518   const0 &= GET_MODE_MASK (mode);
10519   if (const0 == 0
10520       && (op0 == IOR || op0 == XOR || op0 == PLUS))
10521     op0 = UNKNOWN;
10522   else if (const0 == 0 && op0 == AND)
10523     op0 = SET;
10524   else if ((unsigned HOST_WIDE_INT) const0 == GET_MODE_MASK (mode)
10525 	   && op0 == AND)
10526     op0 = UNKNOWN;
10527 
10528   *pop0 = op0;
10529 
10530   /* ??? Slightly redundant with the above mask, but not entirely.
10531      Moving this above means we'd have to sign-extend the mode mask
10532      for the final test.  */
10533   if (op0 != UNKNOWN && op0 != NEG)
10534     *pconst0 = trunc_int_for_mode (const0, mode);
10535 
10536   return 1;
10537 }
10538 
10539 /* A helper to simplify_shift_const_1 to determine the mode we can perform
10540    the shift in.  The original shift operation CODE is performed on OP in
10541    ORIG_MODE.  Return the wider mode MODE if we can perform the operation
10542    in that mode.  Return ORIG_MODE otherwise.  We can also assume that the
10543    result of the shift is subject to operation OUTER_CODE with operand
10544    OUTER_CONST.  */
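/* For example, an LSHIFTRT that is nominally performed in QImode can be done
   in SImode when the SImode nonzero bits of OP all lie within the QImode
   mask: the bits shifted in from the left are zero either way.  Likewise an
   ASHIFTRT can be widened when OP has more sign-bit copies in the wide mode
   than the difference in precision.  */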
10545 
10546 static scalar_int_mode
10547 try_widen_shift_mode (enum rtx_code code, rtx op, int count,
10548 		      scalar_int_mode orig_mode, scalar_int_mode mode,
10549 		      enum rtx_code outer_code, HOST_WIDE_INT outer_const)
10550 {
10551   gcc_assert (GET_MODE_PRECISION (mode) > GET_MODE_PRECISION (orig_mode));
10552 
10553   /* In general we can't perform a right shift or rotate in a wider mode.  */
10554   switch (code)
10555     {
10556     case ASHIFTRT:
10557       /* We can still widen if the bits brought in from the left are identical
10558 	 to the sign bit of ORIG_MODE.  */
10559       if (num_sign_bit_copies (op, mode)
10560 	  > (unsigned) (GET_MODE_PRECISION (mode)
10561 			- GET_MODE_PRECISION (orig_mode)))
10562 	return mode;
10563       return orig_mode;
10564 
10565     case LSHIFTRT:
10566       /* Similarly here but with zero bits.  */
10567       if (HWI_COMPUTABLE_MODE_P (mode)
10568 	  && (nonzero_bits (op, mode) & ~GET_MODE_MASK (orig_mode)) == 0)
10569 	return mode;
10570 
10571       /* We can also widen if the bits brought in will be masked off.  This
10572 	 operation is performed in ORIG_MODE.  */
10573       if (outer_code == AND)
10574 	{
10575 	  int care_bits = low_bitmask_len (orig_mode, outer_const);
10576 
10577 	  if (care_bits >= 0
10578 	      && GET_MODE_PRECISION (orig_mode) - care_bits >= count)
10579 	    return mode;
10580 	}
10581       /* fall through */
10582 
10583     case ROTATE:
10584       return orig_mode;
10585 
10586     case ROTATERT:
10587       gcc_unreachable ();
10588 
10589     default:
10590       return mode;
10591     }
10592 }
10593 
10594 /* Simplify a shift of VAROP by ORIG_COUNT bits.  CODE says what kind
10595    of shift.  The result of the shift is RESULT_MODE.  Return NULL_RTX
10596    if we cannot simplify it.  Otherwise, return a simplified value.
10597 
10598    The shift is normally computed in the widest mode we find in VAROP, as
10599    long as it isn't a different number of words than RESULT_MODE.  Exceptions
10600    are ASHIFTRT and ROTATE, which are always done in their original mode.  */
10601 
10602 static rtx
10603 simplify_shift_const_1 (enum rtx_code code, machine_mode result_mode,
10604 			rtx varop, int orig_count)
10605 {
10606   enum rtx_code orig_code = code;
10607   rtx orig_varop = varop;
10608   int count, log2;
10609   machine_mode mode = result_mode;
10610   machine_mode shift_mode;
10611   scalar_int_mode tmode, inner_mode, int_mode, int_varop_mode, int_result_mode;
10612   /* We form (outer_op (code varop count) (outer_const)).  */
10613   enum rtx_code outer_op = UNKNOWN;
10614   HOST_WIDE_INT outer_const = 0;
10615   int complement_p = 0;
10616   rtx new_rtx, x;
10617 
10618   /* Make sure to truncate the "natural" shift on the way in.  We don't
10619      want to do this inside the loop as it makes it more difficult to
10620      combine shifts.  */
10621   if (SHIFT_COUNT_TRUNCATED)
10622     orig_count &= GET_MODE_UNIT_BITSIZE (mode) - 1;
10623 
10624   /* If we were given an invalid count, don't do anything except exactly
10625      what was requested.  */
10626 
10627   if (orig_count < 0 || orig_count >= (int) GET_MODE_UNIT_PRECISION (mode))
10628     return NULL_RTX;
10629 
10630   count = orig_count;
10631 
10632   /* Unless one of the branches of the `if' in this loop does a `continue',
10633      we will `break' the loop after the `if'.  */
10634 
10635   while (count != 0)
10636     {
10637       /* If we have an operand of (clobber (const_int 0)), fail.  */
10638       if (GET_CODE (varop) == CLOBBER)
10639 	return NULL_RTX;
10640 
10641       /* Convert ROTATERT to ROTATE.  */
10642       if (code == ROTATERT)
10643 	{
10644 	  unsigned int bitsize = GET_MODE_UNIT_PRECISION (result_mode);
10645 	  code = ROTATE;
10646 	  count = bitsize - count;
10647 	}
10648 
10649       shift_mode = result_mode;
10650       if (shift_mode != mode)
10651 	{
10652 	  /* We only change the modes of scalar shifts.  */
10653 	  int_mode = as_a <scalar_int_mode> (mode);
10654 	  int_result_mode = as_a <scalar_int_mode> (result_mode);
10655 	  shift_mode = try_widen_shift_mode (code, varop, count,
10656 					     int_result_mode, int_mode,
10657 					     outer_op, outer_const);
10658 	}
10659 
10660       scalar_int_mode shift_unit_mode
10661 	= as_a <scalar_int_mode> (GET_MODE_INNER (shift_mode));
10662 
10663       /* Handle cases where the count is greater than the size of the mode
10664 	 minus 1.  For ASHIFTRT, use the size minus one as the count (this can
10665 	 occur when simplifying (lshiftrt (ashiftrt ..))).  For rotates,
10666 	 take the count modulo the size.  For other shifts, the result is
10667 	 zero.
10668 
10669 	 Since these shifts are being produced by the compiler by combining
10670 	 multiple operations, each of which are defined, we know what the
10671 	 result is supposed to be.  */
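      /* For example, in SImode a logical shift right by 40 yields zero, an
	 arithmetic shift right by 40 is treated as a shift by 31, and a
	 rotate by 40 is treated as a rotate by 8.  */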
10672 
10673       if (count > (GET_MODE_PRECISION (shift_unit_mode) - 1))
10674 	{
10675 	  if (code == ASHIFTRT)
10676 	    count = GET_MODE_PRECISION (shift_unit_mode) - 1;
10677 	  else if (code == ROTATE || code == ROTATERT)
10678 	    count %= GET_MODE_PRECISION (shift_unit_mode);
10679 	  else
10680 	    {
10681 	      /* We can't simply return zero because there may be an
10682 		 outer op.  */
10683 	      varop = const0_rtx;
10684 	      count = 0;
10685 	      break;
10686 	    }
10687 	}
10688 
10689       /* If we discovered we had to complement VAROP, leave.  Making a NOT
10690 	 here would cause an infinite loop.  */
10691       if (complement_p)
10692 	break;
10693 
10694       if (shift_mode == shift_unit_mode)
10695 	{
10696 	  /* An arithmetic right shift of a quantity known to be -1 or 0
10697 	     is a no-op.  */
10698 	  if (code == ASHIFTRT
10699 	      && (num_sign_bit_copies (varop, shift_unit_mode)
10700 		  == GET_MODE_PRECISION (shift_unit_mode)))
10701 	    {
10702 	      count = 0;
10703 	      break;
10704 	    }
10705 
10706 	  /* If we are doing an arithmetic right shift and discarding all but
10707 	     the sign bit copies, this is equivalent to doing a shift by the
10708 	     bitsize minus one.  Convert it into that shift because it will
10709 	     often allow other simplifications.  */
10710 
10711 	  if (code == ASHIFTRT
10712 	      && (count + num_sign_bit_copies (varop, shift_unit_mode)
10713 		  >= GET_MODE_PRECISION (shift_unit_mode)))
10714 	    count = GET_MODE_PRECISION (shift_unit_mode) - 1;
10715 
10716 	  /* We simplify the tests below and elsewhere by converting
10717 	     ASHIFTRT to LSHIFTRT if we know the sign bit is clear.
10718 	     `make_compound_operation' will convert it to an ASHIFTRT for
10719 	     those machines (such as VAX) that don't have an LSHIFTRT.  */
10720 	  if (code == ASHIFTRT
10721 	      && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10722 	      && val_signbit_known_clear_p (shift_unit_mode,
10723 					    nonzero_bits (varop,
10724 							  shift_unit_mode)))
10725 	    code = LSHIFTRT;
10726 
10727 	  if (((code == LSHIFTRT
10728 		&& HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10729 		&& !(nonzero_bits (varop, shift_unit_mode) >> count))
10730 	       || (code == ASHIFT
10731 		   && HWI_COMPUTABLE_MODE_P (shift_unit_mode)
10732 		   && !((nonzero_bits (varop, shift_unit_mode) << count)
10733 			& GET_MODE_MASK (shift_unit_mode))))
10734 	      && !side_effects_p (varop))
10735 	    varop = const0_rtx;
10736 	}
10737 
10738       switch (GET_CODE (varop))
10739 	{
10740 	case SIGN_EXTEND:
10741 	case ZERO_EXTEND:
10742 	case SIGN_EXTRACT:
10743 	case ZERO_EXTRACT:
10744 	  new_rtx = expand_compound_operation (varop);
10745 	  if (new_rtx != varop)
10746 	    {
10747 	      varop = new_rtx;
10748 	      continue;
10749 	    }
10750 	  break;
10751 
10752 	case MEM:
10753 	  /* The following rules apply only to scalars.  */
10754 	  if (shift_mode != shift_unit_mode)
10755 	    break;
10756 	  int_mode = as_a <scalar_int_mode> (mode);
10757 
10758 	  /* If we have (xshiftrt (mem ...) C) and C is MODE_WIDTH
10759 	     minus the width of a smaller mode, we can do this with a
10760 	     SIGN_EXTEND or ZERO_EXTEND from the narrower memory location.  */
10761 	  if ((code == ASHIFTRT || code == LSHIFTRT)
10762 	      && ! mode_dependent_address_p (XEXP (varop, 0),
10763 					     MEM_ADDR_SPACE (varop))
10764 	      && ! MEM_VOLATILE_P (varop)
10765 	      && (int_mode_for_size (GET_MODE_BITSIZE (int_mode) - count, 1)
10766 		  .exists (&tmode)))
10767 	    {
10768 	      new_rtx = adjust_address_nv (varop, tmode,
10769 					   BYTES_BIG_ENDIAN ? 0
10770 					   : count / BITS_PER_UNIT);
10771 
10772 	      varop = gen_rtx_fmt_e (code == ASHIFTRT ? SIGN_EXTEND
10773 				     : ZERO_EXTEND, int_mode, new_rtx);
10774 	      count = 0;
10775 	      continue;
10776 	    }
10777 	  break;
10778 
10779 	case SUBREG:
10780 	  /* The following rules apply only to scalars.  */
10781 	  if (shift_mode != shift_unit_mode)
10782 	    break;
10783 	  int_mode = as_a <scalar_int_mode> (mode);
10784 	  int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10785 
10786 	  /* If VAROP is a SUBREG, strip it as long as the inner operand has
10787 	     the same number of words as what we've seen so far.  Then store
10788 	     the widest mode in MODE.  */
10789 	  if (subreg_lowpart_p (varop)
10790 	      && is_int_mode (GET_MODE (SUBREG_REG (varop)), &inner_mode)
10791 	      && GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_varop_mode)
10792 	      && (CEIL (GET_MODE_SIZE (inner_mode), UNITS_PER_WORD)
10793 		  == CEIL (GET_MODE_SIZE (int_mode), UNITS_PER_WORD))
10794 	      && GET_MODE_CLASS (int_varop_mode) == MODE_INT)
10795 	    {
10796 	      varop = SUBREG_REG (varop);
10797 	      if (GET_MODE_SIZE (inner_mode) > GET_MODE_SIZE (int_mode))
10798 		mode = inner_mode;
10799 	      continue;
10800 	    }
10801 	  break;
10802 
10803 	case MULT:
10804 	  /* Some machines use MULT instead of ASHIFT because MULT
10805 	     is cheaper.  But it is still better on those machines to
10806 	     merge two shifts into one.  */
10807 	  if (CONST_INT_P (XEXP (varop, 1))
10808 	      && (log2 = exact_log2 (UINTVAL (XEXP (varop, 1)))) >= 0)
10809 	    {
10810 	      rtx log2_rtx = gen_int_shift_amount (GET_MODE (varop), log2);
10811 	      varop = simplify_gen_binary (ASHIFT, GET_MODE (varop),
10812 					   XEXP (varop, 0), log2_rtx);
10813 	      continue;
10814 	    }
10815 	  break;
10816 
10817 	case UDIV:
10818 	  /* Similar, for when divides are cheaper.  */
10819 	  if (CONST_INT_P (XEXP (varop, 1))
10820 	      && (log2 = exact_log2 (UINTVAL (XEXP (varop, 1)))) >= 0)
10821 	    {
10822 	      rtx log2_rtx = gen_int_shift_amount (GET_MODE (varop), log2);
10823 	      varop = simplify_gen_binary (LSHIFTRT, GET_MODE (varop),
10824 					   XEXP (varop, 0), log2_rtx);
10825 	      continue;
10826 	    }
10827 	  break;
10828 
10829 	case ASHIFTRT:
10830 	  /* If we are extracting just the sign bit of an arithmetic
10831 	     right shift, that shift is not needed.  However, the sign
10832 	     bit of a wider mode may be different from what would be
10833 	     interpreted as the sign bit in a narrower mode, so, if
10834 	     the result is narrower, don't discard the shift.  */
10835 	  if (code == LSHIFTRT
10836 	      && count == (GET_MODE_UNIT_BITSIZE (result_mode) - 1)
10837 	      && (GET_MODE_UNIT_BITSIZE (result_mode)
10838 		  >= GET_MODE_UNIT_BITSIZE (GET_MODE (varop))))
10839 	    {
10840 	      varop = XEXP (varop, 0);
10841 	      continue;
10842 	    }
10843 
10844 	  /* fall through */
10845 
10846 	case LSHIFTRT:
10847 	case ASHIFT:
10848 	case ROTATE:
10849 	  /* The following rules apply only to scalars.  */
10850 	  if (shift_mode != shift_unit_mode)
10851 	    break;
10852 	  int_mode = as_a <scalar_int_mode> (mode);
10853 	  int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
10854 	  int_result_mode = as_a <scalar_int_mode> (result_mode);
10855 
10856 	  /* Here we have two nested shifts.  The result is usually the
10857 	     AND of a new shift with a mask.  We compute the result below.  */
10858 	  if (CONST_INT_P (XEXP (varop, 1))
10859 	      && INTVAL (XEXP (varop, 1)) >= 0
10860 	      && INTVAL (XEXP (varop, 1)) < GET_MODE_PRECISION (int_varop_mode)
10861 	      && HWI_COMPUTABLE_MODE_P (int_result_mode)
10862 	      && HWI_COMPUTABLE_MODE_P (int_mode))
10863 	    {
10864 	      enum rtx_code first_code = GET_CODE (varop);
10865 	      unsigned int first_count = INTVAL (XEXP (varop, 1));
10866 	      unsigned HOST_WIDE_INT mask;
10867 	      rtx mask_rtx;
10868 
10869 	      /* We have one common special case.  We can't do any merging if
10870 		 the inner code is an ASHIFTRT of a smaller mode.  However, if
10871 		 we have (ashift:M1 (subreg:M1 (ashiftrt:M2 FOO C1) 0) C2)
10872 		 with C2 == GET_MODE_BITSIZE (M1) - GET_MODE_BITSIZE (M2),
10873 		 we can convert it to
10874 		 (ashiftrt:M1 (ashift:M1 (and:M1 (subreg:M1 FOO 0) C3) C2) C1).
10875 		 This simplifies certain SIGN_EXTEND operations.  */
10876 	      if (code == ASHIFT && first_code == ASHIFTRT
10877 		  && count == (GET_MODE_PRECISION (int_result_mode)
10878 			       - GET_MODE_PRECISION (int_varop_mode)))
10879 		{
10880 		  /* C3 has the low-order C1 bits zero.  */
10881 
10882 		  mask = GET_MODE_MASK (int_mode)
10883 			 & ~((HOST_WIDE_INT_1U << first_count) - 1);
10884 
10885 		  varop = simplify_and_const_int (NULL_RTX, int_result_mode,
10886 						  XEXP (varop, 0), mask);
10887 		  varop = simplify_shift_const (NULL_RTX, ASHIFT,
10888 						int_result_mode, varop, count);
10889 		  count = first_count;
10890 		  code = ASHIFTRT;
10891 		  continue;
10892 		}
10893 
10894 	      /* If this was (ashiftrt (ashift foo C1) C2) and FOO has more
10895 		 than C1 high-order bits equal to the sign bit, we can convert
10896 		 this to either an ASHIFT or an ASHIFTRT depending on the
10897 		 two counts.
10898 
10899 		 We cannot do this if VAROP's mode is not SHIFT_UNIT_MODE.  */
10900 
10901 	      if (code == ASHIFTRT && first_code == ASHIFT
10902 		  && int_varop_mode == shift_unit_mode
10903 		  && (num_sign_bit_copies (XEXP (varop, 0), shift_unit_mode)
10904 		      > first_count))
10905 		{
10906 		  varop = XEXP (varop, 0);
10907 		  count -= first_count;
10908 		  if (count < 0)
10909 		    {
10910 		      count = -count;
10911 		      code = ASHIFT;
10912 		    }
10913 
10914 		  continue;
10915 		}
10916 
10917 	      /* There are some cases we can't do.  If CODE is ASHIFTRT,
10918 		 we can only do this if FIRST_CODE is also ASHIFTRT.
10919 
10920 		 We can't do the case when CODE is ROTATE and FIRST_CODE is
10921 		 ASHIFTRT.
10922 
10923 		 If the mode of this shift is not the mode of the outer shift,
10924 		 we can't do this if either shift is a right shift or ROTATE.
10925 
10926 		 Finally, we can't do any of these if the mode is too wide
10927 		 unless the codes are the same.
10928 
10929 		 Handle the case where the shift codes are the same
10930 		 first.  */
10931 
10932 	      if (code == first_code)
10933 		{
10934 		  if (int_varop_mode != int_result_mode
10935 		      && (code == ASHIFTRT || code == LSHIFTRT
10936 			  || code == ROTATE))
10937 		    break;
10938 
10939 		  count += first_count;
10940 		  varop = XEXP (varop, 0);
10941 		  continue;
10942 		}
10943 
10944 	      if (code == ASHIFTRT
10945 		  || (code == ROTATE && first_code == ASHIFTRT)
10946 		  || GET_MODE_PRECISION (int_mode) > HOST_BITS_PER_WIDE_INT
10947 		  || (int_varop_mode != int_result_mode
10948 		      && (first_code == ASHIFTRT || first_code == LSHIFTRT
10949 			  || first_code == ROTATE
10950 			  || code == ROTATE)))
10951 		break;
10952 
10953 	      /* To compute the mask to apply after the shift, shift the
10954 		 nonzero bits of the inner shift the same way the
10955 		 outer shift will.  */
10956 
10957 	      mask_rtx = gen_int_mode (nonzero_bits (varop, int_varop_mode),
10958 				       int_result_mode);
10959 	      rtx count_rtx = gen_int_shift_amount (int_result_mode, count);
10960 	      mask_rtx
10961 		= simplify_const_binary_operation (code, int_result_mode,
10962 						   mask_rtx, count_rtx);
10963 
10964 	      /* Give up if we can't compute an outer operation to use.  */
10965 	      if (mask_rtx == 0
10966 		  || !CONST_INT_P (mask_rtx)
10967 		  || ! merge_outer_ops (&outer_op, &outer_const, AND,
10968 					INTVAL (mask_rtx),
10969 					int_result_mode, &complement_p))
10970 		break;
10971 
10972 	      /* If the shifts are in the same direction, we add the
10973 		 counts.  Otherwise, we subtract them.  */
10974 	      if ((code == ASHIFTRT || code == LSHIFTRT)
10975 		  == (first_code == ASHIFTRT || first_code == LSHIFTRT))
10976 		count += first_count;
10977 	      else
10978 		count -= first_count;
10979 
10980 	      /* If COUNT is positive, the new shift is usually CODE,
10981 		 except for the two exceptions below, in which case it is
10982 		 FIRST_CODE.  If the count is negative, FIRST_CODE should
10983 		 always be used.  */
10984 	      if (count > 0
10985 		  && ((first_code == ROTATE && code == ASHIFT)
10986 		      || (first_code == ASHIFTRT && code == LSHIFTRT)))
10987 		code = first_code;
10988 	      else if (count < 0)
10989 		code = first_code, count = -count;
10990 
10991 	      varop = XEXP (varop, 0);
10992 	      continue;
10993 	    }
10994 
10995 	  /* If we have (A << B << C) for any shift, we can convert this to
10996 	     (A << C << B).  This wins if A is a constant.  Only try this if
10997 	     B is not a constant.  */
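	  /* For example, (ashift (ashift (const_int 3) B) 2) becomes
	     (ashift (const_int 12) B), since the two shifts by constants can
	     be reordered around the variable shift.  */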
10998 
10999 	  else if (GET_CODE (varop) == code
11000 		   && CONST_INT_P (XEXP (varop, 0))
11001 		   && !CONST_INT_P (XEXP (varop, 1)))
11002 	    {
11003 	      /* For ((unsigned) (cstULL >> count)) >> cst2 we have to make
11004 		 sure the result will be masked.  See PR70222.  */
11005 	      if (code == LSHIFTRT
11006 		  && int_mode != int_result_mode
11007 		  && !merge_outer_ops (&outer_op, &outer_const, AND,
11008 				       GET_MODE_MASK (int_result_mode)
11009 				       >> orig_count, int_result_mode,
11010 				       &complement_p))
11011 		break;
11012 	      /* For ((int) (cstLL >> count)) >> cst2 just give up.  Queuing
11013 		 up outer sign extension (often left and right shift) is
11014 		 hardly more efficient than the original.  See PR70429.  */
11015 	      if (code == ASHIFTRT && int_mode != int_result_mode)
11016 		break;
11017 
11018 	      rtx count_rtx = gen_int_shift_amount (int_result_mode, count);
11019 	      rtx new_rtx = simplify_const_binary_operation (code, int_mode,
11020 							     XEXP (varop, 0),
11021 							     count_rtx);
11022 	      varop = gen_rtx_fmt_ee (code, int_mode, new_rtx, XEXP (varop, 1));
11023 	      count = 0;
11024 	      continue;
11025 	    }
11026 	  break;
11027 
11028 	case NOT:
11029 	  /* The following rules apply only to scalars.  */
11030 	  if (shift_mode != shift_unit_mode)
11031 	    break;
11032 
11033 	  /* Make this fit the case below.  */
11034 	  varop = gen_rtx_XOR (mode, XEXP (varop, 0), constm1_rtx);
11035 	  continue;
11036 
11037 	case IOR:
11038 	case AND:
11039 	case XOR:
11040 	  /* The following rules apply only to scalars.  */
11041 	  if (shift_mode != shift_unit_mode)
11042 	    break;
11043 	  int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
11044 	  int_result_mode = as_a <scalar_int_mode> (result_mode);
11045 
11046 	  /* If we have (xshiftrt (ior (plus X (const_int -1)) X) C)
11047 	     with C the size of VAROP - 1 and the shift is logical if
11048 	     STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
11049 	     we have an (le X 0) operation.   If we have an arithmetic shift
11050 	     and STORE_FLAG_VALUE is 1 or we have a logical shift with
11051 	     STORE_FLAG_VALUE of -1, we have a (neg (le X 0)) operation.  */
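	  /* This works because the sign bit of (ior (plus X -1) X) is set
	     exactly when X <= 0: for X == 0 the PLUS yields -1, for X < 0
	     the sign bit of X itself is set, and for X > 0 neither operand
	     has its sign bit set.  Shifting that sign bit down to bit 0
	     therefore computes (le X 0).  */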
11052 
11053 	  if (GET_CODE (varop) == IOR && GET_CODE (XEXP (varop, 0)) == PLUS
11054 	      && XEXP (XEXP (varop, 0), 1) == constm1_rtx
11055 	      && (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
11056 	      && (code == LSHIFTRT || code == ASHIFTRT)
11057 	      && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
11058 	      && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
11059 	    {
11060 	      count = 0;
11061 	      varop = gen_rtx_LE (int_varop_mode, XEXP (varop, 1),
11062 				  const0_rtx);
11063 
11064 	      if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
11065 		varop = gen_rtx_NEG (int_varop_mode, varop);
11066 
11067 	      continue;
11068 	    }
11069 
11070 	  /* If we have (shift (logical)), move the logical to the outside
11071 	     to allow it to possibly combine with another logical and the
11072 	     shift to combine with another shift.  This also canonicalizes to
11073 	     what a ZERO_EXTRACT looks like.  Also, some machines have
11074 	     (and (shift)) insns.  */
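	  /* For example, (lshiftrt (and X (const_int 0xff)) 4) becomes
	     (and (lshiftrt X 4) (const_int 0x0f)), with the AND queued as
	     the outer operation via merge_outer_ops.  */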
11075 
11076 	  if (CONST_INT_P (XEXP (varop, 1))
11077 	      /* We can't do this if we have (ashiftrt (xor))  and the
11078 		 constant has its sign bit set in shift_unit_mode with
11079 		 shift_unit_mode wider than result_mode.  */
11080 	      && !(code == ASHIFTRT && GET_CODE (varop) == XOR
11081 		   && int_result_mode != shift_unit_mode
11082 		   && trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
11083 					  shift_unit_mode) < 0)
11084 	      && (new_rtx = simplify_const_binary_operation
11085 		  (code, int_result_mode,
11086 		   gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
11087 		   gen_int_shift_amount (int_result_mode, count))) != 0
11088 	      && CONST_INT_P (new_rtx)
11089 	      && merge_outer_ops (&outer_op, &outer_const, GET_CODE (varop),
11090 				  INTVAL (new_rtx), int_result_mode,
11091 				  &complement_p))
11092 	    {
11093 	      varop = XEXP (varop, 0);
11094 	      continue;
11095 	    }
11096 
11097 	  /* If we can't do that, try to simplify the shift in each arm of the
11098 	     logical expression, make a new logical expression, and apply
11099 	     the inverse distributive law.  This also can't be done for
11100 	     (ashiftrt (xor)) where we've widened the shift and the constant
11101 	     changes the sign bit.  */
11102 	  if (CONST_INT_P (XEXP (varop, 1))
11103 	      && !(code == ASHIFTRT && GET_CODE (varop) == XOR
11104 		   && int_result_mode != shift_unit_mode
11105 		   && trunc_int_for_mode (INTVAL (XEXP (varop, 1)),
11106 					  shift_unit_mode) < 0))
11107 	    {
11108 	      rtx lhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
11109 					      XEXP (varop, 0), count);
11110 	      rtx rhs = simplify_shift_const (NULL_RTX, code, shift_unit_mode,
11111 					      XEXP (varop, 1), count);
11112 
11113 	      varop = simplify_gen_binary (GET_CODE (varop), shift_unit_mode,
11114 					   lhs, rhs);
11115 	      varop = apply_distributive_law (varop);
11116 
11117 	      count = 0;
11118 	      continue;
11119 	    }
11120 	  break;
11121 
11122 	case EQ:
11123 	  /* The following rules apply only to scalars.  */
11124 	  if (shift_mode != shift_unit_mode)
11125 	    break;
11126 	  int_result_mode = as_a <scalar_int_mode> (result_mode);
11127 
11128 	  /* Convert (lshiftrt (eq FOO 0) C) to (xor FOO 1) if STORE_FLAG_VALUE
11129 	     says that the sign bit can be tested, FOO has mode MODE, C is
11130 	     GET_MODE_PRECISION (MODE) - 1, and FOO has only its low-order bit
11131 	     that may be nonzero.  */
11132 	  if (code == LSHIFTRT
11133 	      && XEXP (varop, 1) == const0_rtx
11134 	      && GET_MODE (XEXP (varop, 0)) == int_result_mode
11135 	      && count == (GET_MODE_PRECISION (int_result_mode) - 1)
11136 	      && HWI_COMPUTABLE_MODE_P (int_result_mode)
11137 	      && STORE_FLAG_VALUE == -1
11138 	      && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1
11139 	      && merge_outer_ops (&outer_op, &outer_const, XOR, 1,
11140 				  int_result_mode, &complement_p))
11141 	    {
11142 	      varop = XEXP (varop, 0);
11143 	      count = 0;
11144 	      continue;
11145 	    }
11146 	  break;
11147 
11148 	case NEG:
11149 	  /* The following rules apply only to scalars.  */
11150 	  if (shift_mode != shift_unit_mode)
11151 	    break;
11152 	  int_result_mode = as_a <scalar_int_mode> (result_mode);
11153 
11154 	  /* (lshiftrt (neg A) C) where A is either 0 or 1 and C is one less
11155 	     than the number of bits in the mode is equivalent to A.  */
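	  /* For example, (lshiftrt:SI (neg:SI A) 31) with A known to be 0 or
	     1: (neg A) is 0 or 0xffffffff, and the logical shift by 31
	     leaves 0 or 1, i.e. A itself.  */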
11156 	  if (code == LSHIFTRT
11157 	      && count == (GET_MODE_PRECISION (int_result_mode) - 1)
11158 	      && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1)
11159 	    {
11160 	      varop = XEXP (varop, 0);
11161 	      count = 0;
11162 	      continue;
11163 	    }
11164 
11165 	  /* NEG commutes with ASHIFT since it is multiplication.  Move the
11166 	     NEG outside to allow shifts to combine.  */
11167 	  if (code == ASHIFT
11168 	      && merge_outer_ops (&outer_op, &outer_const, NEG, 0,
11169 				  int_result_mode, &complement_p))
11170 	    {
11171 	      varop = XEXP (varop, 0);
11172 	      continue;
11173 	    }
11174 	  break;
11175 
11176 	case PLUS:
11177 	  /* The following rules apply only to scalars.  */
11178 	  if (shift_mode != shift_unit_mode)
11179 	    break;
11180 	  int_result_mode = as_a <scalar_int_mode> (result_mode);
11181 
11182 	  /* (lshiftrt (plus A -1) C) where A is either 0 or 1 and C
11183 	     is one less than the number of bits in the mode is
11184 	     equivalent to (xor A 1).  */
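	  /* For example, (lshiftrt:SI (plus:SI A (const_int -1)) 31) with A
	     known to be 0 or 1: A - 1 is -1 or 0, and the logical shift by
	     31 leaves 1 or 0, i.e. (xor A 1).  */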
11185 	  if (code == LSHIFTRT
11186 	      && count == (GET_MODE_PRECISION (int_result_mode) - 1)
11187 	      && XEXP (varop, 1) == constm1_rtx
11188 	      && nonzero_bits (XEXP (varop, 0), int_result_mode) == 1
11189 	      && merge_outer_ops (&outer_op, &outer_const, XOR, 1,
11190 				  int_result_mode, &complement_p))
11191 	    {
11192 	      count = 0;
11193 	      varop = XEXP (varop, 0);
11194 	      continue;
11195 	    }
11196 
11197 	  /* If we have (xshiftrt (plus FOO BAR) C), and the only bits
11198 	     that might be nonzero in BAR are those being shifted out and those
11199 	     bits are known zero in FOO, we can replace the PLUS with FOO.
11200 	     Similarly in the other operand order.  This code occurs when
11201 	     we are computing the size of a variable-size array.  */
11202 
11203 	  if ((code == ASHIFTRT || code == LSHIFTRT)
11204 	      && count < HOST_BITS_PER_WIDE_INT
11205 	      && nonzero_bits (XEXP (varop, 1), int_result_mode) >> count == 0
11206 	      && (nonzero_bits (XEXP (varop, 1), int_result_mode)
11207 		  & nonzero_bits (XEXP (varop, 0), int_result_mode)) == 0)
11208 	    {
11209 	      varop = XEXP (varop, 0);
11210 	      continue;
11211 	    }
11212 	  else if ((code == ASHIFTRT || code == LSHIFTRT)
11213 		   && count < HOST_BITS_PER_WIDE_INT
11214 		   && HWI_COMPUTABLE_MODE_P (int_result_mode)
11215 		   && (nonzero_bits (XEXP (varop, 0), int_result_mode)
11216 		       >> count) == 0
11217 		   && (nonzero_bits (XEXP (varop, 0), int_result_mode)
11218 		       & nonzero_bits (XEXP (varop, 1), int_result_mode)) == 0)
11219 	    {
11220 	      varop = XEXP (varop, 1);
11221 	      continue;
11222 	    }
11223 
11224 	  /* (ashift (plus foo C) N) is (plus (ashift foo N) C').  */
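	  /* For example, (ashift (plus X (const_int 3)) 2) becomes
	     (plus (ashift X 2) (const_int 12)), with the PLUS queued as the
	     outer operation.  */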
11225 	  if (code == ASHIFT
11226 	      && CONST_INT_P (XEXP (varop, 1))
11227 	      && (new_rtx = simplify_const_binary_operation
11228 		  (ASHIFT, int_result_mode,
11229 		   gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
11230 		   gen_int_shift_amount (int_result_mode, count))) != 0
11231 	      && CONST_INT_P (new_rtx)
11232 	      && merge_outer_ops (&outer_op, &outer_const, PLUS,
11233 				  INTVAL (new_rtx), int_result_mode,
11234 				  &complement_p))
11235 	    {
11236 	      varop = XEXP (varop, 0);
11237 	      continue;
11238 	    }
11239 
11240 	  /* Check for 'PLUS signbit', which is the canonical form of 'XOR
11241 	     signbit', and attempt to change the PLUS to an XOR and move it to
11242 	     the outer operation, as is done above in the AND/IOR/XOR case
11243 	     for (shift (logical)).  See the logical handling above for the
11244 	     reasoning behind doing so.  */
11245 	  if (code == LSHIFTRT
11246 	      && CONST_INT_P (XEXP (varop, 1))
11247 	      && mode_signbit_p (int_result_mode, XEXP (varop, 1))
11248 	      && (new_rtx = simplify_const_binary_operation
11249 		  (code, int_result_mode,
11250 		   gen_int_mode (INTVAL (XEXP (varop, 1)), int_result_mode),
11251 		   gen_int_shift_amount (int_result_mode, count))) != 0
11252 	      && CONST_INT_P (new_rtx)
11253 	      && merge_outer_ops (&outer_op, &outer_const, XOR,
11254 				  INTVAL (new_rtx), int_result_mode,
11255 				  &complement_p))
11256 	    {
11257 	      varop = XEXP (varop, 0);
11258 	      continue;
11259 	    }
11260 
11261 	  break;
11262 
11263 	case MINUS:
11264 	  /* The following rules apply only to scalars.  */
11265 	  if (shift_mode != shift_unit_mode)
11266 	    break;
11267 	  int_varop_mode = as_a <scalar_int_mode> (GET_MODE (varop));
11268 
11269 	  /* If we have (xshiftrt (minus (ashiftrt X C) X) C)
11270 	     with C the size of VAROP - 1 and the shift is logical if
11271 	     STORE_FLAG_VALUE is 1 and arithmetic if STORE_FLAG_VALUE is -1,
11272 	     we have a (gt X 0) operation.  If the shift is arithmetic with
11273 	     STORE_FLAG_VALUE of 1 or logical with STORE_FLAG_VALUE == -1,
11274 	     we have a (neg (gt X 0)) operation.  */
11275 
11276 	  if ((STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
11277 	      && GET_CODE (XEXP (varop, 0)) == ASHIFTRT
11278 	      && count == (GET_MODE_PRECISION (int_varop_mode) - 1)
11279 	      && (code == LSHIFTRT || code == ASHIFTRT)
11280 	      && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
11281 	      && INTVAL (XEXP (XEXP (varop, 0), 1)) == count
11282 	      && rtx_equal_p (XEXP (XEXP (varop, 0), 0), XEXP (varop, 1)))
11283 	    {
11284 	      count = 0;
11285 	      varop = gen_rtx_GT (int_varop_mode, XEXP (varop, 1),
11286 				  const0_rtx);
11287 
11288 	      if (STORE_FLAG_VALUE == 1 ? code == ASHIFTRT : code == LSHIFTRT)
11289 		varop = gen_rtx_NEG (int_varop_mode, varop);
11290 
11291 	      continue;
11292 	    }
11293 	  break;
11294 
11295 	case TRUNCATE:
11296 	  /* Change (lshiftrt (truncate (lshiftrt))) to (truncate (lshiftrt))
11297 	     if the truncate does not affect the value.  */
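	  /* For example, (lshiftrt:QI (truncate:QI (lshiftrt:SI X 24)) 2)
	     becomes (truncate:QI (lshiftrt:SI X 26)): every bit that survives
	     the truncation comes from the original SImode shift, so the two
	     shift counts can simply be added.  */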
11298 	  if (code == LSHIFTRT
11299 	      && GET_CODE (XEXP (varop, 0)) == LSHIFTRT
11300 	      && CONST_INT_P (XEXP (XEXP (varop, 0), 1))
11301 	      && (INTVAL (XEXP (XEXP (varop, 0), 1))
11302 		  >= (GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (varop, 0)))
11303 		      - GET_MODE_UNIT_PRECISION (GET_MODE (varop)))))
11304 	    {
11305 	      rtx varop_inner = XEXP (varop, 0);
11306 	      int new_count = count + INTVAL (XEXP (varop_inner, 1));
11307 	      rtx new_count_rtx = gen_int_shift_amount (GET_MODE (varop_inner),
11308 							new_count);
11309 	      varop_inner = gen_rtx_LSHIFTRT (GET_MODE (varop_inner),
11310 					      XEXP (varop_inner, 0),
11311 					      new_count_rtx);
11312 	      varop = gen_rtx_TRUNCATE (GET_MODE (varop), varop_inner);
11313 	      count = 0;
11314 	      continue;
11315 	    }
11316 	  break;
11317 
11318 	default:
11319 	  break;
11320 	}
11321 
11322       break;
11323     }
11324 
11325   shift_mode = result_mode;
11326   if (shift_mode != mode)
11327     {
11328       /* We only change the modes of scalar shifts.  */
11329       int_mode = as_a <scalar_int_mode> (mode);
11330       int_result_mode = as_a <scalar_int_mode> (result_mode);
11331       shift_mode = try_widen_shift_mode (code, varop, count, int_result_mode,
11332 					 int_mode, outer_op, outer_const);
11333     }
11334 
11335   /* We have now finished analyzing the shift.  The result should be
11336      a shift of type CODE with SHIFT_MODE shifting VAROP COUNT places.  If
11337      OUTER_OP is non-UNKNOWN, it is an operation that needs to be applied
11338      to the result of the shift.  OUTER_CONST is the relevant constant,
11339      but we must turn off all bits turned off in the shift.  */
11340 
11341   if (outer_op == UNKNOWN
11342       && orig_code == code && orig_count == count
11343       && varop == orig_varop
11344       && shift_mode == GET_MODE (varop))
11345     return NULL_RTX;
11346 
11347   /* Make a SUBREG if necessary.  If we can't make it, fail.  */
11348   varop = gen_lowpart (shift_mode, varop);
11349   if (varop == NULL_RTX || GET_CODE (varop) == CLOBBER)
11350     return NULL_RTX;
11351 
11352   /* If we have an outer operation and we just made a shift, it is
11353      possible that we could have simplified the shift were it not
11354      for the outer operation.  So try to do the simplification
11355      recursively.  */
11356 
11357   if (outer_op != UNKNOWN)
11358     x = simplify_shift_const_1 (code, shift_mode, varop, count);
11359   else
11360     x = NULL_RTX;
11361 
11362   if (x == NULL_RTX)
11363     x = simplify_gen_binary (code, shift_mode, varop,
11364 			     gen_int_shift_amount (shift_mode, count));
11365 
11366   /* If we were doing an LSHIFTRT in a wider mode than it was originally,
11367      turn off all the bits that the shift would have turned off.  */
11368   if (orig_code == LSHIFTRT && result_mode != shift_mode)
11369     /* We only change the modes of scalar shifts.  */
11370     x = simplify_and_const_int (NULL_RTX, as_a <scalar_int_mode> (shift_mode),
11371 				x, GET_MODE_MASK (result_mode) >> orig_count);
11372 
11373   /* Do the remainder of the processing in RESULT_MODE.  */
11374   x = gen_lowpart_or_truncate (result_mode, x);
11375 
11376   /* If COMPLEMENT_P is set, we have to complement X before doing the outer
11377      operation.  */
11378   if (complement_p)
11379     x = simplify_gen_unary (NOT, result_mode, x, result_mode);
11380 
11381   if (outer_op != UNKNOWN)
11382     {
11383       int_result_mode = as_a <scalar_int_mode> (result_mode);
11384 
11385       if (GET_RTX_CLASS (outer_op) != RTX_UNARY
11386 	  && GET_MODE_PRECISION (int_result_mode) < HOST_BITS_PER_WIDE_INT)
11387 	outer_const = trunc_int_for_mode (outer_const, int_result_mode);
11388 
11389       if (outer_op == AND)
11390 	x = simplify_and_const_int (NULL_RTX, int_result_mode, x, outer_const);
11391       else if (outer_op == SET)
11392 	{
11393 	  /* This means that we have determined that the result is
11394 	     equivalent to a constant.  This should be rare.  */
11395 	  if (!side_effects_p (x))
11396 	    x = GEN_INT (outer_const);
11397 	}
11398       else if (GET_RTX_CLASS (outer_op) == RTX_UNARY)
11399 	x = simplify_gen_unary (outer_op, int_result_mode, x, int_result_mode);
11400       else
11401 	x = simplify_gen_binary (outer_op, int_result_mode, x,
11402 				 GEN_INT (outer_const));
11403     }
11404 
11405   return x;
11406 }
11407 
11408 /* Simplify a shift of VAROP by COUNT bits.  CODE says what kind of shift.
11409    The result of the shift is RESULT_MODE.  If we cannot simplify it,
11410    return X or, if it is NULL, synthesize the expression with
11411    simplify_gen_binary.  Otherwise, return a simplified value.
11412 
11413    The shift is normally computed in the widest mode we find in VAROP, as
11414    long as it isn't a different number of words than RESULT_MODE.  Exceptions
11415    are ASHIFTRT and ROTATE, which are always done in their original mode.  */
11416 
11417 static rtx
11418 simplify_shift_const (rtx x, enum rtx_code code, machine_mode result_mode,
11419 		      rtx varop, int count)
11420 {
11421   rtx tem = simplify_shift_const_1 (code, result_mode, varop, count);
11422   if (tem)
11423     return tem;
11424 
11425   if (!x)
11426     x = simplify_gen_binary (code, GET_MODE (varop), varop,
11427 			     gen_int_shift_amount (GET_MODE (varop), count));
11428   if (GET_MODE (x) != result_mode)
11429     x = gen_lowpart (result_mode, x);
11430   return x;
11431 }
11432 
11433 
11434 /* A subroutine of recog_for_combine.  See there for arguments and
11435    return value.  */
11436 
11437 static int
11438 recog_for_combine_1 (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
11439 {
11440   rtx pat = *pnewpat;
11441   rtx pat_without_clobbers;
11442   int insn_code_number;
11443   int num_clobbers_to_add = 0;
11444   int i;
11445   rtx notes = NULL_RTX;
11446   rtx old_notes, old_pat;
11447   int old_icode;
11448 
11449   /* If PAT is a PARALLEL, check to see if it contains the CLOBBER
11450      we use to indicate that something didn't match.  If we find such a
11451      thing, force rejection.  */
11452   if (GET_CODE (pat) == PARALLEL)
11453     for (i = XVECLEN (pat, 0) - 1; i >= 0; i--)
11454       if (GET_CODE (XVECEXP (pat, 0, i)) == CLOBBER
11455 	  && XEXP (XVECEXP (pat, 0, i), 0) == const0_rtx)
11456 	return -1;
11457 
11458   old_pat = PATTERN (insn);
11459   old_notes = REG_NOTES (insn);
11460   PATTERN (insn) = pat;
11461   REG_NOTES (insn) = NULL_RTX;
11462 
11463   insn_code_number = recog (pat, insn, &num_clobbers_to_add);
11464   if (dump_file && (dump_flags & TDF_DETAILS))
11465     {
11466       if (insn_code_number < 0)
11467 	fputs ("Failed to match this instruction:\n", dump_file);
11468       else
11469 	fputs ("Successfully matched this instruction:\n", dump_file);
11470       print_rtl_single (dump_file, pat);
11471     }
11472 
11473   /* If the pattern was not recognized, we may previously have had an
11474      insn that clobbered some register as a side effect, but the combined
11475      insn doesn't need to do that.  So try once more without the clobbers
11476      unless this represents an ASM insn.  */
11477 
11478   if (insn_code_number < 0 && ! check_asm_operands (pat)
11479       && GET_CODE (pat) == PARALLEL)
11480     {
11481       int pos;
11482 
11483       for (pos = 0, i = 0; i < XVECLEN (pat, 0); i++)
11484 	if (GET_CODE (XVECEXP (pat, 0, i)) != CLOBBER)
11485 	  {
11486 	    if (i != pos)
11487 	      SUBST (XVECEXP (pat, 0, pos), XVECEXP (pat, 0, i));
11488 	    pos++;
11489 	  }
11490 
11491       SUBST_INT (XVECLEN (pat, 0), pos);
11492 
11493       if (pos == 1)
11494 	pat = XVECEXP (pat, 0, 0);
11495 
11496       PATTERN (insn) = pat;
11497       insn_code_number = recog (pat, insn, &num_clobbers_to_add);
11498       if (dump_file && (dump_flags & TDF_DETAILS))
11499 	{
11500 	  if (insn_code_number < 0)
11501 	    fputs ("Failed to match this instruction:\n", dump_file);
11502 	  else
11503 	    fputs ("Successfully matched this instruction:\n", dump_file);
11504 	  print_rtl_single (dump_file, pat);
11505 	}
11506     }
11507 
11508   pat_without_clobbers = pat;
11509 
11510   PATTERN (insn) = old_pat;
11511   REG_NOTES (insn) = old_notes;
11512 
11513   /* Recognize all noop sets, these will be killed by followup pass.  */
11514   if (insn_code_number < 0 && GET_CODE (pat) == SET && set_noop_p (pat))
11515     insn_code_number = NOOP_MOVE_INSN_CODE, num_clobbers_to_add = 0;
11516 
11517   /* If we had any clobbers to add, make a new pattern that contains
11518      them.  Then check to make sure that all of them are dead.  */
11519   if (num_clobbers_to_add)
11520     {
11521       rtx newpat = gen_rtx_PARALLEL (VOIDmode,
11522 				     rtvec_alloc (GET_CODE (pat) == PARALLEL
11523 						  ? (XVECLEN (pat, 0)
11524 						     + num_clobbers_to_add)
11525 						  : num_clobbers_to_add + 1));
11526 
11527       if (GET_CODE (pat) == PARALLEL)
11528 	for (i = 0; i < XVECLEN (pat, 0); i++)
11529 	  XVECEXP (newpat, 0, i) = XVECEXP (pat, 0, i);
11530       else
11531 	XVECEXP (newpat, 0, 0) = pat;
11532 
11533       add_clobbers (newpat, insn_code_number);
11534 
11535       for (i = XVECLEN (newpat, 0) - num_clobbers_to_add;
11536 	   i < XVECLEN (newpat, 0); i++)
11537 	{
11538 	  if (REG_P (XEXP (XVECEXP (newpat, 0, i), 0))
11539 	      && ! reg_dead_at_p (XEXP (XVECEXP (newpat, 0, i), 0), insn))
11540 	    return -1;
11541 	  if (GET_CODE (XEXP (XVECEXP (newpat, 0, i), 0)) != SCRATCH)
11542 	    {
11543 	      gcc_assert (REG_P (XEXP (XVECEXP (newpat, 0, i), 0)));
11544 	      notes = alloc_reg_note (REG_UNUSED,
11545 				      XEXP (XVECEXP (newpat, 0, i), 0), notes);
11546 	    }
11547 	}
11548       pat = newpat;
11549     }
11550 
11551   if (insn_code_number >= 0
11552       && insn_code_number != NOOP_MOVE_INSN_CODE)
11553     {
11554       old_pat = PATTERN (insn);
11555       old_notes = REG_NOTES (insn);
11556       old_icode = INSN_CODE (insn);
11557       PATTERN (insn) = pat;
11558       REG_NOTES (insn) = notes;
11559       INSN_CODE (insn) = insn_code_number;
11560 
11561       /* Allow targets to reject combined insn.  */
11562       if (!targetm.legitimate_combined_insn (insn))
11563 	{
11564 	  if (dump_file && (dump_flags & TDF_DETAILS))
11565 	    fputs ("Instruction not appropriate for target.",
11566 		   dump_file);
11567 
11568 	  /* Callers expect recog_for_combine to strip
11569 	     clobbers from the pattern on failure.  */
11570 	  pat = pat_without_clobbers;
11571 	  notes = NULL_RTX;
11572 
11573 	  insn_code_number = -1;
11574 	}
11575 
11576       PATTERN (insn) = old_pat;
11577       REG_NOTES (insn) = old_notes;
11578       INSN_CODE (insn) = old_icode;
11579     }
11580 
11581   *pnewpat = pat;
11582   *pnotes = notes;
11583 
11584   return insn_code_number;
11585 }
11586 
11587 /* Change every ZERO_EXTRACT and ZERO_EXTEND of a SUBREG that can be
11588    expressed as an AND and maybe an LSHIFTRT, to that formulation.
11589    Return whether anything was so changed.  */
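/* For example, on a little-endian target,
   (zero_extract:SI (reg:SI r) (const_int 8) (const_int 4)) becomes
   (and:SI (lshiftrt:SI (reg:SI r) (const_int 4)) (const_int 255)), and
   (zero_extend:SI (subreg:QI (reg:SI r) 0)) becomes
   (and:SI (reg:SI r) (const_int 255)).  */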
11590 
11591 static bool
11592 change_zero_ext (rtx pat)
11593 {
11594   bool changed = false;
11595   rtx *src = &SET_SRC (pat);
11596 
11597   subrtx_ptr_iterator::array_type array;
11598   FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
11599     {
11600       rtx x = **iter;
11601       scalar_int_mode mode, inner_mode;
11602       if (!is_a <scalar_int_mode> (GET_MODE (x), &mode))
11603 	continue;
11604       int size;
11605 
11606       if (GET_CODE (x) == ZERO_EXTRACT
11607 	  && CONST_INT_P (XEXP (x, 1))
11608 	  && CONST_INT_P (XEXP (x, 2))
11609 	  && is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode)
11610 	  && GET_MODE_PRECISION (inner_mode) <= GET_MODE_PRECISION (mode))
11611 	{
11612 	  size = INTVAL (XEXP (x, 1));
11613 
11614 	  int start = INTVAL (XEXP (x, 2));
11615 	  if (BITS_BIG_ENDIAN)
11616 	    start = GET_MODE_PRECISION (inner_mode) - size - start;
11617 
11618 	  if (start != 0)
11619 	    x = gen_rtx_LSHIFTRT (inner_mode, XEXP (x, 0),
11620 				  gen_int_shift_amount (inner_mode, start));
11621 	  else
11622 	    x = XEXP (x, 0);
11623 
11624 	  if (mode != inner_mode)
11625 	    {
11626 	      if (REG_P (x) && HARD_REGISTER_P (x)
11627 		  && !can_change_dest_mode (x, 0, mode))
11628 		continue;
11629 
11630 	      x = gen_lowpart_SUBREG (mode, x);
11631 	    }
11632 	}
11633       else if (GET_CODE (x) == ZERO_EXTEND
11634 	       && GET_CODE (XEXP (x, 0)) == SUBREG
11635 	       && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (XEXP (x, 0))))
11636 	       && !paradoxical_subreg_p (XEXP (x, 0))
11637 	       && subreg_lowpart_p (XEXP (x, 0)))
11638 	{
11639 	  inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
11640 	  size = GET_MODE_PRECISION (inner_mode);
11641 	  x = SUBREG_REG (XEXP (x, 0));
11642 	  if (GET_MODE (x) != mode)
11643 	    {
11644 	      if (REG_P (x) && HARD_REGISTER_P (x)
11645 		  && !can_change_dest_mode (x, 0, mode))
11646 		continue;
11647 
11648 	      x = gen_lowpart_SUBREG (mode, x);
11649 	    }
11650 	}
11651       else if (GET_CODE (x) == ZERO_EXTEND
11652 	       && REG_P (XEXP (x, 0))
11653 	       && HARD_REGISTER_P (XEXP (x, 0))
11654 	       && can_change_dest_mode (XEXP (x, 0), 0, mode))
11655 	{
11656 	  inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
11657 	  size = GET_MODE_PRECISION (inner_mode);
11658 	  x = gen_rtx_REG (mode, REGNO (XEXP (x, 0)));
11659 	}
11660       else
11661 	continue;
11662 
11663       if (!(GET_CODE (x) == LSHIFTRT
11664 	    && CONST_INT_P (XEXP (x, 1))
11665 	    && size + INTVAL (XEXP (x, 1)) == GET_MODE_PRECISION (mode)))
11666 	{
11667 	  wide_int mask = wi::mask (size, false, GET_MODE_PRECISION (mode));
11668 	  x = gen_rtx_AND (mode, x, immed_wide_int_const (mask, mode));
11669 	}
11670 
11671       SUBST (**iter, x);
11672       changed = true;
11673     }
11674 
11675   if (changed)
11676     FOR_EACH_SUBRTX_PTR (iter, array, src, NONCONST)
11677       maybe_swap_commutative_operands (**iter);
11678 
11679   rtx *dst = &SET_DEST (pat);
11680   scalar_int_mode mode;
11681   if (GET_CODE (*dst) == ZERO_EXTRACT
11682       && REG_P (XEXP (*dst, 0))
11683       && is_a <scalar_int_mode> (GET_MODE (XEXP (*dst, 0)), &mode)
11684       && CONST_INT_P (XEXP (*dst, 1))
11685       && CONST_INT_P (XEXP (*dst, 2)))
11686     {
11687       rtx reg = XEXP (*dst, 0);
11688       int width = INTVAL (XEXP (*dst, 1));
11689       int offset = INTVAL (XEXP (*dst, 2));
11690       int reg_width = GET_MODE_PRECISION (mode);
11691       if (BITS_BIG_ENDIAN)
11692 	offset = reg_width - width - offset;
11693 
11694       rtx x, y, z, w;
11695       wide_int mask = wi::shifted_mask (offset, width, true, reg_width);
11696       wide_int mask2 = wi::shifted_mask (offset, width, false, reg_width);
11697       x = gen_rtx_AND (mode, reg, immed_wide_int_const (mask, mode));
11698       if (offset)
11699 	y = gen_rtx_ASHIFT (mode, SET_SRC (pat), GEN_INT (offset));
11700       else
11701 	y = SET_SRC (pat);
11702       z = gen_rtx_AND (mode, y, immed_wide_int_const (mask2, mode));
11703       w = gen_rtx_IOR (mode, x, z);
11704       SUBST (SET_DEST (pat), reg);
11705       SUBST (SET_SRC (pat), w);
11706 
11707       changed = true;
11708     }
11709 
11710   return changed;
11711 }
11712 
11713 /* Like recog, but we receive the address of a pointer to a new pattern.
11714    We try to match the rtx that the pointer points to.
11715    If that fails, we may try to modify or replace the pattern,
11716    storing the replacement into the same pointer object.
11717 
11718    Modifications include deletion or addition of CLOBBERs.  If the
11719    instruction will still not match, we change ZERO_EXTEND and ZERO_EXTRACT
11720    to the equivalent AND and perhaps LSHIFTRT patterns, and try with that
11721    (and undo if that fails).
11722 
11723    PNOTES is a pointer to a location where any REG_UNUSED notes added for
11724    the CLOBBERs are placed.
11725 
11726    The value is the final insn code from the pattern ultimately matched,
11727    or -1.  */
11728 
11729 static int
11730 recog_for_combine (rtx *pnewpat, rtx_insn *insn, rtx *pnotes)
11731 {
11732   rtx pat = *pnewpat;
11733   int insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
11734   if (insn_code_number >= 0 || check_asm_operands (pat))
11735     return insn_code_number;
11736 
11737   void *marker = get_undo_marker ();
11738   bool changed = false;
11739 
11740   if (GET_CODE (pat) == SET)
11741     changed = change_zero_ext (pat);
11742   else if (GET_CODE (pat) == PARALLEL)
11743     {
11744       int i;
11745       for (i = 0; i < XVECLEN (pat, 0); i++)
11746 	{
11747 	  rtx set = XVECEXP (pat, 0, i);
11748 	  if (GET_CODE (set) == SET)
11749 	    changed |= change_zero_ext (set);
11750 	}
11751     }
11752 
11753   if (changed)
11754     {
11755       insn_code_number = recog_for_combine_1 (pnewpat, insn, pnotes);
11756 
11757       if (insn_code_number < 0)
11758 	undo_to_marker (marker);
11759     }
11760 
11761   return insn_code_number;
11762 }
11763 
11764 /* Like gen_lowpart_general but for use by combine.  In combine it
11765    is not possible to create any new pseudoregs.  However, it is
11766    safe to create invalid memory addresses, because combine will
11767    try to recognize them and all they will do is make the combine
11768    attempt fail.
11769 
11770    If for some reason this cannot do its job, an rtx
11771    (clobber (const_int 0)) is returned.
11772    An insn containing that will not be recognized.  */
11773 
11774 static rtx
11775 gen_lowpart_for_combine (machine_mode omode, rtx x)
11776 {
11777   machine_mode imode = GET_MODE (x);
11778   rtx result;
11779 
11780   if (omode == imode)
11781     return x;
11782 
11783   /* We can only support MODE being wider than a word if X is a
11784      constant integer or has a mode the same size.  */
11785   if (maybe_gt (GET_MODE_SIZE (omode), UNITS_PER_WORD)
11786       && ! (CONST_SCALAR_INT_P (x)
11787 	    || known_eq (GET_MODE_SIZE (imode), GET_MODE_SIZE (omode))))
11788     goto fail;
11789 
11790   /* X might be a paradoxical (subreg (mem)).  In that case, gen_lowpart
11791      won't know what to do.  So we will strip off the SUBREG here and
11792      process normally.  */
11793   if (GET_CODE (x) == SUBREG && MEM_P (SUBREG_REG (x)))
11794     {
11795       x = SUBREG_REG (x);
11796 
11797       /* For use in case we fall down into the address adjustments
11798 	 further below, we need to adjust the known mode of x (imode),
11799 	 since we just changed x.  */
11800       imode = GET_MODE (x);
11801 
11802       if (imode == omode)
11803 	return x;
11804     }
11805 
11806   result = gen_lowpart_common (omode, x);
11807 
11808   if (result)
11809     return result;
11810 
11811   if (MEM_P (x))
11812     {
11813       /* Refuse to work on a volatile memory ref or one with a mode-dependent
11814 	 address.  */
11815       if (MEM_VOLATILE_P (x)
11816 	  || mode_dependent_address_p (XEXP (x, 0), MEM_ADDR_SPACE (x)))
11817 	goto fail;
11818 
11819       /* If we want to refer to something bigger than the original memref,
11820 	 generate a paradoxical subreg instead.  That will force a reload
11821 	 of the original memref X.  */
11822       if (paradoxical_subreg_p (omode, imode))
11823 	return gen_rtx_SUBREG (omode, x, 0);
11824 
11825       poly_int64 offset = byte_lowpart_offset (omode, imode);
11826       return adjust_address_nv (x, omode, offset);
11827     }
11828 
11829   /* If X is a comparison operator, rewrite it in a new mode.  This
11830      probably won't match, but may allow further simplifications.  */
11831   else if (COMPARISON_P (x))
11832     return gen_rtx_fmt_ee (GET_CODE (x), omode, XEXP (x, 0), XEXP (x, 1));
11833 
11834   /* If we couldn't simplify X any other way, just enclose it in a
11835      SUBREG.  Normally, this SUBREG won't match, but some patterns may
11836      include an explicit SUBREG or we may simplify it further in combine.  */
11837   else
11838     {
11839       rtx res;
11840 
11841       if (imode == VOIDmode)
11842 	{
11843 	  imode = int_mode_for_mode (omode).require ();
11844 	  x = gen_lowpart_common (imode, x);
11845 	  if (x == NULL)
11846 	    goto fail;
11847 	}
11848       res = lowpart_subreg (omode, x, imode);
11849       if (res)
11850 	return res;
11851     }
11852 
11853  fail:
11854   return gen_rtx_CLOBBER (omode, const0_rtx);
11855 }
11856 
11857 /* Try to simplify a comparison between OP0 and a constant OP1,
11858    where CODE is the comparison code that will be tested, into a
11859    (CODE OP0 const0_rtx) form.
11860 
11861    The result is a possibly different comparison code to use.
11862    *POP1 may be updated.  */
11863 
11864 static enum rtx_code
11865 simplify_compare_const (enum rtx_code code, machine_mode mode,
11866 			rtx op0, rtx *pop1)
11867 {
11868   scalar_int_mode int_mode;
11869   HOST_WIDE_INT const_op = INTVAL (*pop1);
11870 
11871   /* Get the constant we are comparing against and turn off all bits
11872      not on in our mode.  */
11873   if (mode != VOIDmode)
11874     const_op = trunc_int_for_mode (const_op, mode);
11875 
11876   /* If we are comparing against a constant power of two and the value
11877      being compared can only have that single bit nonzero (e.g., it was
11878      `and'ed with that bit), we can replace this with a comparison
11879      with zero.  */
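  /* For example, if OP0 is known to have only bit 2 possibly nonzero,
     (eq OP0 (const_int 4)) becomes (ne OP0 (const_int 0)).  */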
11880   if (const_op
11881       && (code == EQ || code == NE || code == GE || code == GEU
11882 	  || code == LT || code == LTU)
11883       && is_a <scalar_int_mode> (mode, &int_mode)
11884       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11885       && pow2p_hwi (const_op & GET_MODE_MASK (int_mode))
11886       && (nonzero_bits (op0, int_mode)
11887 	  == (unsigned HOST_WIDE_INT) (const_op & GET_MODE_MASK (int_mode))))
11888     {
11889       code = (code == EQ || code == GE || code == GEU ? NE : EQ);
11890       const_op = 0;
11891     }
11892 
11893   /* Similarly, if we are comparing a value known to be either -1 or
11894      0 with -1, change it to the opposite comparison against zero.  */
11895   if (const_op == -1
11896       && (code == EQ || code == NE || code == GT || code == LE
11897 	  || code == GEU || code == LTU)
11898       && is_a <scalar_int_mode> (mode, &int_mode)
11899       && num_sign_bit_copies (op0, int_mode) == GET_MODE_PRECISION (int_mode))
11900     {
11901       code = (code == EQ || code == LE || code == GEU ? NE : EQ);
11902       const_op = 0;
11903     }
11904 
11905   /* Do some canonicalizations based on the comparison code.  We prefer
11906      comparisons against zero and then prefer equality comparisons.
11907      If we can reduce the size of a constant, we will do that too.  */
11908   switch (code)
11909     {
11910     case LT:
11911       /* < C is equivalent to <= (C - 1) */
11912       if (const_op > 0)
11913 	{
11914 	  const_op -= 1;
11915 	  code = LE;
11916 	  /* ... fall through to LE case below.  */
11917 	  gcc_fallthrough ();
11918 	}
11919       else
11920 	break;
11921 
11922     case LE:
11923       /* <= C is equivalent to < (C + 1); we do this for C < 0  */
11924       if (const_op < 0)
11925 	{
11926 	  const_op += 1;
11927 	  code = LT;
11928 	}
11929 
11930       /* If we are doing a <= 0 comparison on a value known to have
11931 	 a zero sign bit, we can replace this with == 0.  */
11932       else if (const_op == 0
11933 	       && is_a <scalar_int_mode> (mode, &int_mode)
11934 	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11935 	       && (nonzero_bits (op0, int_mode)
11936 		   & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11937 	       == 0)
11938 	code = EQ;
11939       break;
11940 
11941     case GE:
11942       /* >= C is equivalent to > (C - 1).  */
11943       if (const_op > 0)
11944 	{
11945 	  const_op -= 1;
11946 	  code = GT;
11947 	  /* ... fall through to GT below.  */
11948 	  gcc_fallthrough ();
11949 	}
11950       else
11951 	break;
11952 
11953     case GT:
11954       /* > C is equivalent to >= (C + 1); we do this for C < 0.  */
11955       if (const_op < 0)
11956 	{
11957 	  const_op += 1;
11958 	  code = GE;
11959 	}
11960 
11961       /* If we are doing a > 0 comparison on a value known to have
11962 	 a zero sign bit, we can replace this with != 0.  */
11963       else if (const_op == 0
11964 	       && is_a <scalar_int_mode> (mode, &int_mode)
11965 	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11966 	       && (nonzero_bits (op0, int_mode)
11967 		   & (HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11968 	       == 0)
11969 	code = NE;
11970       break;
11971 
11972     case LTU:
11973       /* < C is equivalent to <= (C - 1).  */
11974       if (const_op > 0)
11975 	{
11976 	  const_op -= 1;
11977 	  code = LEU;
11978 	  /* ... fall through ...  */
11979 	  gcc_fallthrough ();
11980 	}
11981       /* (unsigned) < 0x80000000 is equivalent to >= 0.  */
11982       else if (is_a <scalar_int_mode> (mode, &int_mode)
11983 	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
11984 	       && ((unsigned HOST_WIDE_INT) const_op
11985 		   == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
11986 	{
11987 	  const_op = 0;
11988 	  code = GE;
11989 	  break;
11990 	}
11991       else
11992 	break;
11993 
11994     case LEU:
11995       /* unsigned <= 0 is equivalent to == 0 */
11996       if (const_op == 0)
11997 	code = EQ;
11998       /* (unsigned) <= 0x7fffffff is equivalent to >= 0.  */
11999       else if (is_a <scalar_int_mode> (mode, &int_mode)
12000 	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
12001 	       && ((unsigned HOST_WIDE_INT) const_op
12002 		   == ((HOST_WIDE_INT_1U
12003 			<< (GET_MODE_PRECISION (int_mode) - 1)) - 1)))
12004 	{
12005 	  const_op = 0;
12006 	  code = GE;
12007 	}
12008       break;
12009 
12010     case GEU:
12011       /* >= C is equivalent to > (C - 1).  */
12012       if (const_op > 1)
12013 	{
12014 	  const_op -= 1;
12015 	  code = GTU;
12016 	  /* ... fall through ...  */
12017 	  gcc_fallthrough ();
12018 	}
12019 
12020       /* (unsigned) >= 0x80000000 is equivalent to < 0.  */
12021       else if (is_a <scalar_int_mode> (mode, &int_mode)
12022 	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
12023 	       && ((unsigned HOST_WIDE_INT) const_op
12024 		   == HOST_WIDE_INT_1U << (GET_MODE_PRECISION (int_mode) - 1)))
12025 	{
12026 	  const_op = 0;
12027 	  code = LT;
12028 	  break;
12029 	}
12030       else
12031 	break;
12032 
12033     case GTU:
12034       /* unsigned > 0 is equivalent to != 0 */
12035       if (const_op == 0)
12036 	code = NE;
12037       /* (unsigned) > 0x7fffffff is equivalent to < 0.  */
12038       else if (is_a <scalar_int_mode> (mode, &int_mode)
12039 	       && GET_MODE_PRECISION (int_mode) - 1 < HOST_BITS_PER_WIDE_INT
12040 	       && ((unsigned HOST_WIDE_INT) const_op
12041 		   == (HOST_WIDE_INT_1U
12042 		       << (GET_MODE_PRECISION (int_mode) - 1)) - 1))
12043 	{
12044 	  const_op = 0;
12045 	  code = LT;
12046 	}
12047       break;
12048 
12049     default:
12050       break;
12051     }
12052 
12053   *pop1 = GEN_INT (const_op);
12054   return code;
12055 }
12056 
12057 /* Simplify a comparison between *POP0 and *POP1 where CODE is the
12058    comparison code that will be tested.
12059 
12060    The result is a possibly different comparison code to use.  *POP0 and
12061    *POP1 may be updated.
12062 
12063    It is possible that we might detect that a comparison is either always
12064    true or always false.  However, we do not perform general constant
12065    folding in combine, so this knowledge isn't useful.  Such tautologies
12066    should have been detected earlier.  Hence we ignore all such cases.  */
12067 
12068 static enum rtx_code
12069 simplify_comparison (enum rtx_code code, rtx *pop0, rtx *pop1)
12070 {
12071   rtx op0 = *pop0;
12072   rtx op1 = *pop1;
12073   rtx tem, tem1;
12074   int i;
12075   scalar_int_mode mode, inner_mode, tmode;
12076   opt_scalar_int_mode tmode_iter;
12077 
12078   /* Try a few ways of applying the same transformation to both operands.  */
12079   while (1)
12080     {
12081       /* The test below this one won't handle SIGN_EXTENDs on these machines,
12082 	 so check specially.  */
12083       if (!WORD_REGISTER_OPERATIONS
12084 	  && code != GTU && code != GEU && code != LTU && code != LEU
12085 	  && GET_CODE (op0) == ASHIFTRT && GET_CODE (op1) == ASHIFTRT
12086 	  && GET_CODE (XEXP (op0, 0)) == ASHIFT
12087 	  && GET_CODE (XEXP (op1, 0)) == ASHIFT
12088 	  && GET_CODE (XEXP (XEXP (op0, 0), 0)) == SUBREG
12089 	  && GET_CODE (XEXP (XEXP (op1, 0), 0)) == SUBREG
12090 	  && is_a <scalar_int_mode> (GET_MODE (op0), &mode)
12091 	  && (is_a <scalar_int_mode>
12092 	      (GET_MODE (SUBREG_REG (XEXP (XEXP (op0, 0), 0))), &inner_mode))
12093 	  && inner_mode == GET_MODE (SUBREG_REG (XEXP (XEXP (op1, 0), 0)))
12094 	  && CONST_INT_P (XEXP (op0, 1))
12095 	  && XEXP (op0, 1) == XEXP (op1, 1)
12096 	  && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
12097 	  && XEXP (op0, 1) == XEXP (XEXP (op1, 0), 1)
12098 	  && (INTVAL (XEXP (op0, 1))
12099 	      == (GET_MODE_PRECISION (mode)
12100 		  - GET_MODE_PRECISION (inner_mode))))
12101 	{
12102 	  op0 = SUBREG_REG (XEXP (XEXP (op0, 0), 0));
12103 	  op1 = SUBREG_REG (XEXP (XEXP (op1, 0), 0));
12104 	}
12105 
12106       /* If both operands are the same constant shift, see if we can ignore the
12107 	 shift.  We can if the shift is a rotate or if the bits shifted out of
12108 	 this shift are known to be zero for both inputs and if the type of
12109 	 comparison is compatible with the shift.  */
12110       if (GET_CODE (op0) == GET_CODE (op1)
12111 	  && HWI_COMPUTABLE_MODE_P (GET_MODE (op0))
12112 	  && ((GET_CODE (op0) == ROTATE && (code == NE || code == EQ))
12113 	      || ((GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFT)
12114 		  && (code != GT && code != LT && code != GE && code != LE))
12115 	      || (GET_CODE (op0) == ASHIFTRT
12116 		  && (code != GTU && code != LTU
12117 		      && code != GEU && code != LEU)))
12118 	  && CONST_INT_P (XEXP (op0, 1))
12119 	  && INTVAL (XEXP (op0, 1)) >= 0
12120 	  && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
12121 	  && XEXP (op0, 1) == XEXP (op1, 1))
12122 	{
12123 	  machine_mode mode = GET_MODE (op0);
12124 	  unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
12125 	  int shift_count = INTVAL (XEXP (op0, 1));
12126 
12127 	  if (GET_CODE (op0) == LSHIFTRT || GET_CODE (op0) == ASHIFTRT)
12128 	    mask &= (mask >> shift_count) << shift_count;
12129 	  else if (GET_CODE (op0) == ASHIFT)
12130 	    mask = (mask & (mask << shift_count)) >> shift_count;
12131 
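	  /* MASK now covers the bits of the unshifted operands that still
	     influence the result after the shift or rotate; if neither
	     operand has nonzero bits outside MASK, the shifts can be
	     stripped from both sides without changing the comparison.  */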
12132 	  if ((nonzero_bits (XEXP (op0, 0), mode) & ~mask) == 0
12133 	      && (nonzero_bits (XEXP (op1, 0), mode) & ~mask) == 0)
12134 	    op0 = XEXP (op0, 0), op1 = XEXP (op1, 0);
12135 	  else
12136 	    break;
12137 	}
12138 
12139       /* If both operands are AND's of a paradoxical SUBREG by constant, the
12140 	 SUBREGs are of the same mode, and, in both cases, the AND would
12141 	 be redundant if the comparison was done in the narrower mode,
12142 	 do the comparison in the narrower mode (e.g., we are AND'ing with 1
12143 	 and the operand's possibly nonzero bits are 0xffffff01; in that case
12144 	 if we only care about QImode, we don't need the AND).  This case
12145 	 occurs if the output mode of an scc insn is not SImode and
12146 	 STORE_FLAG_VALUE == 1 (e.g., the 386).
12147 
12148 	 Similarly, check for a case where the AND's are ZERO_EXTEND
12149 	 operations from some narrower mode even though a SUBREG is not
12150 	 present.  */
12151 
12152       else if (GET_CODE (op0) == AND && GET_CODE (op1) == AND
12153 	       && CONST_INT_P (XEXP (op0, 1))
12154 	       && CONST_INT_P (XEXP (op1, 1)))
12155 	{
12156 	  rtx inner_op0 = XEXP (op0, 0);
12157 	  rtx inner_op1 = XEXP (op1, 0);
12158 	  HOST_WIDE_INT c0 = INTVAL (XEXP (op0, 1));
12159 	  HOST_WIDE_INT c1 = INTVAL (XEXP (op1, 1));
12160 	  int changed = 0;
12161 
12162 	  if (paradoxical_subreg_p (inner_op0)
12163 	      && GET_CODE (inner_op1) == SUBREG
12164 	      && HWI_COMPUTABLE_MODE_P (GET_MODE (SUBREG_REG (inner_op0)))
12165 	      && (GET_MODE (SUBREG_REG (inner_op0))
12166 		  == GET_MODE (SUBREG_REG (inner_op1)))
12167 	      && ((~c0) & nonzero_bits (SUBREG_REG (inner_op0),
12168 					GET_MODE (SUBREG_REG (inner_op0)))) == 0
12169 	      && ((~c1) & nonzero_bits (SUBREG_REG (inner_op1),
12170 					GET_MODE (SUBREG_REG (inner_op1)))) == 0)
12171 	    {
12172 	      op0 = SUBREG_REG (inner_op0);
12173 	      op1 = SUBREG_REG (inner_op1);
12174 
12175 	      /* The resulting comparison is always unsigned since we masked
12176 		 off the original sign bit.  */
12177 	      code = unsigned_condition (code);
12178 
12179 	      changed = 1;
12180 	    }
12181 
12182 	  else if (c0 == c1)
12183 	    FOR_EACH_MODE_UNTIL (tmode,
12184 				 as_a <scalar_int_mode> (GET_MODE (op0)))
12185 	      if ((unsigned HOST_WIDE_INT) c0 == GET_MODE_MASK (tmode))
12186 		{
12187 		  op0 = gen_lowpart_or_truncate (tmode, inner_op0);
12188 		  op1 = gen_lowpart_or_truncate (tmode, inner_op1);
12189 		  code = unsigned_condition (code);
12190 		  changed = 1;
12191 		  break;
12192 		}
12193 
12194 	  if (! changed)
12195 	    break;
12196 	}
12197 
12198       /* If both operands are NOT, we can strip off the outer operation
12199 	 and adjust the comparison code for swapped operands; similarly for
12200 	 NEG, except that this must be an equality comparison.  */
12201       else if ((GET_CODE (op0) == NOT && GET_CODE (op1) == NOT)
12202 	       || (GET_CODE (op0) == NEG && GET_CODE (op1) == NEG
12203 		   && (code == EQ || code == NE)))
12204 	op0 = XEXP (op0, 0), op1 = XEXP (op1, 0), code = swap_condition (code);
12205 
12206       else
12207 	break;
12208     }
12209 
12210   /* If the first operand is a constant, swap the operands and adjust the
12211      comparison code appropriately, but don't do this if the second operand
12212      is already a constant integer.  */
12213   if (swap_commutative_operands_p (op0, op1))
12214     {
12215       std::swap (op0, op1);
12216       code = swap_condition (code);
12217     }
12218 
12219   /* We now enter a loop during which we will try to simplify the comparison.
12220      For the most part, we only are concerned with comparisons with zero,
12221      but some things may really be comparisons with zero but not start
12222      out looking that way.  */
12223 
12224   while (CONST_INT_P (op1))
12225     {
12226       machine_mode raw_mode = GET_MODE (op0);
12227       scalar_int_mode int_mode;
12228       int equality_comparison_p;
12229       int sign_bit_comparison_p;
12230       int unsigned_comparison_p;
12231       HOST_WIDE_INT const_op;
12232 
12233       /* We only want to handle integral modes.  This catches VOIDmode,
12234 	 CCmode, and the floating-point modes.  An exception is that we
12235 	 can handle VOIDmode if OP0 is a COMPARE or a comparison
12236 	 operation.  */
12237 
12238       if (GET_MODE_CLASS (raw_mode) != MODE_INT
12239 	  && ! (raw_mode == VOIDmode
12240 		&& (GET_CODE (op0) == COMPARE || COMPARISON_P (op0))))
12241 	break;
12242 
12243       /* Try to simplify the compare to constant, possibly changing the
12244 	 comparison op, and/or changing op1 to zero.  */
12245       code = simplify_compare_const (code, raw_mode, op0, &op1);
12246       const_op = INTVAL (op1);
12247 
12248       /* Compute some predicates to simplify code below.  */
12249 
12250       equality_comparison_p = (code == EQ || code == NE);
12251       sign_bit_comparison_p = ((code == LT || code == GE) && const_op == 0);
12252       unsigned_comparison_p = (code == LTU || code == LEU || code == GTU
12253 			       || code == GEU);
12254 
12255       /* If this is a sign bit comparison and we can do arithmetic in
12256 	 MODE, say that we will only be needing the sign bit of OP0.  */
12257       if (sign_bit_comparison_p
12258 	  && is_a <scalar_int_mode> (raw_mode, &int_mode)
12259 	  && HWI_COMPUTABLE_MODE_P (int_mode))
12260 	op0 = force_to_mode (op0, int_mode,
12261 			     HOST_WIDE_INT_1U
12262 			     << (GET_MODE_PRECISION (int_mode) - 1),
12263 			     0);
12264 
12265       if (COMPARISON_P (op0))
12266 	{
12267 	  /* We can't do anything if OP0 is a condition code value, rather
12268 	     than an actual data value.  */
12269 	  if (const_op != 0
12270 	      || CC0_P (XEXP (op0, 0))
12271 	      || GET_MODE_CLASS (GET_MODE (XEXP (op0, 0))) == MODE_CC)
12272 	    break;
12273 
12274 	  /* Get the two operands being compared.  */
12275 	  if (GET_CODE (XEXP (op0, 0)) == COMPARE)
12276 	    tem = XEXP (XEXP (op0, 0), 0), tem1 = XEXP (XEXP (op0, 0), 1);
12277 	  else
12278 	    tem = XEXP (op0, 0), tem1 = XEXP (op0, 1);
12279 
12280 	  /* Check for the cases where we simply want the result of the
12281 	     earlier test or the opposite of that result.  */
12282 	  if (code == NE || code == EQ
12283 	      || (val_signbit_known_set_p (raw_mode, STORE_FLAG_VALUE)
12284 		  && (code == LT || code == GE)))
12285 	    {
12286 	      enum rtx_code new_code;
12287 	      if (code == LT || code == NE)
12288 		new_code = GET_CODE (op0);
12289 	      else
12290 		new_code = reversed_comparison_code (op0, NULL);
12291 
12292 	      if (new_code != UNKNOWN)
12293 		{
12294 		  code = new_code;
12295 		  op0 = tem;
12296 		  op1 = tem1;
12297 		  continue;
12298 		}
12299 	    }
12300 	  break;
12301 	}
12302 
12303       if (raw_mode == VOIDmode)
12304 	break;
12305       scalar_int_mode mode = as_a <scalar_int_mode> (raw_mode);
12306 
12307       /* Now try cases based on the opcode of OP0.  If none of the cases
12308 	 does a "continue", we exit this loop immediately after the
12309 	 switch.  */
12310 
12311       unsigned int mode_width = GET_MODE_PRECISION (mode);
12312       unsigned HOST_WIDE_INT mask = GET_MODE_MASK (mode);
12313       switch (GET_CODE (op0))
12314 	{
12315 	case ZERO_EXTRACT:
12316 	  /* If we are extracting a single bit from a variable position in
12317 	     a constant that has only a single bit set and are comparing it
12318 	     with zero, we can convert this into an equality comparison
12319 	     between the position and the location of the single bit.  */
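	  /* For example, with little-endian bit numbering,
	     (eq (zero_extract (const_int 8) (const_int 1) POS) (const_int 0))
	     becomes (ne POS (const_int 3)), since bit 3 is the only bit set
	     in 8.  */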
12320 	  /* Except we can't if SHIFT_COUNT_TRUNCATED is set, since we might
12321 	     have already reduced the shift count modulo the word size.  */
12322 	  if (!SHIFT_COUNT_TRUNCATED
12323 	      && CONST_INT_P (XEXP (op0, 0))
12324 	      && XEXP (op0, 1) == const1_rtx
12325 	      && equality_comparison_p && const_op == 0
12326 	      && (i = exact_log2 (UINTVAL (XEXP (op0, 0)))) >= 0)
12327 	    {
12328 	      if (BITS_BIG_ENDIAN)
12329 		i = BITS_PER_WORD - 1 - i;
12330 
12331 	      op0 = XEXP (op0, 2);
12332 	      op1 = GEN_INT (i);
12333 	      const_op = i;
12334 
12335 	      /* Result is nonzero iff shift count is equal to I.  */
12336 	      code = reverse_condition (code);
12337 	      continue;
12338 	    }
12339 
12340 	  /* fall through */
12341 
12342 	case SIGN_EXTRACT:
12343 	  tem = expand_compound_operation (op0);
12344 	  if (tem != op0)
12345 	    {
12346 	      op0 = tem;
12347 	      continue;
12348 	    }
12349 	  break;
12350 
12351 	case NOT:
12352 	  /* If testing for equality, we can take the NOT of the constant.  */
12353 	  if (equality_comparison_p
12354 	      && (tem = simplify_unary_operation (NOT, mode, op1, mode)) != 0)
12355 	    {
12356 	      op0 = XEXP (op0, 0);
12357 	      op1 = tem;
12358 	      continue;
12359 	    }
12360 
12361 	  /* If just looking at the sign bit, reverse the sense of the
12362 	     comparison.  */
12363 	  if (sign_bit_comparison_p)
12364 	    {
12365 	      op0 = XEXP (op0, 0);
12366 	      code = (code == GE ? LT : GE);
12367 	      continue;
12368 	    }
12369 	  break;
12370 
12371 	case NEG:
12372 	  /* If testing for equality, we can take the NEG of the constant.  */
12373 	  if (equality_comparison_p
12374 	      && (tem = simplify_unary_operation (NEG, mode, op1, mode)) != 0)
12375 	    {
12376 	      op0 = XEXP (op0, 0);
12377 	      op1 = tem;
12378 	      continue;
12379 	    }
12380 
12381 	  /* The remaining cases only apply to comparisons with zero.  */
12382 	  if (const_op != 0)
12383 	    break;
12384 
12385 	  /* When X is ABS or is known positive,
12386 	     (neg X) is < 0 if and only if X != 0.  */
12387 
12388 	  if (sign_bit_comparison_p
12389 	      && (GET_CODE (XEXP (op0, 0)) == ABS
12390 		  || (mode_width <= HOST_BITS_PER_WIDE_INT
12391 		      && (nonzero_bits (XEXP (op0, 0), mode)
12392 			  & (HOST_WIDE_INT_1U << (mode_width - 1)))
12393 			 == 0)))
12394 	    {
12395 	      op0 = XEXP (op0, 0);
12396 	      code = (code == LT ? NE : EQ);
12397 	      continue;
12398 	    }
12399 
12400 	  /* If we have NEG of something whose two high-order bits are the
12401 	     same, we know that "(-a) < 0" is equivalent to "a > 0".  */
12402 	  if (num_sign_bit_copies (op0, mode) >= 2)
12403 	    {
12404 	      op0 = XEXP (op0, 0);
12405 	      code = swap_condition (code);
12406 	      continue;
12407 	    }
12408 	  break;
12409 
12410 	case ROTATE:
12411 	  /* If we are testing equality and our count is a constant, we
12412 	     can perform the inverse operation on our RHS.  */
12413 	  if (equality_comparison_p && CONST_INT_P (XEXP (op0, 1))
12414 	      && (tem = simplify_binary_operation (ROTATERT, mode,
12415 						   op1, XEXP (op0, 1))) != 0)
12416 	    {
12417 	      op0 = XEXP (op0, 0);
12418 	      op1 = tem;
12419 	      continue;
12420 	    }
12421 
12422 	  /* If we are doing a < 0 or >= 0 comparison, it means we are testing
12423 	     a particular bit.  Convert it to an AND of a constant of that
12424 	     bit.  This will be converted into a ZERO_EXTRACT.  */
12425 	  if (const_op == 0 && sign_bit_comparison_p
12426 	      && CONST_INT_P (XEXP (op0, 1))
12427 	      && mode_width <= HOST_BITS_PER_WIDE_INT)
12428 	    {
12429 	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
12430 					    (HOST_WIDE_INT_1U
12431 					     << (mode_width - 1
12432 						 - INTVAL (XEXP (op0, 1)))));
12433 	      code = (code == LT ? NE : EQ);
12434 	      continue;
12435 	    }
12436 
12437 	  /* Fall through.  */
12438 
12439 	case ABS:
12440 	  /* ABS is ignorable inside an equality comparison with zero.  */
12441 	  if (const_op == 0 && equality_comparison_p)
12442 	    {
12443 	      op0 = XEXP (op0, 0);
12444 	      continue;
12445 	    }
12446 	  break;
12447 
12448 	case SIGN_EXTEND:
12449 	  /* Can simplify (compare (zero/sign_extend FOO) CONST) to
12450 	     (compare FOO CONST) if CONST fits in FOO's mode and we
12451 	     are either testing inequality or have an unsigned
12452 	     comparison with ZERO_EXTEND or a signed comparison with
12453 	     SIGN_EXTEND.  But don't do it if we don't have a compare
12454 	     insn of the given mode, since we'd have to revert it
12455 	     later on, and then we wouldn't know whether to sign- or
12456 	     zero-extend.  */
12457 	  if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
12458 	      && ! unsigned_comparison_p
12459 	      && HWI_COMPUTABLE_MODE_P (mode)
12460 	      && trunc_int_for_mode (const_op, mode) == const_op
12461 	      && have_insn_for (COMPARE, mode))
12462 	    {
12463 	      op0 = XEXP (op0, 0);
12464 	      continue;
12465 	    }
12466 	  break;
12467 
12468 	case SUBREG:
12469 	  /* Check for the case where we are comparing A - C1 with C2, that is
12470 
12471 	       (subreg:MODE (plus (A) (-C1))) op (C2)
12472 
12473 	     with C1 a constant, and try to lift the SUBREG, i.e. to do the
12474 	     comparison in the wider mode.  One of the following two conditions
12475 	     must be true in order for this to be valid:
12476 
12477 	       1. The mode extension results in the same bit pattern being added
12478 		  on both sides and the comparison is equality or unsigned.  As
12479 		  C2 has been truncated to fit in MODE, the pattern can only be
12480 		  all 0s or all 1s.
12481 
12482 	       2. The mode extension results in the sign bit being copied on
12483 		  each side.
12484 
12485 	     The difficulty here is that we have predicates for A but not for
12486 	     (A - C1) so we need to check that C1 is within proper bounds so
12487 	     as to perturb A as little as possible.  */
12488 
12489 	  if (mode_width <= HOST_BITS_PER_WIDE_INT
12490 	      && subreg_lowpart_p (op0)
12491 	      && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
12492 					 &inner_mode)
12493 	      && GET_MODE_PRECISION (inner_mode) > mode_width
12494 	      && GET_CODE (SUBREG_REG (op0)) == PLUS
12495 	      && CONST_INT_P (XEXP (SUBREG_REG (op0), 1)))
12496 	    {
12497 	      rtx a = XEXP (SUBREG_REG (op0), 0);
12498 	      HOST_WIDE_INT c1 = -INTVAL (XEXP (SUBREG_REG (op0), 1));
12499 
12500 	      if ((c1 > 0
12501 		   && (unsigned HOST_WIDE_INT) c1
12502 		       < HOST_WIDE_INT_1U << (mode_width - 1)
12503 		   && (equality_comparison_p || unsigned_comparison_p)
12504 		   /* (A - C1) zero-extends if it is positive and sign-extends
12505 		      if it is negative, C2 both zero- and sign-extends.  */
12506 		   && (((nonzero_bits (a, inner_mode)
12507 			 & ~GET_MODE_MASK (mode)) == 0
12508 			&& const_op >= 0)
12509 		       /* (A - C1) sign-extends if it is positive and 1-extends
12510 			  if it is negative, C2 both sign- and 1-extends.  */
12511 		       || (num_sign_bit_copies (a, inner_mode)
12512 			   > (unsigned int) (GET_MODE_PRECISION (inner_mode)
12513 					     - mode_width)
12514 			   && const_op < 0)))
12515 		  || ((unsigned HOST_WIDE_INT) c1
12516 		       < HOST_WIDE_INT_1U << (mode_width - 2)
12517 		      /* (A - C1) always sign-extends, like C2.  */
12518 		      && num_sign_bit_copies (a, inner_mode)
12519 			 > (unsigned int) (GET_MODE_PRECISION (inner_mode)
12520 					   - (mode_width - 1))))
12521 		{
12522 		  op0 = SUBREG_REG (op0);
12523 		  continue;
12524 		}
12525 	    }
12526 
12527 	  /* If the inner mode is narrower and we are extracting the low part,
12528 	     we can treat the SUBREG as if it were a ZERO_EXTEND.  */
12529 	  if (paradoxical_subreg_p (op0))
12530 	    ;
12531 	  else if (subreg_lowpart_p (op0)
12532 		   && GET_MODE_CLASS (mode) == MODE_INT
12533 		   && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
12534 		   && (code == NE || code == EQ)
12535 		   && GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
12536 		   && !paradoxical_subreg_p (op0)
12537 		   && (nonzero_bits (SUBREG_REG (op0), inner_mode)
12538 		       & ~GET_MODE_MASK (mode)) == 0)
12539 	    {
12540 	      /* Remove outer subregs that don't do anything.  */
12541 	      tem = gen_lowpart (inner_mode, op1);
12542 
12543 	      if ((nonzero_bits (tem, inner_mode)
12544 		   & ~GET_MODE_MASK (mode)) == 0)
12545 		{
12546 		  op0 = SUBREG_REG (op0);
12547 		  op1 = tem;
12548 		  continue;
12549 		}
12550 	      break;
12551 	    }
12552 	  else
12553 	    break;
12554 
12555 	  /* FALLTHROUGH */
12556 
12557 	case ZERO_EXTEND:
12558 	  if (is_int_mode (GET_MODE (XEXP (op0, 0)), &mode)
12559 	      && (unsigned_comparison_p || equality_comparison_p)
12560 	      && HWI_COMPUTABLE_MODE_P (mode)
12561 	      && (unsigned HOST_WIDE_INT) const_op <= GET_MODE_MASK (mode)
12562 	      && const_op >= 0
12563 	      && have_insn_for (COMPARE, mode))
12564 	    {
12565 	      op0 = XEXP (op0, 0);
12566 	      continue;
12567 	    }
12568 	  break;
12569 
12570 	case PLUS:
12571 	  /* (eq (plus X A) B) -> (eq X (minus B A)).  We can only do
12572 	     this for equality comparisons due to pathological cases involving
12573 	     overflows.  */
12574 	  if (equality_comparison_p
12575 	      && (tem = simplify_binary_operation (MINUS, mode,
12576 						   op1, XEXP (op0, 1))) != 0)
12577 	    {
12578 	      op0 = XEXP (op0, 0);
12579 	      op1 = tem;
12580 	      continue;
12581 	    }
12582 
12583 	  /* (plus (abs X) (const_int -1)) is < 0 if and only if X == 0.  */
12584 	  if (const_op == 0 && XEXP (op0, 1) == constm1_rtx
12585 	      && GET_CODE (XEXP (op0, 0)) == ABS && sign_bit_comparison_p)
12586 	    {
12587 	      op0 = XEXP (XEXP (op0, 0), 0);
12588 	      code = (code == LT ? EQ : NE);
12589 	      continue;
12590 	    }
12591 	  break;
12592 
12593 	case MINUS:
12594 	  /* We used to optimize signed comparisons against zero, but that
12595 	     was incorrect.  Unsigned comparisons against zero (GTU, LEU)
12596 	     arrive here as equality comparisons, or (GEU, LTU) are
12597 	     optimized away.  No need to special-case them.  */
12598 
12599 	  /* (eq (minus A B) C) -> (eq A (plus B C)) or
12600 	     (eq B (minus A C)), whichever simplifies.  We can only do
12601 	     this for equality comparisons due to pathological cases involving
12602 	     overflows.  */
12603 	  if (equality_comparison_p
12604 	      && (tem = simplify_binary_operation (PLUS, mode,
12605 						   XEXP (op0, 1), op1)) != 0)
12606 	    {
12607 	      op0 = XEXP (op0, 0);
12608 	      op1 = tem;
12609 	      continue;
12610 	    }
12611 
12612 	  if (equality_comparison_p
12613 	      && (tem = simplify_binary_operation (MINUS, mode,
12614 						   XEXP (op0, 0), op1)) != 0)
12615 	    {
12616 	      op0 = XEXP (op0, 1);
12617 	      op1 = tem;
12618 	      continue;
12619 	    }
12620 
12621 	  /* The sign bit of (minus (ashiftrt X C) X), where C is the number
12622 	     of bits in X minus 1, is one iff X > 0.  */
12623 	  if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == ASHIFTRT
12624 	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12625 	      && UINTVAL (XEXP (XEXP (op0, 0), 1)) == mode_width - 1
12626 	      && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
12627 	    {
12628 	      op0 = XEXP (op0, 1);
12629 	      code = (code == GE ? LE : GT);
12630 	      continue;
12631 	    }
12632 	  break;
12633 
12634 	case XOR:
12635 	  /* (eq (xor A B) C) -> (eq A (xor B C)).  This is a simplification
12636 	     if C is zero or B is a constant.  */
12637 	  if (equality_comparison_p
12638 	      && (tem = simplify_binary_operation (XOR, mode,
12639 						   XEXP (op0, 1), op1)) != 0)
12640 	    {
12641 	      op0 = XEXP (op0, 0);
12642 	      op1 = tem;
12643 	      continue;
12644 	    }
12645 	  break;
12646 
12647 
12648 	case IOR:
12649 	  /* The sign bit of (ior (plus X (const_int -1)) X) is nonzero
12650 	     iff X <= 0.  */
12651 	  if (sign_bit_comparison_p && GET_CODE (XEXP (op0, 0)) == PLUS
12652 	      && XEXP (XEXP (op0, 0), 1) == constm1_rtx
12653 	      && rtx_equal_p (XEXP (XEXP (op0, 0), 0), XEXP (op0, 1)))
12654 	    {
12655 	      op0 = XEXP (op0, 1);
12656 	      code = (code == GE ? GT : LE);
12657 	      continue;
12658 	    }
12659 	  break;
12660 
12661 	case AND:
12662 	  /* Convert (and (xshift 1 X) Y) to (and (lshiftrt Y X) 1).  This
12663 	     will be converted to a ZERO_EXTRACT later.  */
12664 	  if (const_op == 0 && equality_comparison_p
12665 	      && GET_CODE (XEXP (op0, 0)) == ASHIFT
12666 	      && XEXP (XEXP (op0, 0), 0) == const1_rtx)
12667 	    {
12668 	      op0 = gen_rtx_LSHIFTRT (mode, XEXP (op0, 1),
12669 				      XEXP (XEXP (op0, 0), 1));
12670 	      op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
12671 	      continue;
12672 	    }
12673 
12674 	  /* If we are comparing (and (lshiftrt X C1) C2) for equality with
12675 	     zero and X is a comparison and C1 and C2 describe only bits set
12676 	     in STORE_FLAG_VALUE, we can compare with X.  */
12677 	  if (const_op == 0 && equality_comparison_p
12678 	      && mode_width <= HOST_BITS_PER_WIDE_INT
12679 	      && CONST_INT_P (XEXP (op0, 1))
12680 	      && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
12681 	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12682 	      && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0
12683 	      && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT)
12684 	    {
12685 	      mask = ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
12686 		      << INTVAL (XEXP (XEXP (op0, 0), 1)));
12687 	      if ((~STORE_FLAG_VALUE & mask) == 0
12688 		  && (COMPARISON_P (XEXP (XEXP (op0, 0), 0))
12689 		      || ((tem = get_last_value (XEXP (XEXP (op0, 0), 0))) != 0
12690 			  && COMPARISON_P (tem))))
12691 		{
12692 		  op0 = XEXP (XEXP (op0, 0), 0);
12693 		  continue;
12694 		}
12695 	    }
12696 
12697 	  /* If we are doing an equality comparison of an AND of a bit equal
12698 	     to the sign bit, replace this with a LT or GE comparison of
12699 	     the underlying value.  */
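	  /* For example, (eq (and:SI X (const_int 0x80000000)) (const_int 0))
	     becomes (ge X (const_int 0)).  */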
12700 	  if (equality_comparison_p
12701 	      && const_op == 0
12702 	      && CONST_INT_P (XEXP (op0, 1))
12703 	      && mode_width <= HOST_BITS_PER_WIDE_INT
12704 	      && ((INTVAL (XEXP (op0, 1)) & GET_MODE_MASK (mode))
12705 		  == HOST_WIDE_INT_1U << (mode_width - 1)))
12706 	    {
12707 	      op0 = XEXP (op0, 0);
12708 	      code = (code == EQ ? GE : LT);
12709 	      continue;
12710 	    }
12711 
12712 	  /* If this AND operation is really a ZERO_EXTEND from a narrower
12713 	     mode, the constant fits within that mode, and this is either an
12714 	     equality or unsigned comparison, try to do this comparison in
12715 	     the narrower mode.
12716 
12717 	     Note that in:
12718 
12719 	     (ne:DI (and:DI (reg:DI 4) (const_int 0xffffffff)) (const_int 0))
12720 	     -> (ne:DI (reg:SI 4) (const_int 0))
12721 
12722 	     unless TARGET_TRULY_NOOP_TRUNCATION allows it or the register is
12723 	     known to hold a value of the required mode, the
12724 	     transformation is invalid.  */
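	  /* For example, (leu (and:SI X (const_int 255)) (const_int 16)) can
	     be tested as a QImode comparison of the low part of X against 16,
	     when the target allows that truncation.  */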
12725 	  if ((equality_comparison_p || unsigned_comparison_p)
12726 	      && CONST_INT_P (XEXP (op0, 1))
12727 	      && (i = exact_log2 ((UINTVAL (XEXP (op0, 1))
12728 				   & GET_MODE_MASK (mode))
12729 				  + 1)) >= 0
12730 	      && const_op >> i == 0
12731 	      && int_mode_for_size (i, 1).exists (&tmode))
12732 	    {
12733 	      op0 = gen_lowpart_or_truncate (tmode, XEXP (op0, 0));
12734 	      continue;
12735 	    }
12736 
12737 	  /* If this is (and:M1 (subreg:M1 X:M2 0) (const_int C1)) where C1
12738 	     fits in both M1 and M2 and the SUBREG is either paradoxical
12739 	     or represents the low part, permute the SUBREG and the AND
12740 	     and try again.  */
12741 	  if (GET_CODE (XEXP (op0, 0)) == SUBREG
12742 	      && CONST_INT_P (XEXP (op0, 1)))
12743 	    {
12744 	      unsigned HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
12745 	      /* Require an integral mode, to avoid creating something like
12746 		 (AND:SF ...).  */
12747 	      if ((is_a <scalar_int_mode>
12748 		   (GET_MODE (SUBREG_REG (XEXP (op0, 0))), &tmode))
12749 		  /* It is unsafe to commute the AND into the SUBREG if the
12750 		     SUBREG is paradoxical and WORD_REGISTER_OPERATIONS is
12751 		     not defined.  As originally written the upper bits
12752 		     have a defined value due to the AND operation.
12753 		     However, if we commute the AND inside the SUBREG then
12754 		     they no longer have defined values and the meaning of
12755 		     the code has been changed.
12756 		     Also C1 should not change value in the smaller mode,
12757 		     see PR67028 (a positive C1 can become negative in the
12758 		     smaller mode, so that the AND does no longer mask the
12759 		     upper bits).  */
12760 		  && ((WORD_REGISTER_OPERATIONS
12761 		       && mode_width > GET_MODE_PRECISION (tmode)
12762 		       && mode_width <= BITS_PER_WORD
12763 		       && trunc_int_for_mode (c1, tmode) == (HOST_WIDE_INT) c1)
12764 		      || (mode_width <= GET_MODE_PRECISION (tmode)
12765 			  && subreg_lowpart_p (XEXP (op0, 0))))
12766 		  && mode_width <= HOST_BITS_PER_WIDE_INT
12767 		  && HWI_COMPUTABLE_MODE_P (tmode)
12768 		  && (c1 & ~mask) == 0
12769 		  && (c1 & ~GET_MODE_MASK (tmode)) == 0
12770 		  && c1 != mask
12771 		  && c1 != GET_MODE_MASK (tmode))
12772 		{
12773 		  op0 = simplify_gen_binary (AND, tmode,
12774 					     SUBREG_REG (XEXP (op0, 0)),
12775 					     gen_int_mode (c1, tmode));
12776 		  op0 = gen_lowpart (mode, op0);
12777 		  continue;
12778 		}
12779 	    }
12780 
12781 	  /* Convert (ne (and (not X) 1) 0) to (eq (and X 1) 0).  */
12782 	  if (const_op == 0 && equality_comparison_p
12783 	      && XEXP (op0, 1) == const1_rtx
12784 	      && GET_CODE (XEXP (op0, 0)) == NOT)
12785 	    {
12786 	      op0 = simplify_and_const_int (NULL_RTX, mode,
12787 					    XEXP (XEXP (op0, 0), 0), 1);
12788 	      code = (code == NE ? EQ : NE);
12789 	      continue;
12790 	    }
12791 
12792 	  /* Convert (ne (and (lshiftrt (not X)) 1) 0) to
12793 	     (eq (and (lshiftrt X) 1) 0).
12794 	     Also handle the case where (not X) is expressed using xor.  */
12795 	  if (const_op == 0 && equality_comparison_p
12796 	      && XEXP (op0, 1) == const1_rtx
12797 	      && GET_CODE (XEXP (op0, 0)) == LSHIFTRT)
12798 	    {
12799 	      rtx shift_op = XEXP (XEXP (op0, 0), 0);
12800 	      rtx shift_count = XEXP (XEXP (op0, 0), 1);
12801 
12802 	      if (GET_CODE (shift_op) == NOT
12803 		  || (GET_CODE (shift_op) == XOR
12804 		      && CONST_INT_P (XEXP (shift_op, 1))
12805 		      && CONST_INT_P (shift_count)
12806 		      && HWI_COMPUTABLE_MODE_P (mode)
12807 		      && (UINTVAL (XEXP (shift_op, 1))
12808 			  == HOST_WIDE_INT_1U
12809 			       << INTVAL (shift_count))))
12810 		{
12811 		  op0
12812 		    = gen_rtx_LSHIFTRT (mode, XEXP (shift_op, 0), shift_count);
12813 		  op0 = simplify_and_const_int (NULL_RTX, mode, op0, 1);
12814 		  code = (code == NE ? EQ : NE);
12815 		  continue;
12816 		}
12817 	    }
12818 	  break;
12819 
12820 	case ASHIFT:
12821 	  /* If we have (compare (ashift FOO N) (const_int C)) and
12822 	     the high order N bits of FOO (N+1 if an inequality comparison)
12823 	     are known to be zero, we can do this by comparing FOO with C
12824 	     shifted right N bits so long as the low-order N bits of C are
12825 	     zero.  */
12826 	  if (CONST_INT_P (XEXP (op0, 1))
12827 	      && INTVAL (XEXP (op0, 1)) >= 0
12828 	      && ((INTVAL (XEXP (op0, 1)) + ! equality_comparison_p)
12829 		  < HOST_BITS_PER_WIDE_INT)
12830 	      && (((unsigned HOST_WIDE_INT) const_op
12831 		   & ((HOST_WIDE_INT_1U << INTVAL (XEXP (op0, 1)))
12832 		      - 1)) == 0)
12833 	      && mode_width <= HOST_BITS_PER_WIDE_INT
12834 	      && (nonzero_bits (XEXP (op0, 0), mode)
12835 		  & ~(mask >> (INTVAL (XEXP (op0, 1))
12836 			       + ! equality_comparison_p))) == 0)
12837 	    {
12838 	      /* We must perform a logical shift, not an arithmetic one,
12839 		 as we want the top N bits of C to be zero.  */
12840 	      unsigned HOST_WIDE_INT temp = const_op & GET_MODE_MASK (mode);
12841 
12842 	      temp >>= INTVAL (XEXP (op0, 1));
12843 	      op1 = gen_int_mode (temp, mode);
12844 	      op0 = XEXP (op0, 0);
12845 	      continue;
12846 	    }
12847 
12848 	  /* If we are doing a sign bit comparison, it means we are testing
12849 	     a particular bit.  Convert it to the appropriate AND.  */
12850 	  if (sign_bit_comparison_p && CONST_INT_P (XEXP (op0, 1))
12851 	      && mode_width <= HOST_BITS_PER_WIDE_INT)
12852 	    {
12853 	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0),
12854 					    (HOST_WIDE_INT_1U
12855 					     << (mode_width - 1
12856 						 - INTVAL (XEXP (op0, 1)))));
12857 	      code = (code == LT ? NE : EQ);
12858 	      continue;
12859 	    }
12860 
12861 	  /* If this is an equality comparison with zero and we are shifting
12862 	     the low bit to the sign bit, we can convert this to an AND of the
12863 	     low-order bit.  */
12864 	  if (const_op == 0 && equality_comparison_p
12865 	      && CONST_INT_P (XEXP (op0, 1))
12866 	      && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
12867 	    {
12868 	      op0 = simplify_and_const_int (NULL_RTX, mode, XEXP (op0, 0), 1);
12869 	      continue;
12870 	    }
12871 	  break;
12872 
12873 	case ASHIFTRT:
12874 	  /* If this is an equality comparison with zero, we can do this
12875 	     as a logical shift, which might be much simpler.  */
12876 	  if (equality_comparison_p && const_op == 0
12877 	      && CONST_INT_P (XEXP (op0, 1)))
12878 	    {
12879 	      op0 = simplify_shift_const (NULL_RTX, LSHIFTRT, mode,
12880 					  XEXP (op0, 0),
12881 					  INTVAL (XEXP (op0, 1)));
12882 	      continue;
12883 	    }
12884 
12885 	  /* If OP0 is a sign extension and CODE is not an unsigned comparison,
12886 	     do the comparison in a narrower mode.  */
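	  /* For 32-bit SImode, (ashiftrt:SI (ashift:SI X (const_int 24))
	     (const_int 24)) sign-extends the low byte of X, so the comparison
	     can be done on that low byte in QImode whenever the constant fits
	     in QImode's signed range.  */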
12887 	  if (! unsigned_comparison_p
12888 	      && CONST_INT_P (XEXP (op0, 1))
12889 	      && GET_CODE (XEXP (op0, 0)) == ASHIFT
12890 	      && XEXP (op0, 1) == XEXP (XEXP (op0, 0), 1)
12891 	      && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
12892 		  .exists (&tmode))
12893 	      && (((unsigned HOST_WIDE_INT) const_op
12894 		   + (GET_MODE_MASK (tmode) >> 1) + 1)
12895 		  <= GET_MODE_MASK (tmode)))
12896 	    {
12897 	      op0 = gen_lowpart (tmode, XEXP (XEXP (op0, 0), 0));
12898 	      continue;
12899 	    }
12900 
12901 	  /* Likewise if OP0 is a PLUS of a sign extension with a
12902 	     constant, which is usually represented with the PLUS
12903 	     between the shifts.  */
12904 	  if (! unsigned_comparison_p
12905 	      && CONST_INT_P (XEXP (op0, 1))
12906 	      && GET_CODE (XEXP (op0, 0)) == PLUS
12907 	      && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
12908 	      && GET_CODE (XEXP (XEXP (op0, 0), 0)) == ASHIFT
12909 	      && XEXP (op0, 1) == XEXP (XEXP (XEXP (op0, 0), 0), 1)
12910 	      && (int_mode_for_size (mode_width - INTVAL (XEXP (op0, 1)), 1)
12911 		  .exists (&tmode))
12912 	      && (((unsigned HOST_WIDE_INT) const_op
12913 		   + (GET_MODE_MASK (tmode) >> 1) + 1)
12914 		  <= GET_MODE_MASK (tmode)))
12915 	    {
12916 	      rtx inner = XEXP (XEXP (XEXP (op0, 0), 0), 0);
12917 	      rtx add_const = XEXP (XEXP (op0, 0), 1);
12918 	      rtx new_const = simplify_gen_binary (ASHIFTRT, mode,
12919 						   add_const, XEXP (op0, 1));
12920 
12921 	      op0 = simplify_gen_binary (PLUS, tmode,
12922 					 gen_lowpart (tmode, inner),
12923 					 new_const);
12924 	      continue;
12925 	    }
12926 
12927 	  /* FALLTHROUGH */
12928 	case LSHIFTRT:
12929 	  /* If we have (compare (xshiftrt FOO N) (const_int C)) and
12930 	     the low order N bits of FOO are known to be zero, we can do this
12931 	     by comparing FOO with C shifted left N bits so long as no
12932 	     overflow occurs.  Even if the low order N bits of FOO aren't known
12933 	     to be zero, if the comparison is >= or < we can use the same
12934 	     optimization; for > or <= we can too, by also setting all the
12935 	     low-order N bits in the comparison constant.  */
12936 	  if (CONST_INT_P (XEXP (op0, 1))
12937 	      && INTVAL (XEXP (op0, 1)) > 0
12938 	      && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT
12939 	      && mode_width <= HOST_BITS_PER_WIDE_INT
12940 	      && (((unsigned HOST_WIDE_INT) const_op
12941 		   + (GET_CODE (op0) != LSHIFTRT
12942 		      ? ((GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1)) >> 1)
12943 			 + 1)
12944 		      : 0))
12945 		  <= GET_MODE_MASK (mode) >> INTVAL (XEXP (op0, 1))))
12946 	    {
12947 	      unsigned HOST_WIDE_INT low_bits
12948 		= (nonzero_bits (XEXP (op0, 0), mode)
12949 		   & ((HOST_WIDE_INT_1U
12950 		       << INTVAL (XEXP (op0, 1))) - 1));
12951 	      if (low_bits == 0 || !equality_comparison_p)
12952 		{
12953 		  /* If the shift was logical, then we must make the condition
12954 		     unsigned.  */
12955 		  if (GET_CODE (op0) == LSHIFTRT)
12956 		    code = unsigned_condition (code);
12957 
12958 		  const_op = (unsigned HOST_WIDE_INT) const_op
12959 			      << INTVAL (XEXP (op0, 1));
12960 		  if (low_bits != 0
12961 		      && (code == GT || code == GTU
12962 			  || code == LE || code == LEU))
12963 		    const_op
12964 		      |= ((HOST_WIDE_INT_1 << INTVAL (XEXP (op0, 1))) - 1);
12965 		  op1 = GEN_INT (const_op);
12966 		  op0 = XEXP (op0, 0);
12967 		  continue;
12968 		}
12969 	    }
12970 
12971 	  /* If we are using this shift to extract just the sign bit, we
12972 	     can replace this with an LT or GE comparison.  */
12973 	  if (const_op == 0
12974 	      && (equality_comparison_p || sign_bit_comparison_p)
12975 	      && CONST_INT_P (XEXP (op0, 1))
12976 	      && UINTVAL (XEXP (op0, 1)) == mode_width - 1)
12977 	    {
12978 	      op0 = XEXP (op0, 0);
12979 	      code = (code == NE || code == GT ? LT : GE);
12980 	      continue;
12981 	    }
12982 	  break;
12983 
12984 	default:
12985 	  break;
12986 	}
12987 
12988       break;
12989     }
12990 
12991   /* Now make any compound operations involved in this comparison.  Then,
12992      check for an outermost SUBREG on OP0 that is not doing anything or is
12993      paradoxical.  The latter transformation must only be performed when
12994      it is known that the "extra" bits will be the same in op0 and op1 or
12995      that they don't matter.  There are three cases to consider:
12996 
12997      1. SUBREG_REG (op0) is a register.  In this case the bits are don't
12998      care bits and we can assume they have any convenient value.  So
12999      making the transformation is safe.
13000 
13001      2. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is UNKNOWN.
13002      In this case the upper bits of op0 are undefined.  We should not make
13003      the simplification in that case as we do not know the contents of
13004      those bits.
13005 
13006      3. SUBREG_REG (op0) is a memory and LOAD_EXTEND_OP is not UNKNOWN.
13007      In that case we know those bits are zeros or ones.  We must also be
13008      sure that they are the same as the upper bits of op1.
13009 
13010      We can never remove a SUBREG for a non-equality comparison because
13011      the sign bit is in a different place in the underlying object.  */
13012 
13013   rtx_code op0_mco_code = SET;
13014   if (op1 == const0_rtx)
13015     op0_mco_code = code == NE || code == EQ ? EQ : COMPARE;
13016 
13017   op0 = make_compound_operation (op0, op0_mco_code);
13018   op1 = make_compound_operation (op1, SET);
13019 
13020   if (GET_CODE (op0) == SUBREG && subreg_lowpart_p (op0)
13021       && is_int_mode (GET_MODE (op0), &mode)
13022       && is_int_mode (GET_MODE (SUBREG_REG (op0)), &inner_mode)
13023       && (code == NE || code == EQ))
13024     {
13025       if (paradoxical_subreg_p (op0))
13026 	{
13027 	  /* For paradoxical subregs, allow case 1 as above.  Case 3 isn't
13028 	     implemented.  */
13029 	  if (REG_P (SUBREG_REG (op0)))
13030 	    {
13031 	      op0 = SUBREG_REG (op0);
13032 	      op1 = gen_lowpart (inner_mode, op1);
13033 	    }
13034 	}
13035       else if (GET_MODE_PRECISION (inner_mode) <= HOST_BITS_PER_WIDE_INT
13036 	       && (nonzero_bits (SUBREG_REG (op0), inner_mode)
13037 		   & ~GET_MODE_MASK (mode)) == 0)
13038 	{
13039 	  tem = gen_lowpart (inner_mode, op1);
13040 
13041 	  if ((nonzero_bits (tem, inner_mode) & ~GET_MODE_MASK (mode)) == 0)
13042 	    op0 = SUBREG_REG (op0), op1 = tem;
13043 	}
13044     }
13045 
13046   /* We now do the opposite procedure: Some machines don't have compare
13047      insns in all modes.  If OP0's mode is an integer mode smaller than a
13048      word and we can't do a compare in that mode, see if there is a larger
13049      mode for which we can do the compare.  There are a number of cases in
13050      which we can use the wider mode.  */
13051 
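  /* For instance, a QImode equality test on a target that lacks a QImode
     compare pattern can be done in SImode once both operands are known to
     zero-extend (or both to sign-extend) to SImode.  */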
13052   if (is_int_mode (GET_MODE (op0), &mode)
13053       && GET_MODE_SIZE (mode) < UNITS_PER_WORD
13054       && ! have_insn_for (COMPARE, mode))
13055     FOR_EACH_WIDER_MODE (tmode_iter, mode)
13056       {
13057 	tmode = tmode_iter.require ();
13058 	if (!HWI_COMPUTABLE_MODE_P (tmode))
13059 	  break;
13060 	if (have_insn_for (COMPARE, tmode))
13061 	  {
13062 	    int zero_extended;
13063 
13064 	    /* If this is a test for negative, we can make an explicit
13065 	       test of the sign bit.  Test this first so we can use
13066 	       a paradoxical subreg to extend OP0.  */
13067 
13068 	    if (op1 == const0_rtx && (code == LT || code == GE)
13069 		&& HWI_COMPUTABLE_MODE_P (mode))
13070 	      {
13071 		unsigned HOST_WIDE_INT sign
13072 		  = HOST_WIDE_INT_1U << (GET_MODE_BITSIZE (mode) - 1);
13073 		op0 = simplify_gen_binary (AND, tmode,
13074 					   gen_lowpart (tmode, op0),
13075 					   gen_int_mode (sign, tmode));
13076 		code = (code == LT) ? NE : EQ;
13077 		break;
13078 	      }
13079 
13080 	    /* If the only nonzero bits in OP0 and OP1 are those in the
13081 	       narrower mode and this is an equality or unsigned comparison,
13082 	       we can use the wider mode.  Similarly for sign-extended
13083 	       values, in which case it is true for all comparisons.  */
13084 	    zero_extended = ((code == EQ || code == NE
13085 			      || code == GEU || code == GTU
13086 			      || code == LEU || code == LTU)
13087 			     && (nonzero_bits (op0, tmode)
13088 				 & ~GET_MODE_MASK (mode)) == 0
13089 			     && ((CONST_INT_P (op1)
13090 				  || (nonzero_bits (op1, tmode)
13091 				      & ~GET_MODE_MASK (mode)) == 0)));
13092 
13093 	    if (zero_extended
13094 		|| ((num_sign_bit_copies (op0, tmode)
13095 		     > (unsigned int) (GET_MODE_PRECISION (tmode)
13096 				       - GET_MODE_PRECISION (mode)))
13097 		    && (num_sign_bit_copies (op1, tmode)
13098 			> (unsigned int) (GET_MODE_PRECISION (tmode)
13099 					  - GET_MODE_PRECISION (mode)))))
13100 	      {
13101 		/* If OP0 is an AND and we don't have an AND in MODE either,
13102 		   make a new AND in the proper mode.  */
13103 		if (GET_CODE (op0) == AND
13104 		    && !have_insn_for (AND, mode))
13105 		  op0 = simplify_gen_binary (AND, tmode,
13106 					     gen_lowpart (tmode,
13107 							  XEXP (op0, 0)),
13108 					     gen_lowpart (tmode,
13109 							  XEXP (op0, 1)));
13110 		else
13111 		  {
13112 		    if (zero_extended)
13113 		      {
13114 			op0 = simplify_gen_unary (ZERO_EXTEND, tmode,
13115 						  op0, mode);
13116 			op1 = simplify_gen_unary (ZERO_EXTEND, tmode,
13117 						  op1, mode);
13118 		      }
13119 		    else
13120 		      {
13121 			op0 = simplify_gen_unary (SIGN_EXTEND, tmode,
13122 						  op0, mode);
13123 			op1 = simplify_gen_unary (SIGN_EXTEND, tmode,
13124 						  op1, mode);
13125 		      }
13126 		    break;
13127 		  }
13128 	      }
13129 	  }
13130       }
13131 
13132   /* We may have changed the comparison operands.  Re-canonicalize.  */
13133   if (swap_commutative_operands_p (op0, op1))
13134     {
13135       std::swap (op0, op1);
13136       code = swap_condition (code);
13137     }
13138 
13139   /* If this machine only supports a subset of valid comparisons, see if we
13140      can convert an unsupported one into a supported one.  */
13141   target_canonicalize_comparison (&code, &op0, &op1, 0);
13142 
13143   *pop0 = op0;
13144   *pop1 = op1;
13145 
13146   return code;
13147 }
13148 
13149 /* Utility function for record_value_for_reg.  Count number of
13150    rtxs in X.  */
13151 static int
13152 count_rtxs (rtx x)
13153 {
13154   enum rtx_code code = GET_CODE (x);
13155   const char *fmt;
13156   int i, j, ret = 1;
13157 
13158   if (GET_RTX_CLASS (code) == RTX_BIN_ARITH
13159       || GET_RTX_CLASS (code) == RTX_COMM_ARITH)
13160     {
13161       rtx x0 = XEXP (x, 0);
13162       rtx x1 = XEXP (x, 1);
13163 
13164       if (x0 == x1)
13165 	return 1 + 2 * count_rtxs (x0);
13166 
13167       if ((GET_RTX_CLASS (GET_CODE (x1)) == RTX_BIN_ARITH
13168 	   || GET_RTX_CLASS (GET_CODE (x1)) == RTX_COMM_ARITH)
13169 	  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
13170 	return 2 + 2 * count_rtxs (x0)
13171 	       + count_rtxs (x == XEXP (x1, 0)
13172 			     ? XEXP (x1, 1) : XEXP (x1, 0));
13173 
13174       if ((GET_RTX_CLASS (GET_CODE (x0)) == RTX_BIN_ARITH
13175 	   || GET_RTX_CLASS (GET_CODE (x0)) == RTX_COMM_ARITH)
13176 	  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
13177 	return 2 + 2 * count_rtxs (x1)
13178 	       + count_rtxs (x == XEXP (x0, 0)
13179 			     ? XEXP (x0, 1) : XEXP (x0, 0));
13180     }
13181 
13182   fmt = GET_RTX_FORMAT (code);
13183   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13184     if (fmt[i] == 'e')
13185       ret += count_rtxs (XEXP (x, i));
13186     else if (fmt[i] == 'E')
13187       for (j = 0; j < XVECLEN (x, i); j++)
13188 	ret += count_rtxs (XVECEXP (x, i, j));
13189 
13190   return ret;
13191 }
13192 
13193 /* Utility function for the following routine.  Called when X is part of a value
13194    being stored into last_set_value.  Sets last_set_table_tick
13195    for each register mentioned.  Similar to mention_regs in cse.c  */
13196 
13197 static void
13198 update_table_tick (rtx x)
13199 {
13200   enum rtx_code code = GET_CODE (x);
13201   const char *fmt = GET_RTX_FORMAT (code);
13202   int i, j;
13203 
13204   if (code == REG)
13205     {
13206       unsigned int regno = REGNO (x);
13207       unsigned int endregno = END_REGNO (x);
13208       unsigned int r;
13209 
13210       for (r = regno; r < endregno; r++)
13211 	{
13212 	  reg_stat_type *rsp = &reg_stat[r];
13213 	  rsp->last_set_table_tick = label_tick;
13214 	}
13215 
13216       return;
13217     }
13218 
13219   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
13220     if (fmt[i] == 'e')
13221       {
13222 	/* Check for identical subexpressions.  If x contains
13223 	   identical subexpressions we only have to traverse one of
13224 	   them.  */
13225 	if (i == 0 && ARITHMETIC_P (x))
13226 	  {
13227 	    /* Note that at this point x1 has already been
13228 	       processed.  */
13229 	    rtx x0 = XEXP (x, 0);
13230 	    rtx x1 = XEXP (x, 1);
13231 
13232 	    /* If x0 and x1 are identical then there is no need to
13233 	       process x0.  */
13234 	    if (x0 == x1)
13235 	      break;
13236 
13237 	    /* If x0 is identical to a subexpression of x1 then while
13238 	       processing x1, x0 has already been processed.  Thus we
13239 	       are done with x.  */
13240 	    if (ARITHMETIC_P (x1)
13241 		&& (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
13242 	      break;
13243 
13244 	    /* If x1 is identical to a subexpression of x0 then we
13245 	       still have to process the rest of x0.  */
13246 	    if (ARITHMETIC_P (x0)
13247 		&& (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
13248 	      {
13249 		update_table_tick (XEXP (x0, x1 == XEXP (x0, 0) ? 1 : 0));
13250 		break;
13251 	      }
13252 	  }
13253 
13254 	update_table_tick (XEXP (x, i));
13255       }
13256     else if (fmt[i] == 'E')
13257       for (j = 0; j < XVECLEN (x, i); j++)
13258 	update_table_tick (XVECEXP (x, i, j));
13259 }
13260 
13261 /* Record that REG is set to VALUE in insn INSN.  If VALUE is zero, we
13262    are saying that the register is clobbered and we no longer know its
13263    value.  If INSN is zero, don't update reg_stat[].last_set; this is
13264    only permitted with VALUE also zero and is used to invalidate the
13265    register.  */
13266 
13267 static void
13268 record_value_for_reg (rtx reg, rtx_insn *insn, rtx value)
13269 {
13270   unsigned int regno = REGNO (reg);
13271   unsigned int endregno = END_REGNO (reg);
13272   unsigned int i;
13273   reg_stat_type *rsp;
13274 
13275   /* If VALUE contains REG and we have a previous value for REG, substitute
13276      the previous value.  */
13277   if (value && insn && reg_overlap_mentioned_p (reg, value))
13278     {
13279       rtx tem;
13280 
13281       /* Set things up so get_last_value is allowed to see anything set up to
13282 	 our insn.  */
13283       subst_low_luid = DF_INSN_LUID (insn);
13284       tem = get_last_value (reg);
13285 
13286       /* If TEM is simply a binary operation with two CLOBBERs as operands,
13287 	 it isn't going to be useful and will take a lot of time to process,
13288 	 so just use the CLOBBER.  */
13289 
13290       if (tem)
13291 	{
13292 	  if (ARITHMETIC_P (tem)
13293 	      && GET_CODE (XEXP (tem, 0)) == CLOBBER
13294 	      && GET_CODE (XEXP (tem, 1)) == CLOBBER)
13295 	    tem = XEXP (tem, 0);
13296 	  else if (count_occurrences (value, reg, 1) >= 2)
13297 	    {
13298 	      /* If there are two or more occurrences of REG in VALUE,
13299 		 prevent the value from growing too much.  */
13300 	      if (count_rtxs (tem) > MAX_LAST_VALUE_RTL)
13301 		tem = gen_rtx_CLOBBER (GET_MODE (tem), const0_rtx);
13302 	    }
13303 
13304 	  value = replace_rtx (copy_rtx (value), reg, tem);
13305 	}
13306     }
13307 
13308   /* For each register modified, show we don't know its value, that
13309      we don't know about its bitwise content, that its value has been
13310      updated, and that we don't know the location of the death of the
13311      register.  */
13312   for (i = regno; i < endregno; i++)
13313     {
13314       rsp = &reg_stat[i];
13315 
13316       if (insn)
13317 	rsp->last_set = insn;
13318 
13319       rsp->last_set_value = 0;
13320       rsp->last_set_mode = VOIDmode;
13321       rsp->last_set_nonzero_bits = 0;
13322       rsp->last_set_sign_bit_copies = 0;
13323       rsp->last_death = 0;
13324       rsp->truncated_to_mode = VOIDmode;
13325     }
13326 
13327   /* Mark registers that are being referenced in this value.  */
13328   if (value)
13329     update_table_tick (value);
13330 
13331   /* Now update the status of each register being set.
13332      If someone is using this register in this block, set this register
13333      to invalid since we will get confused between the two lives in this
13334      basic block.  This makes using this register always invalid.  In cse, we
13335      scan the table to invalidate all entries using this register, but this
13336      is too much work for us.  */
13337 
13338   for (i = regno; i < endregno; i++)
13339     {
13340       rsp = &reg_stat[i];
13341       rsp->last_set_label = label_tick;
13342       if (!insn
13343 	  || (value && rsp->last_set_table_tick >= label_tick_ebb_start))
13344 	rsp->last_set_invalid = 1;
13345       else
13346 	rsp->last_set_invalid = 0;
13347     }
13348 
13349   /* The value being assigned might refer to X (like in "x++;").  In that
13350      case, we must replace it with (clobber (const_int 0)) to prevent
13351      infinite loops.  */
13352   rsp = &reg_stat[regno];
13353   if (value && !get_last_value_validate (&value, insn, label_tick, 0))
13354     {
13355       value = copy_rtx (value);
13356       if (!get_last_value_validate (&value, insn, label_tick, 1))
13357 	value = 0;
13358     }
13359 
13360   /* For the main register being modified, update the value, the mode, the
13361      nonzero bits, and the number of sign bit copies.  */
13362 
13363   rsp->last_set_value = value;
13364 
13365   if (value)
13366     {
13367       machine_mode mode = GET_MODE (reg);
13368       subst_low_luid = DF_INSN_LUID (insn);
13369       rsp->last_set_mode = mode;
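      /* For integer modes that fit in a HOST_WIDE_INT, compute the nonzero
         bits in nonzero_bits_mode so the recorded mask is not tied to the
         register's own mode.  */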
13370       if (GET_MODE_CLASS (mode) == MODE_INT
13371 	  && HWI_COMPUTABLE_MODE_P (mode))
13372 	mode = nonzero_bits_mode;
13373       rsp->last_set_nonzero_bits = nonzero_bits (value, mode);
13374       rsp->last_set_sign_bit_copies
13375 	= num_sign_bit_copies (value, GET_MODE (reg));
13376     }
13377 }
13378 
13379 /* Called via note_stores from record_dead_and_set_regs to handle one
13380    SET or CLOBBER in an insn.  DATA is the instruction in which the
13381    set is occurring.  */
13382 
13383 static void
13384 record_dead_and_set_regs_1 (rtx dest, const_rtx setter, void *data)
13385 {
13386   rtx_insn *record_dead_insn = (rtx_insn *) data;
13387 
13388   if (GET_CODE (dest) == SUBREG)
13389     dest = SUBREG_REG (dest);
13390 
13391   if (!record_dead_insn)
13392     {
13393       if (REG_P (dest))
13394 	record_value_for_reg (dest, NULL, NULL_RTX);
13395       return;
13396     }
13397 
13398   if (REG_P (dest))
13399     {
13400       /* If we are setting the whole register, we know its value.  Otherwise
13401 	 show that we don't know the value.  We can handle a SUBREG if it's
13402 	 the low part, but we must be careful with paradoxical SUBREGs on
13403 	 RISC architectures because we cannot strip e.g. an extension around
13404 	 a load and record the naked load since the RTL middle-end considers
13405 	 that the upper bits are defined according to LOAD_EXTEND_OP.  */
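      /* On WORD_REGISTER_OPERATIONS targets, a paradoxical lowpart SUBREG
         store whose source is a word_register_operation_p rtx defines the
         whole register, so that source can be recorded as-is below.  */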
13406       if (GET_CODE (setter) == SET && dest == SET_DEST (setter))
13407 	record_value_for_reg (dest, record_dead_insn, SET_SRC (setter));
13408       else if (GET_CODE (setter) == SET
13409 	       && GET_CODE (SET_DEST (setter)) == SUBREG
13410 	       && SUBREG_REG (SET_DEST (setter)) == dest
13411 	       && known_le (GET_MODE_PRECISION (GET_MODE (dest)),
13412 			    BITS_PER_WORD)
13413 	       && subreg_lowpart_p (SET_DEST (setter)))
13414 	record_value_for_reg (dest, record_dead_insn,
13415 			      WORD_REGISTER_OPERATIONS
13416 			      && word_register_operation_p (SET_SRC (setter))
13417 			      && paradoxical_subreg_p (SET_DEST (setter))
13418 			      ? SET_SRC (setter)
13419 			      : gen_lowpart (GET_MODE (dest),
13420 					     SET_SRC (setter)));
13421       else if (GET_CODE (setter) == CLOBBER_HIGH)
13422 	{
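	  /* A CLOBBER_HIGH invalidates the recorded value only if it
	     actually clobbers it in the mode the value was recorded in.  */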
13423 	  reg_stat_type *rsp = &reg_stat[REGNO (dest)];
13424 	  if (rsp->last_set_value
13425 	      && reg_is_clobbered_by_clobber_high
13426 		   (REGNO (dest), GET_MODE (rsp->last_set_value),
13427 		    XEXP (setter, 0)))
13428 	    record_value_for_reg (dest, NULL, NULL_RTX);
13429 	}
13430       else
13431 	record_value_for_reg (dest, record_dead_insn, NULL_RTX);
13432     }
13433   else if (MEM_P (dest)
13434 	   /* Ignore pushes, they clobber nothing.  */
13435 	   && ! push_operand (dest, GET_MODE (dest)))
13436     mem_last_set = DF_INSN_LUID (record_dead_insn);
13437 }
13438 
13439 /* Update the records of when each REG was most recently set or killed
13440    for the things done by INSN.  This is the last thing done in processing
13441    INSN in the combiner loop.
13442 
13443    We update reg_stat[], in particular fields last_set, last_set_value,
13444    last_set_mode, last_set_nonzero_bits, last_set_sign_bit_copies,
13445    last_death, and also the similar information mem_last_set (which insn
13446    most recently modified memory) and last_call_luid (which insn was the
13447    most recent subroutine call).  */
13448 
13449 static void
13450 record_dead_and_set_regs (rtx_insn *insn)
13451 {
13452   rtx link;
13453   unsigned int i;
13454 
13455   for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
13456     {
13457       if (REG_NOTE_KIND (link) == REG_DEAD
13458 	  && REG_P (XEXP (link, 0)))
13459 	{
13460 	  unsigned int regno = REGNO (XEXP (link, 0));
13461 	  unsigned int endregno = END_REGNO (XEXP (link, 0));
13462 
13463 	  for (i = regno; i < endregno; i++)
13464 	    {
13465 	      reg_stat_type *rsp;
13466 
13467 	      rsp = &reg_stat[i];
13468 	      rsp->last_death = insn;
13469 	    }
13470 	}
13471       else if (REG_NOTE_KIND (link) == REG_INC)
13472 	record_value_for_reg (XEXP (link, 0), insn, NULL_RTX);
13473     }
13474 
13475   if (CALL_P (insn))
13476     {
13477       hard_reg_set_iterator hrsi;
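      /* A call may change every register in regs_invalidated_by_call, so
         forget everything previously recorded about those registers.  */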
13478       EXECUTE_IF_SET_IN_HARD_REG_SET (regs_invalidated_by_call, 0, i, hrsi)
13479 	{
13480 	  reg_stat_type *rsp;
13481 
13482 	  rsp = &reg_stat[i];
13483 	  rsp->last_set_invalid = 1;
13484 	  rsp->last_set = insn;
13485 	  rsp->last_set_value = 0;
13486 	  rsp->last_set_mode = VOIDmode;
13487 	  rsp->last_set_nonzero_bits = 0;
13488 	  rsp->last_set_sign_bit_copies = 0;
13489 	  rsp->last_death = 0;
13490 	  rsp->truncated_to_mode = VOIDmode;
13491 	}
13492 
13493       last_call_luid = mem_last_set = DF_INSN_LUID (insn);
13494 
13495       /* We can't combine into a call pattern.  Remember, though, that
13496 	 the return value register is set at this LUID.  We could
13497 	 still replace a register with the return value from the
13498 	 wrong subroutine call!  */
13499       note_stores (PATTERN (insn), record_dead_and_set_regs_1, NULL_RTX);
13500     }
13501   else
13502     note_stores (PATTERN (insn), record_dead_and_set_regs_1, insn);
13503 }
13504 
13505 /* If a SUBREG has the promoted bit set, it is in fact a property of the
13506    register present in the SUBREG, so for each such SUBREG go back and
13507    adjust nonzero and sign bit information of the registers that are
13508    known to have some zero/sign bits set.
13509 
13510    This is needed because when combine blows the SUBREGs away, the
13511    information on zero/sign bits is lost and further combines can be
13512    missed because of that.  */
13513 
13514 static void
13515 record_promoted_value (rtx_insn *insn, rtx subreg)
13516 {
13517   struct insn_link *links;
13518   rtx set;
13519   unsigned int regno = REGNO (SUBREG_REG (subreg));
13520   machine_mode mode = GET_MODE (subreg);
13521 
13522   if (!HWI_COMPUTABLE_MODE_P (mode))
13523     return;
13524 
13525   for (links = LOG_LINKS (insn); links;)
13526     {
13527       reg_stat_type *rsp;
13528 
13529       insn = links->insn;
13530       set = single_set (insn);
13531 
13532       if (! set || !REG_P (SET_DEST (set))
13533 	  || REGNO (SET_DEST (set)) != regno
13534 	  || GET_MODE (SET_DEST (set)) != GET_MODE (SUBREG_REG (subreg)))
13535 	{
13536 	  links = links->next;
13537 	  continue;
13538 	}
13539 
13540       rsp = &reg_stat[regno];
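      /* A promoted-unsigned SUBREG means the inner register is zero-extended
         from MODE, so no bits outside MODE's mask can be nonzero in the
         value set by this insn.  */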
13541       if (rsp->last_set == insn)
13542 	{
13543 	  if (SUBREG_PROMOTED_UNSIGNED_P (subreg))
13544 	    rsp->last_set_nonzero_bits &= GET_MODE_MASK (mode);
13545 	}
13546 
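      /* If the value was merely copied from another register, continue
         walking the LOG_LINKS of that register so its definition gets the
         same treatment.  */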
13547       if (REG_P (SET_SRC (set)))
13548 	{
13549 	  regno = REGNO (SET_SRC (set));
13550 	  links = LOG_LINKS (insn);
13551 	}
13552       else
13553 	break;
13554     }
13555 }
13556 
13557 /* Check if X, a register, is known to contain a value already
13558    truncated to MODE.  In this case we can use a subreg to refer to
13559    the truncated value even though in the generic case we would need
13560    an explicit truncation.  */
13561 
13562 static bool
13563 reg_truncated_to_mode (machine_mode mode, const_rtx x)
13564 {
13565   reg_stat_type *rsp = &reg_stat[REGNO (x)];
13566   machine_mode truncated = rsp->truncated_to_mode;
13567 
13568   if (truncated == 0
13569       || rsp->truncation_label < label_tick_ebb_start)
13570     return false;
13571   if (!partial_subreg_p (mode, truncated))
13572     return true;
13573   if (TRULY_NOOP_TRUNCATION_MODES_P (mode, truncated))
13574     return true;
13575   return false;
13576 }
13577 
13578 /* If X is a hard reg or a subreg record the mode that the register is
13579    accessed in.  For non-TARGET_TRULY_NOOP_TRUNCATION targets we might be
13580    able to turn a truncate into a subreg using this information.  Return true
13581    if traversing X is complete.  */
13582 
13583 static bool
13584 record_truncated_value (rtx x)
13585 {
13586   machine_mode truncated_mode;
13587   reg_stat_type *rsp;
13588 
13589   if (GET_CODE (x) == SUBREG && REG_P (SUBREG_REG (x)))
13590     {
13591       machine_mode original_mode = GET_MODE (SUBREG_REG (x));
13592       truncated_mode = GET_MODE (x);
13593 
13594       if (!partial_subreg_p (truncated_mode, original_mode))
13595 	return true;
13596 
13597       truncated_mode = GET_MODE (x);
13598       if (TRULY_NOOP_TRUNCATION_MODES_P (truncated_mode, original_mode))
13599 	return true;
13600 
13601       x = SUBREG_REG (x);
13602     }
13603   /* ??? For hard-regs we now record everything.  We might be able to
13604      optimize this using last_set_mode.  */
13605   else if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
13606     truncated_mode = GET_MODE (x);
13607   else
13608     return false;
13609 
13610   rsp = &reg_stat[REGNO (x)];
13611   if (rsp->truncated_to_mode == 0
13612       || rsp->truncation_label < label_tick_ebb_start
13613       || partial_subreg_p (truncated_mode, rsp->truncated_to_mode))
13614     {
13615       rsp->truncated_to_mode = truncated_mode;
13616       rsp->truncation_label = label_tick;
13617     }
13618 
13619   return true;
13620 }
13621 
13622 /* Callback for note_uses.  Find hardregs and subregs of pseudos and
13623    the modes they are used in.  This can help turn TRUNCATEs into
13624    SUBREGs.  */
13625 
13626 static void
13627 record_truncated_values (rtx *loc, void *data ATTRIBUTE_UNUSED)
13628 {
13629   subrtx_var_iterator::array_type array;
13630   FOR_EACH_SUBRTX_VAR (iter, array, *loc, NONCONST)
13631     if (record_truncated_value (*iter))
13632       iter.skip_subrtxes ();
13633 }
13634 
13635 /* Scan X for promoted SUBREGs.  For each one found,
13636    note what it implies to the registers used in it.  */
13637 
13638 static void
13639 check_promoted_subreg (rtx_insn *insn, rtx x)
13640 {
13641   if (GET_CODE (x) == SUBREG
13642       && SUBREG_PROMOTED_VAR_P (x)
13643       && REG_P (SUBREG_REG (x)))
13644     record_promoted_value (insn, x);
13645   else
13646     {
13647       const char *format = GET_RTX_FORMAT (GET_CODE (x));
13648       int i, j;
13649 
13650       for (i = 0; i < GET_RTX_LENGTH (GET_CODE (x)); i++)
13651 	switch (format[i])
13652 	  {
13653 	  case 'e':
13654 	    check_promoted_subreg (insn, XEXP (x, i));
13655 	    break;
13656 	  case 'V':
13657 	  case 'E':
13658 	    if (XVEC (x, i) != 0)
13659 	      for (j = 0; j < XVECLEN (x, i); j++)
13660 		check_promoted_subreg (insn, XVECEXP (x, i, j));
13661 	    break;
13662 	  }
13663     }
13664 }
13665 
13666 /* Verify that all the registers and memory references mentioned in *LOC are
13667    still valid.  *LOC was part of a value set in INSN when label_tick was
13668    equal to TICK.  Return 0 if some are not.  If REPLACE is nonzero, replace
13669    the invalid references with (clobber (const_int 0)) and return 1.  This
13670    replacement is useful because we often can get useful information about
13671    the form of a value (e.g., if it was produced by a shift that always
13672    produces -1 or 0) even though we don't know exactly what registers it
13673    was produced from.  */
13674 
13675 static int
13676 get_last_value_validate (rtx *loc, rtx_insn *insn, int tick, int replace)
13677 {
13678   rtx x = *loc;
13679   const char *fmt = GET_RTX_FORMAT (GET_CODE (x));
13680   int len = GET_RTX_LENGTH (GET_CODE (x));
13681   int i, j;
13682 
13683   if (REG_P (x))
13684     {
13685       unsigned int regno = REGNO (x);
13686       unsigned int endregno = END_REGNO (x);
13687       unsigned int j;
13688 
13689       for (j = regno; j < endregno; j++)
13690 	{
13691 	  reg_stat_type *rsp = &reg_stat[j];
13692 	  if (rsp->last_set_invalid
13693 	      /* If this is a pseudo-register that was only set once and not
13694 		 live at the beginning of the function, it is always valid.  */
13695 	      || (! (regno >= FIRST_PSEUDO_REGISTER
13696 		     && regno < reg_n_sets_max
13697 		     && REG_N_SETS (regno) == 1
13698 		     && (!REGNO_REG_SET_P
13699 			 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb),
13700 			  regno)))
13701 		  && rsp->last_set_label > tick))
13702 	  {
13703 	    if (replace)
13704 	      *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
13705 	    return replace;
13706 	  }
13707 	}
13708 
13709       return 1;
13710     }
13711   /* If this is a memory reference, make sure that there were no stores after
13712      it that might have clobbered the value.  We don't have alias info, so we
13713      assume any store invalidates it.  Moreover, we only have local UIDs, so
13714      we also assume that there were stores in the intervening basic blocks.  */
13715   else if (MEM_P (x) && !MEM_READONLY_P (x)
13716 	   && (tick != label_tick || DF_INSN_LUID (insn) <= mem_last_set))
13717     {
13718       if (replace)
13719 	*loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
13720       return replace;
13721     }
13722 
13723   for (i = 0; i < len; i++)
13724     {
13725       if (fmt[i] == 'e')
13726 	{
13727 	  /* Check for identical subexpressions.  If x contains
13728 	     identical subexpressions we only have to traverse one of
13729 	     them.  */
13730 	  if (i == 1 && ARITHMETIC_P (x))
13731 	    {
13732 	      /* Note that at this point x0 has already been checked
13733 		 and found valid.  */
13734 	      rtx x0 = XEXP (x, 0);
13735 	      rtx x1 = XEXP (x, 1);
13736 
13737 	      /* If x0 and x1 are identical then x is also valid.  */
13738 	      if (x0 == x1)
13739 		return 1;
13740 
13741 	      /* If x1 is identical to a subexpression of x0 then
13742 		 while checking x0, x1 has already been checked.  Thus
13743 		 it is valid and so is x.  */
13744 	      if (ARITHMETIC_P (x0)
13745 		  && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
13746 		return 1;
13747 
13748 	      /* If x0 is identical to a subexpression of x1 then x is
13749 		 valid iff the rest of x1 is valid.  */
13750 	      if (ARITHMETIC_P (x1)
13751 		  && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
13752 		return
13753 		  get_last_value_validate (&XEXP (x1,
13754 						  x0 == XEXP (x1, 0) ? 1 : 0),
13755 					   insn, tick, replace);
13756 	    }
13757 
13758 	  if (get_last_value_validate (&XEXP (x, i), insn, tick,
13759 				       replace) == 0)
13760 	    return 0;
13761 	}
13762       else if (fmt[i] == 'E')
13763 	for (j = 0; j < XVECLEN (x, i); j++)
13764 	  if (get_last_value_validate (&XVECEXP (x, i, j),
13765 				       insn, tick, replace) == 0)
13766 	    return 0;
13767     }
13768 
13769   /* If we haven't found a reason for it to be invalid, it is valid.  */
13770   return 1;
13771 }
13772 
13773 /* Get the last value assigned to X, if known.  Some registers
13774    in the value may be replaced with (clobber (const_int 0)) if their value
13775    is no longer known reliably.  */
13776 
13777 static rtx
13778 get_last_value (const_rtx x)
13779 {
13780   unsigned int regno;
13781   rtx value;
13782   reg_stat_type *rsp;
13783 
13784   /* If this is a non-paradoxical SUBREG, get the value of its operand and
13785      then convert it to the desired mode.  If this is a paradoxical SUBREG,
13786      we cannot predict what values the "extra" bits might have.  */
13787   if (GET_CODE (x) == SUBREG
13788       && subreg_lowpart_p (x)
13789       && !paradoxical_subreg_p (x)
13790       && (value = get_last_value (SUBREG_REG (x))) != 0)
13791     return gen_lowpart (GET_MODE (x), value);
13792 
13793   if (!REG_P (x))
13794     return 0;
13795 
13796   regno = REGNO (x);
13797   rsp = &reg_stat[regno];
13798   value = rsp->last_set_value;
13799 
13800   /* If we don't have a value, or if it isn't for this basic block and
13801      it's either a hard register, set more than once, or it's live
13802      at the beginning of the function, return 0.
13803 
13804      Because if it's not live at the beginning of the function then the reg
13805      is always set before being used (is never used without being set).
13806      And, if it's set only once, and it's always set before use, then all
13807      uses must have the same last value, even if it's not from this basic
13808      block.  */
13809 
13810   if (value == 0
13811       || (rsp->last_set_label < label_tick_ebb_start
13812 	  && (regno < FIRST_PSEUDO_REGISTER
13813 	      || regno >= reg_n_sets_max
13814 	      || REG_N_SETS (regno) != 1
13815 	      || REGNO_REG_SET_P
13816 		 (DF_LR_IN (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb), regno))))
13817     return 0;
13818 
13819   /* If the value was set in a later insn than the ones we are processing,
13820      we can't use it even if the register was only set once.  */
13821   if (rsp->last_set_label == label_tick
13822       && DF_INSN_LUID (rsp->last_set) >= subst_low_luid)
13823     return 0;
13824 
13825   /* If fewer bits were set than what we are asked for now, we cannot use
13826      the value.  */
13827   if (maybe_lt (GET_MODE_PRECISION (rsp->last_set_mode),
13828 		GET_MODE_PRECISION (GET_MODE (x))))
13829     return 0;
13830 
13831   /* If the value has all its registers valid, return it.  */
13832   if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 0))
13833     return value;
13834 
13835   /* Otherwise, make a copy and replace any invalid register with
13836      (clobber (const_int 0)).  If that fails for some reason, return 0.  */
13837 
13838   value = copy_rtx (value);
13839   if (get_last_value_validate (&value, rsp->last_set, rsp->last_set_label, 1))
13840     return value;
13841 
13842   return 0;
13843 }
13844 
13845 /* Define three variables used for communication between the following
13846    routines.  */
13847 
13848 static unsigned int reg_dead_regno, reg_dead_endregno;
13849 static int reg_dead_flag;
13850 rtx reg_dead_reg;
13851 
13852 /* Function called via note_stores from reg_dead_at_p.
13853 
13854    If DEST is within [reg_dead_regno, reg_dead_endregno), set
13855    reg_dead_flag to 1 if X is a CLOBBER and to -1 if it is a SET.  */
13856 
13857 static void
13858 reg_dead_at_p_1 (rtx dest, const_rtx x, void *data ATTRIBUTE_UNUSED)
13859 {
13860   unsigned int regno, endregno;
13861 
13862   if (!REG_P (dest))
13863     return;
13864 
13865   if (GET_CODE (x) == CLOBBER_HIGH
13866       && !reg_is_clobbered_by_clobber_high (reg_dead_reg, XEXP (x, 0)))
13867     return;
13868 
13869   regno = REGNO (dest);
13870   endregno = END_REGNO (dest);
13871   if (reg_dead_endregno > regno && reg_dead_regno < endregno)
13872     reg_dead_flag = (GET_CODE (x) == CLOBBER) ? 1 : -1;
13873 }
13874 
13875 /* Return nonzero if REG is known to be dead at INSN.
13876 
13877    We scan backwards from INSN.  If we hit a REG_DEAD note or a CLOBBER
13878    referencing REG, it is dead.  If we hit a SET referencing REG, it is
13879    live.  Otherwise, see if it is live or dead at the start of the basic
13880    block we are in.  Hard regs marked as being live in NEWPAT_USED_REGS
13881    must be assumed to be always live.  */
13882 
13883 static int
13884 reg_dead_at_p (rtx reg, rtx_insn *insn)
13885 {
13886   basic_block block;
13887   unsigned int i;
13888 
13889   /* Set variables for reg_dead_at_p_1.  */
13890   reg_dead_regno = REGNO (reg);
13891   reg_dead_endregno = END_REGNO (reg);
13892   reg_dead_reg = reg;
13893 
13894   reg_dead_flag = 0;
13895 
13896   /* Check that reg isn't mentioned in NEWPAT_USED_REGS.  For fixed registers
13897      we allow the machine description to decide whether use-and-clobber
13898      patterns are OK.  */
13899   if (reg_dead_regno < FIRST_PSEUDO_REGISTER)
13900     {
13901       for (i = reg_dead_regno; i < reg_dead_endregno; i++)
13902 	if (!fixed_regs[i] && TEST_HARD_REG_BIT (newpat_used_regs, i))
13903 	  return 0;
13904     }
13905 
13906   /* Scan backwards until we find a REG_DEAD note, SET, CLOBBER, or
13907      beginning of basic block.  */
13908   block = BLOCK_FOR_INSN (insn);
13909   for (;;)
13910     {
13911       if (INSN_P (insn))
13912         {
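	  /* A REG_UNUSED note means the register is set here but its value
	     is not used afterwards, so it is dead at this point.  */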
13913 	  if (find_regno_note (insn, REG_UNUSED, reg_dead_regno))
13914 	    return 1;
13915 
13916 	  note_stores (PATTERN (insn), reg_dead_at_p_1, NULL);
13917 	  if (reg_dead_flag)
13918 	    return reg_dead_flag == 1 ? 1 : 0;
13919 
13920 	  if (find_regno_note (insn, REG_DEAD, reg_dead_regno))
13921 	    return 1;
13922         }
13923 
13924       if (insn == BB_HEAD (block))
13925 	break;
13926 
13927       insn = PREV_INSN (insn);
13928     }
13929 
13930   /* Look at live-in sets for the basic block that we were in.  */
13931   for (i = reg_dead_regno; i < reg_dead_endregno; i++)
13932     if (REGNO_REG_SET_P (df_get_live_in (block), i))
13933       return 0;
13934 
13935   return 1;
13936 }
13937 
13938 /* Note hard registers in X that are used.  */
13939 
13940 static void
13941 mark_used_regs_combine (rtx x)
13942 {
13943   RTX_CODE code = GET_CODE (x);
13944   unsigned int regno;
13945   int i;
13946 
13947   switch (code)
13948     {
13949     case LABEL_REF:
13950     case SYMBOL_REF:
13951     case CONST:
13952     CASE_CONST_ANY:
13953     case PC:
13954     case ADDR_VEC:
13955     case ADDR_DIFF_VEC:
13956     case ASM_INPUT:
13957     /* CC0 must die in the insn after it is set, so we don't need to take
13958        special note of it here.  */
13959     case CC0:
13960       return;
13961 
13962     case CLOBBER:
13963       /* If we are clobbering a MEM, mark any hard registers inside the
13964 	 address as used.  */
13965       if (MEM_P (XEXP (x, 0)))
13966 	mark_used_regs_combine (XEXP (XEXP (x, 0), 0));
13967       return;
13968 
13969     case REG:
13970       regno = REGNO (x);
13971       /* A hard reg in a wide mode may really be multiple registers.
13972 	 If so, mark all of them just like the first.  */
13973       if (regno < FIRST_PSEUDO_REGISTER)
13974 	{
13975 	  /* None of this applies to the stack, frame or arg pointers.  */
13976 	  if (regno == STACK_POINTER_REGNUM
13977 	      || (!HARD_FRAME_POINTER_IS_FRAME_POINTER
13978 		  && regno == HARD_FRAME_POINTER_REGNUM)
13979 	      || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
13980 		  && regno == ARG_POINTER_REGNUM && fixed_regs[regno])
13981 	      || regno == FRAME_POINTER_REGNUM)
13982 	    return;
13983 
13984 	  add_to_hard_reg_set (&newpat_used_regs, GET_MODE (x), regno);
13985 	}
13986       return;
13987 
13988     case SET:
13989       {
13990 	/* If setting a MEM, or a SUBREG of a MEM, then note any hard regs in
13991 	   the address.  */
13992 	rtx testreg = SET_DEST (x);
13993 
13994 	while (GET_CODE (testreg) == SUBREG
13995 	       || GET_CODE (testreg) == ZERO_EXTRACT
13996 	       || GET_CODE (testreg) == STRICT_LOW_PART)
13997 	  testreg = XEXP (testreg, 0);
13998 
13999 	if (MEM_P (testreg))
14000 	  mark_used_regs_combine (XEXP (testreg, 0));
14001 
14002 	mark_used_regs_combine (SET_SRC (x));
14003       }
14004       return;
14005 
14006     default:
14007       break;
14008     }
14009 
14010   /* Recursively scan the operands of this expression.  */
14011 
14012   {
14013     const char *fmt = GET_RTX_FORMAT (code);
14014 
14015     for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
14016       {
14017 	if (fmt[i] == 'e')
14018 	  mark_used_regs_combine (XEXP (x, i));
14019 	else if (fmt[i] == 'E')
14020 	  {
14021 	    int j;
14022 
14023 	    for (j = 0; j < XVECLEN (x, i); j++)
14024 	      mark_used_regs_combine (XVECEXP (x, i, j));
14025 	  }
14026       }
14027   }
14028 }
14029 
14030 /* Remove register number REGNO from the dead registers list of INSN.
14031 
14032    Return the note used to record the death, if there was one.  */
14033 
14034 rtx
14035 remove_death (unsigned int regno, rtx_insn *insn)
14036 {
14037   rtx note = find_regno_note (insn, REG_DEAD, regno);
14038 
14039   if (note)
14040     remove_note (insn, note);
14041 
14042   return note;
14043 }
14044 
14045 /* For each register (hardware or pseudo) used within expression X, if its
14046    death is in an instruction with luid between FROM_LUID (inclusive) and
14047    TO_INSN (exclusive), put a REG_DEAD note for that register in the
14048    list headed by PNOTES.
14049 
14050    That said, don't move registers killed by maybe_kill_insn.
14051 
14052    This is done when X is being merged by combination into TO_INSN.  These
14053    notes will then be distributed as needed.  */
14054 
14055 static void
14056 move_deaths (rtx x, rtx maybe_kill_insn, int from_luid, rtx_insn *to_insn,
14057 	     rtx *pnotes)
14058 {
14059   const char *fmt;
14060   int len, i;
14061   enum rtx_code code = GET_CODE (x);
14062 
14063   if (code == REG)
14064     {
14065       unsigned int regno = REGNO (x);
14066       rtx_insn *where_dead = reg_stat[regno].last_death;
14067 
14068       /* If we do not know where the register died, it may still die between
14069 	 FROM_LUID and TO_INSN.  If so, find it.  This is PR83304.  */
14070       if (!where_dead || DF_INSN_LUID (where_dead) >= DF_INSN_LUID (to_insn))
14071 	{
14072 	  rtx_insn *insn = prev_real_nondebug_insn (to_insn);
14073 	  while (insn
14074 		 && BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (to_insn)
14075 		 && DF_INSN_LUID (insn) >= from_luid)
14076 	    {
14077 	      if (dead_or_set_regno_p (insn, regno))
14078 		{
14079 		  if (find_regno_note (insn, REG_DEAD, regno))
14080 		    where_dead = insn;
14081 		  break;
14082 		}
14083 
14084 	      insn = prev_real_nondebug_insn (insn);
14085 	    }
14086 	}
14087 
14088       /* Don't move the register if it gets killed in between from and to.  */
14089       if (maybe_kill_insn && reg_set_p (x, maybe_kill_insn)
14090 	  && ! reg_referenced_p (x, maybe_kill_insn))
14091 	return;
14092 
14093       if (where_dead
14094 	  && BLOCK_FOR_INSN (where_dead) == BLOCK_FOR_INSN (to_insn)
14095 	  && DF_INSN_LUID (where_dead) >= from_luid
14096 	  && DF_INSN_LUID (where_dead) < DF_INSN_LUID (to_insn))
14097 	{
14098 	  rtx note = remove_death (regno, where_dead);
14099 
14100 	  /* It is possible for the call above to return 0.  This can occur
14101 	     when last_death points to I2 or I1 that we combined with.
14102 	     In that case make a new note.
14103 
14104 	     We must also check for the case where X is a hard register
14105 	     and NOTE is a death note for a range of hard registers
14106 	     including X.  In that case, we must put REG_DEAD notes for
14107 	     the remaining registers in place of NOTE.  */
14108 
14109 	  if (note != 0 && regno < FIRST_PSEUDO_REGISTER
14110 	      && partial_subreg_p (GET_MODE (x), GET_MODE (XEXP (note, 0))))
14111 	    {
14112 	      unsigned int deadregno = REGNO (XEXP (note, 0));
14113 	      unsigned int deadend = END_REGNO (XEXP (note, 0));
14114 	      unsigned int ourend = END_REGNO (x);
14115 	      unsigned int i;
14116 
14117 	      for (i = deadregno; i < deadend; i++)
14118 		if (i < regno || i >= ourend)
14119 		  add_reg_note (where_dead, REG_DEAD, regno_reg_rtx[i]);
14120 	    }
14121 
14122 	  /* If we didn't find any note, or if we found a REG_DEAD note that
14123 	     covers only part of the given reg, and we have a multi-reg hard
14124 	     register, then to be safe we must check for REG_DEAD notes
14125 	     for each register other than the first.  They could have
14126 	     their own REG_DEAD notes lying around.  */
14127 	  else if ((note == 0
14128 		    || (note != 0
14129 			&& partial_subreg_p (GET_MODE (XEXP (note, 0)),
14130 					     GET_MODE (x))))
14131 		   && regno < FIRST_PSEUDO_REGISTER
14132 		   && REG_NREGS (x) > 1)
14133 	    {
14134 	      unsigned int ourend = END_REGNO (x);
14135 	      unsigned int i, offset;
14136 	      rtx oldnotes = 0;
14137 
14138 	      if (note)
14139 		offset = hard_regno_nregs (regno, GET_MODE (XEXP (note, 0)));
14140 	      else
14141 		offset = 1;
14142 
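	      /* Skip the hard registers the existing note already covers
		 (just the first one when there is no note) and look for
		 separate death notes for each remaining piece.  */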
14143 	      for (i = regno + offset; i < ourend; i++)
14144 		move_deaths (regno_reg_rtx[i],
14145 			     maybe_kill_insn, from_luid, to_insn, &oldnotes);
14146 	    }
14147 
14148 	  if (note != 0 && GET_MODE (XEXP (note, 0)) == GET_MODE (x))
14149 	    {
14150 	      XEXP (note, 1) = *pnotes;
14151 	      *pnotes = note;
14152 	    }
14153 	  else
14154 	    *pnotes = alloc_reg_note (REG_DEAD, x, *pnotes);
14155 	}
14156 
14157       return;
14158     }
14159 
14160   else if (GET_CODE (x) == SET)
14161     {
14162       rtx dest = SET_DEST (x);
14163 
14164       move_deaths (SET_SRC (x), maybe_kill_insn, from_luid, to_insn, pnotes);
14165 
14166       /* In the case of a ZERO_EXTRACT, a STRICT_LOW_PART, or a SUBREG
14167 	 that accesses one word of a multi-word item, some
14168 	 piece of every register in the expression is used by
14169 	 this insn, so remove any old death.  */
14170       /* ??? So why do we test for equality of the sizes?  */
14171 
14172       if (GET_CODE (dest) == ZERO_EXTRACT
14173 	  || GET_CODE (dest) == STRICT_LOW_PART
14174 	  || (GET_CODE (dest) == SUBREG
14175 	      && !read_modify_subreg_p (dest)))
14176 	{
14177 	  move_deaths (dest, maybe_kill_insn, from_luid, to_insn, pnotes);
14178 	  return;
14179 	}
14180 
14181       /* If this is some other SUBREG, we know it replaces the entire
14182 	 value, so use that as the destination.  */
14183       if (GET_CODE (dest) == SUBREG)
14184 	dest = SUBREG_REG (dest);
14185 
14186       /* If this is a MEM, adjust deaths of anything used in the address.
14187 	 For a REG (the only other possibility), the entire value is
14188 	 being replaced so the old value is not used in this insn.  */
14189 
14190       if (MEM_P (dest))
14191 	move_deaths (XEXP (dest, 0), maybe_kill_insn, from_luid,
14192 		     to_insn, pnotes);
14193       return;
14194     }
14195 
14196   else if (GET_CODE (x) == CLOBBER)
14197     return;
14198 
14199   len = GET_RTX_LENGTH (code);
14200   fmt = GET_RTX_FORMAT (code);
14201 
14202   for (i = 0; i < len; i++)
14203     {
14204       if (fmt[i] == 'E')
14205 	{
14206 	  int j;
14207 	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
14208 	    move_deaths (XVECEXP (x, i, j), maybe_kill_insn, from_luid,
14209 			 to_insn, pnotes);
14210 	}
14211       else if (fmt[i] == 'e')
14212 	move_deaths (XEXP (x, i), maybe_kill_insn, from_luid, to_insn, pnotes);
14213     }
14214 }
14215 
14216 /* Return 1 if X is the target of a bit-field assignment in BODY, the
14217    pattern of an insn.  X must be a REG.  */
14218 
14219 static int
14220 reg_bitfield_target_p (rtx x, rtx body)
14221 {
14222   int i;
14223 
14224   if (GET_CODE (body) == SET)
14225     {
14226       rtx dest = SET_DEST (body);
14227       rtx target;
14228       unsigned int regno, tregno, endregno, endtregno;
14229 
14230       if (GET_CODE (dest) == ZERO_EXTRACT)
14231 	target = XEXP (dest, 0);
14232       else if (GET_CODE (dest) == STRICT_LOW_PART)
14233 	target = SUBREG_REG (XEXP (dest, 0));
14234       else
14235 	return 0;
14236 
14237       if (GET_CODE (target) == SUBREG)
14238 	target = SUBREG_REG (target);
14239 
14240       if (!REG_P (target))
14241 	return 0;
14242 
14243       tregno = REGNO (target), regno = REGNO (x);
14244       if (tregno >= FIRST_PSEUDO_REGISTER || regno >= FIRST_PSEUDO_REGISTER)
14245 	return target == x;
14246 
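      /* Both are hard registers; any overlap between the two register
         ranges counts as a hit.  */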
14247       endtregno = end_hard_regno (GET_MODE (target), tregno);
14248       endregno = end_hard_regno (GET_MODE (x), regno);
14249 
14250       return endregno > tregno && regno < endtregno;
14251     }
14252 
14253   else if (GET_CODE (body) == PARALLEL)
14254     for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
14255       if (reg_bitfield_target_p (x, XVECEXP (body, 0, i)))
14256 	return 1;
14257 
14258   return 0;
14259 }
14260 
14261 /* Given a chain of REG_NOTES originally from FROM_INSN, try to place them
14262    as appropriate.  I3 and I2 are the insns resulting from the combination
14263    insns including FROM (I2 may be zero).
14264 
14265    ELIM_I2 and ELIM_I1 are either zero or registers that we know will
14266    not need REG_DEAD notes because they are being substituted for.  This
14267    saves searching in the most common cases.
14268 
14269    Each note in the list is either ignored or placed on some insns, depending
14270    on the type of note.  */
14271 
14272 static void
14273 distribute_notes (rtx notes, rtx_insn *from_insn, rtx_insn *i3, rtx_insn *i2,
14274 		  rtx elim_i2, rtx elim_i1, rtx elim_i0)
14275 {
14276   rtx note, next_note;
14277   rtx tem_note;
14278   rtx_insn *tem_insn;
14279 
14280   for (note = notes; note; note = next_note)
14281     {
14282       rtx_insn *place = 0, *place2 = 0;
14283 
14284       next_note = XEXP (note, 1);
14285       switch (REG_NOTE_KIND (note))
14286 	{
14287 	case REG_BR_PROB:
14288 	case REG_BR_PRED:
14289 	  /* Doesn't matter much where we put this, as long as it's somewhere.
14290 	     It is preferable to keep these notes on branches, which is most
14291 	     likely to be i3.  */
14292 	  place = i3;
14293 	  break;
14294 
14295 	case REG_NON_LOCAL_GOTO:
14296 	  if (JUMP_P (i3))
14297 	    place = i3;
14298 	  else
14299 	    {
14300 	      gcc_assert (i2 && JUMP_P (i2));
14301 	      place = i2;
14302 	    }
14303 	  break;
14304 
14305 	case REG_EH_REGION:
14306 	  /* These notes must remain with the call or trapping instruction.  */
14307 	  if (CALL_P (i3))
14308 	    place = i3;
14309 	  else if (i2 && CALL_P (i2))
14310 	    place = i2;
14311 	  else
14312 	    {
14313 	      gcc_assert (cfun->can_throw_non_call_exceptions);
14314 	      if (may_trap_p (i3))
14315 		place = i3;
14316 	      else if (i2 && may_trap_p (i2))
14317 		place = i2;
14318 	      /* ??? Otherwise assume we've combined things such that we
14319 		 can now prove that the instructions can't trap.  Drop the
14320 		 note in this case.  */
14321 	    }
14322 	  break;
14323 
14324 	case REG_ARGS_SIZE:
14325 	  /* ??? How to distribute between i3-i1.  Assume i3 contains the
14326 	     entire adjustment.  Assert i3 contains at least some adjust.  */
14327 	  if (!noop_move_p (i3))
14328 	    {
14329 	      poly_int64 old_size, args_size = get_args_size (note);
14330 	      /* fixup_args_size_notes looks at REG_NORETURN note,
14331 		 so ensure the note is placed there first.  */
14332 	      if (CALL_P (i3))
14333 		{
14334 		  rtx *np;
14335 		  for (np = &next_note; *np; np = &XEXP (*np, 1))
14336 		    if (REG_NOTE_KIND (*np) == REG_NORETURN)
14337 		      {
14338 			rtx n = *np;
14339 			*np = XEXP (n, 1);
14340 			XEXP (n, 1) = REG_NOTES (i3);
14341 			REG_NOTES (i3) = n;
14342 			break;
14343 		      }
14344 		}
14345 	      old_size = fixup_args_size_notes (PREV_INSN (i3), i3, args_size);
14346 	      /* For !ACCUMULATE_OUTGOING_ARGS, emit_call_1 adds a REG_ARGS_SIZE
14347 		 note to all noreturn calls; allow that here.  */
14348 	      gcc_assert (maybe_ne (old_size, args_size)
14349 			  || (CALL_P (i3)
14350 			      && !ACCUMULATE_OUTGOING_ARGS
14351 			      && find_reg_note (i3, REG_NORETURN, NULL_RTX)));
14352 	    }
14353 	  break;
14354 
14355 	case REG_NORETURN:
14356 	case REG_SETJMP:
14357 	case REG_TM:
14358 	case REG_CALL_DECL:
14359 	case REG_CALL_NOCF_CHECK:
14360 	  /* These notes must remain with the call.  It should not be
14361 	     possible for both I2 and I3 to be a call.  */
14362 	  if (CALL_P (i3))
14363 	    place = i3;
14364 	  else
14365 	    {
14366 	      gcc_assert (i2 && CALL_P (i2));
14367 	      place = i2;
14368 	    }
14369 	  break;
14370 
14371 	case REG_UNUSED:
14372 	  /* Any clobbers for i3 may still exist, and so we must process
14373 	     REG_UNUSED notes from that insn.
14374 
14375 	     Any clobbers from i2 or i1 can only exist if they were added by
14376 	     recog_for_combine.  In that case, recog_for_combine created the
14377 	     necessary REG_UNUSED notes.  Trying to keep any original
14378 	     REG_UNUSED notes from these insns can cause incorrect output
14379 	     if it is for the same register as the original i3 dest.
14380 	     In that case, we will notice that the register is set in i3,
14381 	     and then add a REG_UNUSED note for the destination of i3, which
14382 	     is wrong.  However, it is possible to have REG_UNUSED notes from
14383 	     i2 or i1 for registers which were both used and clobbered, so
14384 	     we keep notes from i2 or i1 if they will turn into REG_DEAD
14385 	     notes.  */
14386 
14387 	  /* If this register is set or clobbered in I3, put the note there
14388 	     unless there is one already.  */
14389 	  if (reg_set_p (XEXP (note, 0), PATTERN (i3)))
14390 	    {
14391 	      if (from_insn != i3)
14392 		break;
14393 
14394 	      if (! (REG_P (XEXP (note, 0))
14395 		     ? find_regno_note (i3, REG_UNUSED, REGNO (XEXP (note, 0)))
14396 		     : find_reg_note (i3, REG_UNUSED, XEXP (note, 0))))
14397 		place = i3;
14398 	    }
14399 	  /* Otherwise, if this register is used by I3, then this register
14400 	     now dies here, so we must put a REG_DEAD note here unless there
14401 	     is one already.  */
14402 	  else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3))
14403 		   && ! (REG_P (XEXP (note, 0))
14404 			 ? find_regno_note (i3, REG_DEAD,
14405 					    REGNO (XEXP (note, 0)))
14406 			 : find_reg_note (i3, REG_DEAD, XEXP (note, 0))))
14407 	    {
14408 	      PUT_REG_NOTE_KIND (note, REG_DEAD);
14409 	      place = i3;
14410 	    }
14411 
14412 	  /* A SET or CLOBBER of the REG_UNUSED reg has been removed,
14413 	     but we can't tell which at this point.  We must reset any
14414 	     expectations we had about the value that was previously
14415 	     stored in the reg.  ??? Ideally, we'd adjust REG_N_SETS
14416 	     and, if appropriate, restore its previous value, but we
14417 	     don't have enough information for that at this point.  */
14418 	  else
14419 	    {
14420 	      record_value_for_reg (XEXP (note, 0), NULL, NULL_RTX);
14421 
14422 	      /* Otherwise, if this register is now referenced in i2
14423 		 then the register used to be modified in one of the
14424 		 original insns.  If it was i3 (say, in an unused
14425 		 parallel), it's now completely gone, so the note can
14426 		 be discarded.  But if it was modified in i2, i1 or i0
14427 		 and we still reference it in i2, then we're
14428 		 referencing the previous value, and since the
14429 		 register was modified and REG_UNUSED, we know that
14430 		 the previous value is now dead.  So, if we only
14431 		 reference the register in i2, we change the note to
14432 		 REG_DEAD, to reflect the previous value.  However, if
14433 		 we're also setting or clobbering the register as
14434 		 scratch, we know (because the register was not
14435 		 referenced in i3) that it's unused, just as it was
14436 		 unused before, and we place the note in i2.  */
14437 	      if (from_insn != i3 && i2 && INSN_P (i2)
14438 		  && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14439 		{
14440 		  if (!reg_set_p (XEXP (note, 0), PATTERN (i2)))
14441 		    PUT_REG_NOTE_KIND (note, REG_DEAD);
14442 		  if (! (REG_P (XEXP (note, 0))
14443 			 ? find_regno_note (i2, REG_NOTE_KIND (note),
14444 					    REGNO (XEXP (note, 0)))
14445 			 : find_reg_note (i2, REG_NOTE_KIND (note),
14446 					  XEXP (note, 0))))
14447 		    place = i2;
14448 		}
14449 	    }
14450 
14451 	  break;
14452 
14453 	case REG_EQUAL:
14454 	case REG_EQUIV:
14455 	case REG_NOALIAS:
14456 	  /* These notes say something about results of an insn.  We can
14457 	     only support them if they used to be on I3 in which case they
14458 	     remain on I3.  Otherwise they are ignored.
14459 
14460 	     If the note refers to an expression that is not a constant, we
14461 	     must also ignore the note since we cannot tell whether the
14462 	     equivalence is still true.  It might be possible to do
14463 	     slightly better than this (we only have a problem if I2DEST
14464 	     or I1DEST is present in the expression), but it doesn't
14465 	     seem worth the trouble.  */
14466 
14467 	  if (from_insn == i3
14468 	      && (XEXP (note, 0) == 0 || CONSTANT_P (XEXP (note, 0))))
14469 	    place = i3;
14470 	  break;
14471 
14472 	case REG_INC:
14473 	  /* These notes say something about how a register is used.  They must
14474 	     be present on any use of the register in I2 or I3.  */
14475 	  if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3)))
14476 	    place = i3;
14477 
14478 	  if (i2 && reg_mentioned_p (XEXP (note, 0), PATTERN (i2)))
14479 	    {
14480 	      if (place)
14481 		place2 = i2;
14482 	      else
14483 		place = i2;
14484 	    }
14485 	  break;
14486 
14487 	case REG_LABEL_TARGET:
14488 	case REG_LABEL_OPERAND:
14489 	  /* This can show up in several ways -- either directly in the
14490 	     pattern, or hidden off in the constant pool with (or without?)
14491 	     a REG_EQUAL note.  */
14492 	  /* ??? Ignore the without-reg_equal-note problem for now.  */
14493 	  if (reg_mentioned_p (XEXP (note, 0), PATTERN (i3))
14494 	      || ((tem_note = find_reg_note (i3, REG_EQUAL, NULL_RTX))
14495 		  && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
14496 		  && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0)))
14497 	    place = i3;
14498 
14499 	  if (i2
14500 	      && (reg_mentioned_p (XEXP (note, 0), PATTERN (i2))
14501 		  || ((tem_note = find_reg_note (i2, REG_EQUAL, NULL_RTX))
14502 		      && GET_CODE (XEXP (tem_note, 0)) == LABEL_REF
14503 		      && label_ref_label (XEXP (tem_note, 0)) == XEXP (note, 0))))
14504 	    {
14505 	      if (place)
14506 		place2 = i2;
14507 	      else
14508 		place = i2;
14509 	    }
14510 
14511 	  /* For REG_LABEL_TARGET on a JUMP_P, we prefer to put the note
14512 	     as a JUMP_LABEL or decrement LABEL_NUSES if it's already
14513 	     there.  */
14514 	  if (place && JUMP_P (place)
14515 	      && REG_NOTE_KIND (note) == REG_LABEL_TARGET
14516 	      && (JUMP_LABEL (place) == NULL
14517 		  || JUMP_LABEL (place) == XEXP (note, 0)))
14518 	    {
14519 	      rtx label = JUMP_LABEL (place);
14520 
14521 	      if (!label)
14522 		JUMP_LABEL (place) = XEXP (note, 0);
14523 	      else if (LABEL_P (label))
14524 		LABEL_NUSES (label)--;
14525 	    }
14526 
14527 	  if (place2 && JUMP_P (place2)
14528 	      && REG_NOTE_KIND (note) == REG_LABEL_TARGET
14529 	      && (JUMP_LABEL (place2) == NULL
14530 		  || JUMP_LABEL (place2) == XEXP (note, 0)))
14531 	    {
14532 	      rtx label = JUMP_LABEL (place2);
14533 
14534 	      if (!label)
14535 		JUMP_LABEL (place2) = XEXP (note, 0);
14536 	      else if (LABEL_P (label))
14537 		LABEL_NUSES (label)--;
14538 	      place2 = 0;
14539 	    }
14540 	  break;
14541 
14542 	case REG_NONNEG:
14543 	  /* This note says something about the value of a register prior
14544 	     to the execution of an insn.  It is too much trouble to see
14545 	     if the note is still correct in all situations.  It is better
14546 	     to simply delete it.  */
14547 	  break;
14548 
14549 	case REG_DEAD:
14550 	  /* If we replaced the right hand side of FROM_INSN with a
14551 	     REG_EQUAL note, the original use of the dying register
14552 	     will not have been combined into I3 and I2.  In such cases,
14553 	     FROM_INSN is guaranteed to be the first of the combined
14554 	     instructions, so we simply need to search back before
14555 	     FROM_INSN for the previous use or set of this register,
14556 	     then alter the notes there appropriately.
14557 
14558 	     If the register is used as an input in I3, it dies there.
14559 	     Similarly for I2, if it is nonzero and adjacent to I3.
14560 
14561 	     If the register is not used as an input in either I3 or I2
14562 	     and it is not one of the registers we were supposed to eliminate,
14563 	     there are two possibilities.  We might have a non-adjacent I2
14564 	     or we might have somehow eliminated an additional register
14565 	     from a computation.  For example, we might have had A & B where
14566 	     we discover that B will always be zero.  In this case we will
14567 	     eliminate the reference to A.
14568 
14569 	     In both cases, we must search to see if we can find a previous
14570 	     use of A and put the death note there.  */
14571 
14572 	  if (from_insn
14573 	      && from_insn == i2mod
14574 	      && !reg_overlap_mentioned_p (XEXP (note, 0), i2mod_new_rhs))
14575 	    tem_insn = from_insn;
14576 	  else
14577 	    {
14578 	      if (from_insn
14579 		  && CALL_P (from_insn)
14580 		  && find_reg_fusage (from_insn, USE, XEXP (note, 0)))
14581 		place = from_insn;
14582 	      else if (i2 && reg_set_p (XEXP (note, 0), PATTERN (i2)))
14583 		{
14584 		  /* If the new I2 sets the same register that is marked
14585 		     dead in the note, we do not in general know where to
14586 		     put the note.  One important case we _can_ handle is
14587 		     when the note comes from I3.  */
14588 		  if (from_insn == i3)
14589 		    place = i3;
14590 		  else
14591 		    break;
14592 		}
14593 	      else if (reg_referenced_p (XEXP (note, 0), PATTERN (i3)))
14594 		place = i3;
14595 	      else if (i2 != 0 && next_nonnote_nondebug_insn (i2) == i3
14596 		       && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14597 		place = i2;
14598 	      else if ((rtx_equal_p (XEXP (note, 0), elim_i2)
14599 			&& !(i2mod
14600 			     && reg_overlap_mentioned_p (XEXP (note, 0),
14601 							 i2mod_old_rhs)))
14602 		       || rtx_equal_p (XEXP (note, 0), elim_i1)
14603 		       || rtx_equal_p (XEXP (note, 0), elim_i0))
14604 		break;
14605 	      tem_insn = i3;
14606 	    }
14607 
14608 	  if (place == 0)
14609 	    {
14610 	      basic_block bb = this_basic_block;
14611 
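	      /* No suitable place was found above; scan backwards from
		 TEM_INSN for an earlier set or use of the register and
		 attach the note there.  */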
14612 	      for (tem_insn = PREV_INSN (tem_insn); place == 0; tem_insn = PREV_INSN (tem_insn))
14613 		{
14614 		  if (!NONDEBUG_INSN_P (tem_insn))
14615 		    {
14616 		      if (tem_insn == BB_HEAD (bb))
14617 			break;
14618 		      continue;
14619 		    }
14620 
14621 		  /* If the register is being set at TEM_INSN, see if that is all
14622 		     TEM_INSN is doing.  If so, delete TEM_INSN.  Otherwise, make this
14623 		     into a REG_UNUSED note instead. Don't delete sets to
14624 		     global register vars.  */
14625 		  if ((REGNO (XEXP (note, 0)) >= FIRST_PSEUDO_REGISTER
14626 		       || !global_regs[REGNO (XEXP (note, 0))])
14627 		      && reg_set_p (XEXP (note, 0), PATTERN (tem_insn)))
14628 		    {
14629 		      rtx set = single_set (tem_insn);
14630 		      rtx inner_dest = 0;
14631 		      rtx_insn *cc0_setter = NULL;
14632 
14633 		      if (set != 0)
14634 			for (inner_dest = SET_DEST (set);
14635 			     (GET_CODE (inner_dest) == STRICT_LOW_PART
14636 			      || GET_CODE (inner_dest) == SUBREG
14637 			      || GET_CODE (inner_dest) == ZERO_EXTRACT);
14638 			     inner_dest = XEXP (inner_dest, 0))
14639 			  ;
14640 
14641 		      /* Verify that it was the set, and not a clobber that
14642 			 modified the register.
14643 
14644 			 CC0 targets must be careful to maintain setter/user
14645 			 pairs.  If we cannot delete the setter due to side
14646 			 effects, mark the user with an UNUSED note instead
14647 			 of deleting it.  */
14648 
14649 		      if (set != 0 && ! side_effects_p (SET_SRC (set))
14650 			  && rtx_equal_p (XEXP (note, 0), inner_dest)
14651 			  && (!HAVE_cc0
14652 			      || (! reg_mentioned_p (cc0_rtx, SET_SRC (set))
14653 				  || ((cc0_setter = prev_cc0_setter (tem_insn)) != NULL
14654 				      && sets_cc0_p (PATTERN (cc0_setter)) > 0))))
14655 			{
14656 			  /* Move the notes and links of TEM_INSN elsewhere.
14657 			     This might delete other dead insns recursively.
14658 			     First set the pattern to something that won't use
14659 			     any register.  */
14660 			  rtx old_notes = REG_NOTES (tem_insn);
14661 
14662 			  PATTERN (tem_insn) = pc_rtx;
14663 			  REG_NOTES (tem_insn) = NULL;
14664 
14665 			  distribute_notes (old_notes, tem_insn, tem_insn, NULL,
14666 					    NULL_RTX, NULL_RTX, NULL_RTX);
14667 			  distribute_links (LOG_LINKS (tem_insn));
14668 
14669 			  unsigned int regno = REGNO (XEXP (note, 0));
14670 			  reg_stat_type *rsp = &reg_stat[regno];
14671 			  if (rsp->last_set == tem_insn)
14672 			    record_value_for_reg (XEXP (note, 0), NULL, NULL_RTX);
14673 
14674 			  SET_INSN_DELETED (tem_insn);
14675 			  if (tem_insn == i2)
14676 			    i2 = NULL;
14677 
14678 			  /* Delete the setter too.  */
14679 			  if (cc0_setter)
14680 			    {
14681 			      PATTERN (cc0_setter) = pc_rtx;
14682 			      old_notes = REG_NOTES (cc0_setter);
14683 			      REG_NOTES (cc0_setter) = NULL;
14684 
14685 			      distribute_notes (old_notes, cc0_setter,
14686 						cc0_setter, NULL,
14687 						NULL_RTX, NULL_RTX, NULL_RTX);
14688 			      distribute_links (LOG_LINKS (cc0_setter));
14689 
14690 			      SET_INSN_DELETED (cc0_setter);
14691 			      if (cc0_setter == i2)
14692 				i2 = NULL;
14693 			    }
14694 			}
14695 		      else
14696 			{
14697 			  PUT_REG_NOTE_KIND (note, REG_UNUSED);
14698 
14699 			  /*  If there isn't already a REG_UNUSED note, put one
14700 			      here.  Do not place a REG_DEAD note, even if
14701 			      the register is also used here; that would not
14702 			      match the algorithm used in lifetime analysis
14703 			      and can cause the consistency check in the
14704 			      scheduler to fail.  */
14705 			  if (! find_regno_note (tem_insn, REG_UNUSED,
14706 						 REGNO (XEXP (note, 0))))
14707 			    place = tem_insn;
14708 			  break;
14709 			}
14710 		    }
14711 		  else if (reg_referenced_p (XEXP (note, 0), PATTERN (tem_insn))
14712 			   || (CALL_P (tem_insn)
14713 			       && find_reg_fusage (tem_insn, USE, XEXP (note, 0))))
14714 		    {
14715 		      place = tem_insn;
14716 
14717 		      /* If we are doing a 3->2 combination, and we have a
14718 			 register which formerly died in i3 and was not used
14719 			 by i2, which now no longer dies in i3 and is used in
14720 			 i2 but does not die in i2, and place is between i2
14721 			 and i3, then we may need to move a link from place to
14722 			 i2.  */
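		      /* A sketch of that scenario (insn and register numbers
			 hypothetical): (reg:SI 100) used to die in I3, the
			 combination moved its use into the new I2, and PLACE
			 sits between I2 and I3.  Any LOG_LINKS now hanging
			 off PLACE may really belong on I2, so we detach them
			 and let distribute_links find their proper home.  */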
14723 		      if (i2 && DF_INSN_LUID (place) > DF_INSN_LUID (i2)
14724 			  && from_insn
14725 			  && DF_INSN_LUID (from_insn) > DF_INSN_LUID (i2)
14726 			  && reg_referenced_p (XEXP (note, 0), PATTERN (i2)))
14727 			{
14728 			  struct insn_link *links = LOG_LINKS (place);
14729 			  LOG_LINKS (place) = NULL;
14730 			  distribute_links (links);
14731 			}
14732 		      break;
14733 		    }
14734 
14735 		  if (tem_insn == BB_HEAD (bb))
14736 		    break;
14737 		}
14738 
14739 	    }
14740 
14741 	  /* If the register is set or already dead at PLACE, we needn't do
14742 	     anything with this note if it is still a REG_DEAD note.
14743 	     We check here if it is set at all, not if it is totally replaced,
14744 	     which is what `dead_or_set_p' checks, so also check for it being
14745 	     set partially.  */
14746 
14747 	  if (place && REG_NOTE_KIND (note) == REG_DEAD)
14748 	    {
14749 	      unsigned int regno = REGNO (XEXP (note, 0));
14750 	      reg_stat_type *rsp = &reg_stat[regno];
14751 
14752 	      if (dead_or_set_p (place, XEXP (note, 0))
14753 		  || reg_bitfield_target_p (XEXP (note, 0), PATTERN (place)))
14754 		{
14755 		  /* Unless the register previously died in PLACE, clear
14756 		     last_death.  [I no longer understand why this is
14757 		     being done.] */
14758 		  if (rsp->last_death != place)
14759 		    rsp->last_death = 0;
14760 		  place = 0;
14761 		}
14762 	      else
14763 		rsp->last_death = place;
14764 
14765 	      /* If this is a death note for a hard reg that is occupying
14766 		 multiple registers, ensure that we are still using all
14767 		 parts of the object.  If we find a piece of the object
14768 		 that is unused, we must arrange for an appropriate REG_DEAD
14769 		 note to be added for it.  However, we can't just emit a USE
14770 		 and tag the note to it, since the register might actually
14771 		 be dead; so we recurse, and the recursive call then finds
14772 		 the previous insn that used this register.  */
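	      /* For instance (hypothetical hard registers): if the note is
		 for (reg:DI 0), occupying hard regs 0 and 1, but PLACE only
		 uses (reg:SI 1), then hard reg 0 is an unused piece and the
		 code below arranges a REG_DEAD or REG_UNUSED note for it on
		 the appropriate insn.  */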
14773 
14774 	      if (place && REG_NREGS (XEXP (note, 0)) > 1)
14775 		{
14776 		  unsigned int endregno = END_REGNO (XEXP (note, 0));
14777 		  bool all_used = true;
14778 		  unsigned int i;
14779 
14780 		  for (i = regno; i < endregno; i++)
14781 		    if ((! refers_to_regno_p (i, PATTERN (place))
14782 			 && ! find_regno_fusage (place, USE, i))
14783 			|| dead_or_set_regno_p (place, i))
14784 		      {
14785 			all_used = false;
14786 			break;
14787 		      }
14788 
14789 		  if (! all_used)
14790 		    {
14791 		      /* Put only REG_DEAD notes for pieces that are
14792 			 not already dead or set.  */
14793 
14794 		      for (i = regno; i < endregno;
14795 			   i += hard_regno_nregs (i, reg_raw_mode[i]))
14796 			{
14797 			  rtx piece = regno_reg_rtx[i];
14798 			  basic_block bb = this_basic_block;
14799 
14800 			  if (! dead_or_set_p (place, piece)
14801 			      && ! reg_bitfield_target_p (piece,
14802 							  PATTERN (place)))
14803 			    {
14804 			      rtx new_note = alloc_reg_note (REG_DEAD, piece,
14805 							     NULL_RTX);
14806 
14807 			      distribute_notes (new_note, place, place,
14808 						NULL, NULL_RTX, NULL_RTX,
14809 						NULL_RTX);
14810 			    }
14811 			  else if (! refers_to_regno_p (i, PATTERN (place))
14812 				   && ! find_regno_fusage (place, USE, i))
14813 			    for (tem_insn = PREV_INSN (place); ;
14814 				 tem_insn = PREV_INSN (tem_insn))
14815 			      {
14816 				if (!NONDEBUG_INSN_P (tem_insn))
14817 				  {
14818 				    if (tem_insn == BB_HEAD (bb))
14819 				      break;
14820 				    continue;
14821 				  }
14822 				if (dead_or_set_p (tem_insn, piece)
14823 				    || reg_bitfield_target_p (piece,
14824 							      PATTERN (tem_insn)))
14825 				  {
14826 				    add_reg_note (tem_insn, REG_UNUSED, piece);
14827 				    break;
14828 				  }
14829 			      }
14830 			}
14831 
14832 		      place = 0;
14833 		    }
14834 		}
14835 	    }
14836 	  break;
14837 
14838 	default:
14839 	  /* Any other notes should not be present at this point in the
14840 	     compilation.  */
14841 	  gcc_unreachable ();
14842 	}
14843 
14844       if (place)
14845 	{
14846 	  XEXP (note, 1) = REG_NOTES (place);
14847 	  REG_NOTES (place) = note;
14848 
14849 	  /* Set added_notes_insn to the earliest insn we added a note to.  */
14850 	  if (added_notes_insn == 0
14851 	      || DF_INSN_LUID (added_notes_insn) > DF_INSN_LUID (place))
14852 	    added_notes_insn = place;
14853 	}
14854 
14855       if (place2)
14856 	{
14857 	  add_shallow_copy_of_reg_note (place2, note);
14858 
14859 	  /* Set added_notes_insn to the earliest insn we added a note to.  */
14860 	  if (added_notes_insn == 0
14861 	      || DF_INSN_LUID (added_notes_insn) > DF_INSN_LUID (place2))
14862 	    added_notes_insn = place2;
14863 	}
14864     }
14865 }
14866 
14867 /* Similarly to above, distribute the LOG_LINKS that used to be present on
14868    I3, I2, and I1 to new locations.  This is also called to add a link
14869    pointing at I3 when I3's destination is changed.  */
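
/* For example (hypothetical insns and registers): if I2 used (reg:SI 100)
   set by an earlier insn X, and I2 has just been deleted by a successful
   combination, the link that pointed from I2 back to X is re-attached to
   the first insn after X that still uses reg 100 (typically I3).  */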
14870 
14871 static void
14872 distribute_links (struct insn_link *links)
14873 {
14874   struct insn_link *link, *next_link;
14875 
14876   for (link = links; link; link = next_link)
14877     {
14878       rtx_insn *place = 0;
14879       rtx_insn *insn;
14880       rtx set, reg;
14881 
14882       next_link = link->next;
14883 
14884       /* If the insn that this link points to is a NOTE, ignore it.  */
14885       if (NOTE_P (link->insn))
14886 	continue;
14887 
14888       set = 0;
14889       rtx pat = PATTERN (link->insn);
14890       if (GET_CODE (pat) == SET)
14891 	set = pat;
14892       else if (GET_CODE (pat) == PARALLEL)
14893 	{
14894 	  int i;
14895 	  for (i = 0; i < XVECLEN (pat, 0); i++)
14896 	    {
14897 	      set = XVECEXP (pat, 0, i);
14898 	      if (GET_CODE (set) != SET)
14899 		continue;
14900 
14901 	      reg = SET_DEST (set);
14902 	      while (GET_CODE (reg) == ZERO_EXTRACT
14903 		     || GET_CODE (reg) == STRICT_LOW_PART
14904 		     || GET_CODE (reg) == SUBREG)
14905 		reg = XEXP (reg, 0);
14906 
14907 	      if (!REG_P (reg))
14908 		continue;
14909 
14910 	      if (REGNO (reg) == link->regno)
14911 		break;
14912 	    }
14913 	  if (i == XVECLEN (pat, 0))
14914 	    continue;
14915 	}
14916       else
14917 	continue;
14918 
14919       reg = SET_DEST (set);
14920 
14921       while (GET_CODE (reg) == ZERO_EXTRACT
14922 	     || GET_CODE (reg) == STRICT_LOW_PART
14923 	     || GET_CODE (reg) == SUBREG)
14924 	reg = XEXP (reg, 0);
14925 
14926       if (reg == pc_rtx)
14927 	continue;
14928 
14929       /* A LOG_LINK is defined as being placed on the first insn that uses
14930 	 a register and points to the insn that sets the register.  Start
14931 	 searching at the next insn after the target of the link and stop
14932 	 when we reach a set of the register or the end of the basic block.
14933 
14934 	 Note that this correctly handles the link that used to point from
14935 	 I3 to I2.  Also note that not much searching is typically done here
14936 	 since most links don't point very far away.  */
14937 
14938       for (insn = NEXT_INSN (link->insn);
14939 	   (insn && (this_basic_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
14940 		     || BB_HEAD (this_basic_block->next_bb) != insn));
14941 	   insn = NEXT_INSN (insn))
14942 	if (DEBUG_INSN_P (insn))
14943 	  continue;
14944 	else if (INSN_P (insn) && reg_overlap_mentioned_p (reg, PATTERN (insn)))
14945 	  {
14946 	    if (reg_referenced_p (reg, PATTERN (insn)))
14947 	      place = insn;
14948 	    break;
14949 	  }
14950 	else if (CALL_P (insn)
14951 		 && find_reg_fusage (insn, USE, reg))
14952 	  {
14953 	    place = insn;
14954 	    break;
14955 	  }
14956 	else if (INSN_P (insn) && reg_set_p (reg, insn))
14957 	  break;
14958 
14959       /* If we found a place to put the link, place it there unless there
14960 	 is already a link to the same insn as LINK at that point.  */
14961 
14962       if (place)
14963 	{
14964 	  struct insn_link *link2;
14965 
14966 	  FOR_EACH_LOG_LINK (link2, place)
14967 	    if (link2->insn == link->insn && link2->regno == link->regno)
14968 	      break;
14969 
14970 	  if (link2 == NULL)
14971 	    {
14972 	      link->next = LOG_LINKS (place);
14973 	      LOG_LINKS (place) = link;
14974 
14975 	      /* Set added_links_insn to the earliest insn we added a
14976 		 link to.  */
14977 	      if (added_links_insn == 0
14978 		  || DF_INSN_LUID (added_links_insn) > DF_INSN_LUID (place))
14979 		added_links_insn = place;
14980 	    }
14981 	}
14982     }
14983 }
14984 
14985 /* Check for any register or memory mentioned in EQUIV that is not
14986    mentioned in EXPR.  This is used to restrict EQUIV to "specializations"
14987    of EXPR where some registers may have been replaced by constants.  */
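
/* For instance (hypothetical RTL): with EXPR of (plus (reg 100) (reg 101)),
   an EQUIV of (plus (reg 100) (const_int 4)) mentions no register or memory
   that EXPR does not, so we return false; an EQUIV of (plus (reg 102)
   (const_int 4)) makes us return true, since reg 102 never appears in
   EXPR.  */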
14988 
14989 static bool
14990 unmentioned_reg_p (rtx equiv, rtx expr)
14991 {
14992   subrtx_iterator::array_type array;
14993   FOR_EACH_SUBRTX (iter, array, equiv, NONCONST)
14994     {
14995       const_rtx x = *iter;
14996       if ((REG_P (x) || MEM_P (x))
14997 	  && !reg_mentioned_p (x, expr))
14998 	return true;
14999     }
15000   return false;
15001 }
15002 
15003 DEBUG_FUNCTION void
15004 dump_combine_stats (FILE *file)
15005 {
15006   fprintf
15007     (file,
15008      ";; Combiner statistics: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n\n",
15009      combine_attempts, combine_merges, combine_extras, combine_successes);
15010 }
15011 
15012 void
15013 dump_combine_total_stats (FILE *file)
15014 {
15015   fprintf
15016     (file,
15017      "\n;; Combiner totals: %d attempts, %d substitutions (%d requiring new space),\n;; %d successes.\n",
15018      total_attempts, total_merges, total_extras, total_successes);
15019 }
15020 
15021 /* Make pseudo-to-pseudo copies after every hard-reg-to-pseudo-copy, because
15022    the reg-to-reg copy can usefully combine with later instructions, but we
15023    do not want to combine the hard reg into later instructions, for that
15024    restricts register allocation.  */
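/* A sketch of the transformation (register numbers hypothetical):

     (set (reg:SI 100) (reg:SI 0))        copy hard reg 0 into pseudo 100

   becomes

     (set (reg:SI 101) (reg:SI 0))        new pseudo 101 holds the hard reg
     (set (reg:SI 100) (reg:SI 101))      original insn now copies a pseudo

   so that combine may fold the pseudo-to-pseudo copy into later uses
   without propagating the hard register itself.  */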
15025 static void
15026 make_more_copies (void)
15027 {
15028   basic_block bb;
15029 
15030   FOR_EACH_BB_FN (bb, cfun)
15031     {
15032       rtx_insn *insn;
15033 
15034       FOR_BB_INSNS (bb, insn)
15035         {
15036           if (!NONDEBUG_INSN_P (insn))
15037             continue;
15038 
15039 	  rtx set = single_set (insn);
15040 	  if (!set)
15041 	    continue;
15042 
15043 	  rtx dest = SET_DEST (set);
15044 	  if (!(REG_P (dest) && !HARD_REGISTER_P (dest)))
15045 	    continue;
15046 
15047 	  rtx src = SET_SRC (set);
15048 	  if (!(REG_P (src) && HARD_REGISTER_P (src)))
15049 	    continue;
15050 	  if (TEST_HARD_REG_BIT (fixed_reg_set, REGNO (src)))
15051 	    continue;
15052 
15053 	  rtx new_reg = gen_reg_rtx (GET_MODE (dest));
15054 	  rtx_insn *new_insn = gen_move_insn (new_reg, src);
15055 	  SET_SRC (set) = new_reg;
15056 	  emit_insn_before (new_insn, insn);
15057 	  df_insn_rescan (insn);
15058 	}
15059     }
15060 }
15061 
15062 /* Try combining insns through substitution.  */
15063 static unsigned int
15064 rest_of_handle_combine (void)
15065 {
15066   make_more_copies ();
15067 
15068   df_set_flags (DF_LR_RUN_DCE + DF_DEFER_INSN_RESCAN);
15069   df_note_add_problem ();
15070   df_analyze ();
15071 
15072   regstat_init_n_sets_and_refs ();
15073   reg_n_sets_max = max_reg_num ();
15074 
15075   int rebuild_jump_labels_after_combine
15076     = combine_instructions (get_insns (), max_reg_num ());
15077 
15078   /* Combining insns may have turned an indirect jump into a
15079      direct jump.  Rebuild the JUMP_LABEL fields of jumping
15080      instructions.  */
15081   if (rebuild_jump_labels_after_combine)
15082     {
15083       if (dom_info_available_p (CDI_DOMINATORS))
15084 	free_dominance_info (CDI_DOMINATORS);
15085       timevar_push (TV_JUMP);
15086       rebuild_jump_labels (get_insns ());
15087       cleanup_cfg (0);
15088       timevar_pop (TV_JUMP);
15089     }
15090 
15091   regstat_free_n_sets_and_refs ();
15092   return 0;
15093 }
15094 
15095 namespace {
15096 
15097 const pass_data pass_data_combine =
15098 {
15099   RTL_PASS, /* type */
15100   "combine", /* name */
15101   OPTGROUP_NONE, /* optinfo_flags */
15102   TV_COMBINE, /* tv_id */
15103   PROP_cfglayout, /* properties_required */
15104   0, /* properties_provided */
15105   0, /* properties_destroyed */
15106   0, /* todo_flags_start */
15107   TODO_df_finish, /* todo_flags_finish */
15108 };
15109 
15110 class pass_combine : public rtl_opt_pass
15111 {
15112 public:
15113   pass_combine (gcc::context *ctxt)
15114     : rtl_opt_pass (pass_data_combine, ctxt)
15115   {}
15116 
15117   /* opt_pass methods: */
15118   virtual bool gate (function *) { return (optimize > 0); }
15119   virtual unsigned int execute (function *)
15120     {
15121       return rest_of_handle_combine ();
15122     }
15123 
15124 }; // class pass_combine
15125 
15126 } // anon namespace
15127 
15128 rtl_opt_pass *
15129 make_pass_combine (gcc::context *ctxt)
15130 {
15131   return new pass_combine (ctxt);
15132 }
15133